mirror of
https://github.com/ipfs/kubo.git
synced 2026-03-09 18:28:08 +08:00
Remove go-datastore from Godeps
License: MIT Signed-off-by: Jakub Sztandera <kubuxu@protonmail.ch>
This commit is contained in:
parent
ecbca7e0f4
commit
6217e1f141
4
Godeps/Godeps.json
generated
4
Godeps/Godeps.json
generated
@ -29,10 +29,6 @@
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ipfs/go-datastore",
|
||||
"Rev": "e63957b6da369d986ef3e7a3f249779ba3f56c7e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-detect-race",
|
||||
"Rev": "3463798d9574bd0b7eca275dccc530804ff5216f"
|
||||
|
||||
11
Godeps/_workspace/src/github.com/ipfs/go-datastore/.travis.yml
generated
vendored
11
Godeps/_workspace/src/github.com/ipfs/go-datastore/.travis.yml
generated
vendored
@ -1,11 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- release
|
||||
- tip
|
||||
|
||||
script:
|
||||
- make test
|
||||
|
||||
env: TEST_NO_FUSE=1 TEST_VERBOSE=1
|
||||
76
Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json
generated
vendored
76
Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json
generated
vendored
@ -1,76 +0,0 @@
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-datastore",
|
||||
"GoVersion": "go1.5",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/Sirupsen/logrus",
|
||||
"Comment": "v0.8.3-37-g418b41d",
|
||||
"Rev": "418b41d23a1bf978c06faea5313ba194650ac088"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codahale/blake2",
|
||||
"Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codahale/hdrhistogram",
|
||||
"Rev": "5fd85ec0b4e2dd5d4158d257d943f2e586d86b62"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codahale/metrics",
|
||||
"Rev": "7d3beb1b480077e77c08a6f6c65ea969f6e91420"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/dustin/randbo",
|
||||
"Rev": "7f1b564ca7242d22bcc6e2128beb90d9fa38b9f0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fzzy/radix/redis",
|
||||
"Comment": "v0.5.1",
|
||||
"Rev": "27a863cdffdb0998d13e1e11992b18489aeeaa25"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ipfs/go-log",
|
||||
"Rev": "ee5cb9834b33bcf29689183e0323e328c8b8de29"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-os-rename",
|
||||
"Rev": "2d93ae970ba96c41f717036a5bf5494faf1f38c0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/goprocess",
|
||||
"Rev": "5b02f8d275a2dd882fb06f8bbdf74347795ff3b1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattbaird/elastigo/api",
|
||||
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattbaird/elastigo/core",
|
||||
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/gosnappy/snappy",
|
||||
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "dfcbca9c45aeabb8971affa4f76b2d40f6f72328"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/check.v1",
|
||||
"Rev": "91ae5f88a67b14891cfd43895b01164f6c120420"
|
||||
},
|
||||
{
|
||||
"ImportPath": "launchpad.net/gocheck",
|
||||
"Comment": "87",
|
||||
"Rev": "gustavo@niemeyer.net-20140225173054-xu9zlkf9kxhvow02"
|
||||
}
|
||||
]
|
||||
}
|
||||
5
Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Readme
generated
vendored
5
Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Readme
generated
vendored
@ -1,5 +0,0 @@
|
||||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
||||
21
Godeps/_workspace/src/github.com/ipfs/go-datastore/LICENSE
generated
vendored
21
Godeps/_workspace/src/github.com/ipfs/go-datastore/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
24
Godeps/_workspace/src/github.com/ipfs/go-datastore/Makefile
generated
vendored
24
Godeps/_workspace/src/github.com/ipfs/go-datastore/Makefile
generated
vendored
@ -1,24 +0,0 @@
|
||||
build:
|
||||
go build
|
||||
|
||||
test: build
|
||||
go test -race -cpu=5 -v ./...
|
||||
|
||||
# saves/vendors third-party dependencies to Godeps/_workspace
|
||||
# -r flag rewrites import paths to use the vendored path
|
||||
# ./... performs operation on all packages in tree
|
||||
vendor: godep
|
||||
godep save -r ./...
|
||||
|
||||
deps:
|
||||
go get ./...
|
||||
|
||||
watch:
|
||||
-make
|
||||
@echo "[watching *.go; for recompilation]"
|
||||
# for portability, use watchmedo -- pip install watchmedo
|
||||
@watchmedo shell-command --patterns="*.go;" --recursive \
|
||||
--command='make' .
|
||||
|
||||
godep:
|
||||
go get github.com/tools/godep
|
||||
15
Godeps/_workspace/src/github.com/ipfs/go-datastore/README.md
generated
vendored
15
Godeps/_workspace/src/github.com/ipfs/go-datastore/README.md
generated
vendored
@ -1,15 +0,0 @@
|
||||
# datastore interface
|
||||
|
||||
datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime.
|
||||
|
||||
In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding).
|
||||
|
||||
Based on [datastore.py](https://github.com/datastore/datastore).
|
||||
|
||||
### Documentation
|
||||
|
||||
https://godoc.org/github.com/jbenet/go-datastore
|
||||
|
||||
### License
|
||||
|
||||
MIT
|
||||
189
Godeps/_workspace/src/github.com/ipfs/go-datastore/basic_ds.go
generated
vendored
189
Godeps/_workspace/src/github.com/ipfs/go-datastore/basic_ds.go
generated
vendored
@ -1,189 +0,0 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// Here are some basic datastore implementations.
|
||||
|
||||
type keyMap map[Key]interface{}
|
||||
|
||||
// MapDatastore uses a standard Go map for internal storage.
|
||||
type MapDatastore struct {
|
||||
values keyMap
|
||||
}
|
||||
|
||||
// NewMapDatastore constructs a MapDatastore
|
||||
func NewMapDatastore() (d *MapDatastore) {
|
||||
return &MapDatastore{
|
||||
values: keyMap{},
|
||||
}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *MapDatastore) Put(key Key, value interface{}) (err error) {
|
||||
d.values[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
|
||||
val, found := d.values[key]
|
||||
if !found {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
|
||||
_, found := d.values[key]
|
||||
return found, nil
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *MapDatastore) Delete(key Key) (err error) {
|
||||
if _, found := d.values[key]; !found {
|
||||
return ErrNotFound
|
||||
}
|
||||
delete(d.values, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
re := make([]dsq.Entry, 0, len(d.values))
|
||||
for k, v := range d.values {
|
||||
re = append(re, dsq.Entry{Key: k.String(), Value: v})
|
||||
}
|
||||
r := dsq.ResultsWithEntries(q, re)
|
||||
r = dsq.NaiveQueryApply(q, r)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (d *MapDatastore) Batch() (Batch, error) {
|
||||
return NewBasicBatch(d), nil
|
||||
}
|
||||
|
||||
func (d *MapDatastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NullDatastore stores nothing, but conforms to the API.
|
||||
// Useful to test with.
|
||||
type NullDatastore struct {
|
||||
}
|
||||
|
||||
// NewNullDatastore constructs a null datastoe
|
||||
func NewNullDatastore() *NullDatastore {
|
||||
return &NullDatastore{}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *NullDatastore) Put(key Key, value interface{}) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *NullDatastore) Get(key Key) (value interface{}, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *NullDatastore) Delete(key Key) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return dsq.ResultsWithEntries(q, nil), nil
|
||||
}
|
||||
|
||||
func (d *NullDatastore) Batch() (Batch, error) {
|
||||
return NewBasicBatch(d), nil
|
||||
}
|
||||
|
||||
func (d *NullDatastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LogDatastore logs all accesses through the datastore.
|
||||
type LogDatastore struct {
|
||||
Name string
|
||||
child Datastore
|
||||
}
|
||||
|
||||
// Shim is a datastore which has a child.
|
||||
type Shim interface {
|
||||
Datastore
|
||||
|
||||
Children() []Datastore
|
||||
}
|
||||
|
||||
// NewLogDatastore constructs a log datastore.
|
||||
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
|
||||
if len(name) < 1 {
|
||||
name = "LogDatastore"
|
||||
}
|
||||
return &LogDatastore{Name: name, child: ds}
|
||||
}
|
||||
|
||||
// Children implements Shim
|
||||
func (d *LogDatastore) Children() []Datastore {
|
||||
return []Datastore{d.child}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *LogDatastore) Put(key Key, value interface{}) (err error) {
|
||||
log.Printf("%s: Put %s\n", d.Name, key)
|
||||
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
|
||||
return d.child.Put(key, value)
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *LogDatastore) Get(key Key) (value interface{}, err error) {
|
||||
log.Printf("%s: Get %s\n", d.Name, key)
|
||||
return d.child.Get(key)
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
|
||||
log.Printf("%s: Has %s\n", d.Name, key)
|
||||
return d.child.Has(key)
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *LogDatastore) Delete(key Key) (err error) {
|
||||
log.Printf("%s: Delete %s\n", d.Name, key)
|
||||
return d.child.Delete(key)
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
log.Printf("%s: Query\n", d.Name)
|
||||
return d.child.Query(q)
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Batch() (Batch, error) {
|
||||
log.Printf("%s: Batch\n", d.Name)
|
||||
if bds, ok := d.child.(Batching); ok {
|
||||
return bds.Batch()
|
||||
}
|
||||
return nil, ErrBatchUnsupported
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Close() error {
|
||||
log.Printf("%s: Close\n", d.Name)
|
||||
if cds, ok := d.child.(io.Closer); ok {
|
||||
return cds.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
44
Godeps/_workspace/src/github.com/ipfs/go-datastore/batch.go
generated
vendored
44
Godeps/_workspace/src/github.com/ipfs/go-datastore/batch.go
generated
vendored
@ -1,44 +0,0 @@
|
||||
package datastore
|
||||
|
||||
// basicBatch implements the transaction interface for datastores who do
|
||||
// not have any sort of underlying transactional support
|
||||
type basicBatch struct {
|
||||
puts map[Key]interface{}
|
||||
deletes map[Key]struct{}
|
||||
|
||||
target Datastore
|
||||
}
|
||||
|
||||
func NewBasicBatch(ds Datastore) Batch {
|
||||
return &basicBatch{
|
||||
puts: make(map[Key]interface{}),
|
||||
deletes: make(map[Key]struct{}),
|
||||
target: ds,
|
||||
}
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Put(key Key, val interface{}) error {
|
||||
bt.puts[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Delete(key Key) error {
|
||||
bt.deletes[key] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Commit() error {
|
||||
for k, val := range bt.puts {
|
||||
if err := bt.target.Put(k, val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for k, _ := range bt.deletes {
|
||||
if err := bt.target.Delete(k); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
42
Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go
generated
vendored
42
Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go
generated
vendored
@ -1,42 +0,0 @@
|
||||
package callback
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
type Datastore struct {
|
||||
D ds.Datastore
|
||||
F func()
|
||||
}
|
||||
|
||||
func Wrap(ds ds.Datastore, f func()) *Datastore {
|
||||
return &Datastore{ds, f}
|
||||
}
|
||||
|
||||
func (c *Datastore) SetFunc(f func()) { c.F = f }
|
||||
|
||||
func (c *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
c.F()
|
||||
return c.D.Put(key, value)
|
||||
}
|
||||
|
||||
func (c *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
c.F()
|
||||
return c.D.Get(key)
|
||||
}
|
||||
|
||||
func (c *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
c.F()
|
||||
return c.D.Has(key)
|
||||
}
|
||||
|
||||
func (c *Datastore) Delete(key ds.Key) (err error) {
|
||||
c.F()
|
||||
return c.D.Delete(key)
|
||||
}
|
||||
|
||||
func (c *Datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
c.F()
|
||||
return c.D.Query(q)
|
||||
}
|
||||
140
Godeps/_workspace/src/github.com/ipfs/go-datastore/coalesce/coalesce.go
generated
vendored
140
Godeps/_workspace/src/github.com/ipfs/go-datastore/coalesce/coalesce.go
generated
vendored
@ -1,140 +0,0 @@
|
||||
package coalesce
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// parent keys
|
||||
var (
|
||||
putKey = "put"
|
||||
getKey = "get"
|
||||
hasKey = "has"
|
||||
deleteKey = "delete"
|
||||
)
|
||||
|
||||
type keySync struct {
|
||||
op string
|
||||
k ds.Key
|
||||
value interface{}
|
||||
}
|
||||
|
||||
type valSync struct {
|
||||
val interface{}
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// Datastore uses golang-lru for internal storage.
|
||||
type datastore struct {
|
||||
child ds.Datastore
|
||||
|
||||
reqmu sync.Mutex
|
||||
req map[keySync]*valSync
|
||||
}
|
||||
|
||||
// Wrap wraps a given datastore with a coalescing datastore.
|
||||
// All simultaenous requests which have the same keys will
|
||||
// yield the exact same result. Note that this shares
|
||||
// memory. It is not possible to copy a generic interface{}
|
||||
func Wrap(d ds.Datastore) ds.Datastore {
|
||||
return &datastore{child: d, req: make(map[keySync]*valSync)}
|
||||
}
|
||||
|
||||
// sync synchronizes requests for a given key.
|
||||
func (d *datastore) sync(k keySync) (vs *valSync, found bool) {
|
||||
d.reqmu.Lock()
|
||||
vs, found = d.req[k]
|
||||
if !found {
|
||||
vs = &valSync{done: make(chan struct{})}
|
||||
d.req[k] = vs
|
||||
}
|
||||
d.reqmu.Unlock()
|
||||
|
||||
// if we did find one, wait till it's done.
|
||||
if found {
|
||||
<-vs.done
|
||||
}
|
||||
return vs, found
|
||||
}
|
||||
|
||||
// sync synchronizes requests for a given key.
|
||||
func (d *datastore) syncDone(k keySync) {
|
||||
|
||||
d.reqmu.Lock()
|
||||
vs, found := d.req[k]
|
||||
if !found {
|
||||
panic("attempt to syncDone non-existent request")
|
||||
}
|
||||
delete(d.req, k)
|
||||
d.reqmu.Unlock()
|
||||
|
||||
// release all the waiters.
|
||||
close(vs.done)
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
ks := keySync{putKey, key, value}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.err = d.child.Put(key, value)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
ks := keySync{getKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.val, vs.err = d.child.Get(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.val, vs.err
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
ks := keySync{hasKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.val, vs.err = d.child.Has(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.val.(bool), vs.err
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *datastore) Delete(key ds.Key) (err error) {
|
||||
ks := keySync{deleteKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.err = d.child.Delete(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.err
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
// query not coalesced yet.
|
||||
return d.child.Query(q)
|
||||
}
|
||||
|
||||
func (d *datastore) Close() error {
|
||||
d.reqmu.Lock()
|
||||
defer d.reqmu.Unlock()
|
||||
|
||||
for _, s := range d.req {
|
||||
<-s.done
|
||||
}
|
||||
if c, ok := d.child.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
122
Godeps/_workspace/src/github.com/ipfs/go-datastore/datastore.go
generated
vendored
122
Godeps/_workspace/src/github.com/ipfs/go-datastore/datastore.go
generated
vendored
@ -1,122 +0,0 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
/*
|
||||
Datastore represents storage for any key-value pair.
|
||||
|
||||
Datastores are general enough to be backed by all kinds of different storage:
|
||||
in-memory caches, databases, a remote datastore, flat files on disk, etc.
|
||||
|
||||
The general idea is to wrap a more complicated storage facility in a simple,
|
||||
uniform interface, keeping the freedom of using the right tools for the job.
|
||||
In particular, a Datastore can aggregate other datastores in interesting ways,
|
||||
like sharded (to distribute load) or tiered access (caches before databases).
|
||||
|
||||
While Datastores should be written general enough to accept all sorts of
|
||||
values, some implementations will undoubtedly have to be specific (e.g. SQL
|
||||
databases where fields should be decomposed into columns), particularly to
|
||||
support queries efficiently. Moreover, certain datastores may enforce certain
|
||||
types of values (e.g. requiring an io.Reader, a specific struct, etc) or
|
||||
serialization formats (JSON, Protobufs, etc).
|
||||
|
||||
IMPORTANT: No Datastore should ever Panic! This is a cross-module interface,
|
||||
and thus it should behave predictably and handle exceptional conditions with
|
||||
proper error reporting. Thus, all Datastore calls may return errors, which
|
||||
should be checked by callers.
|
||||
*/
|
||||
type Datastore interface {
|
||||
// Put stores the object `value` named by `key`.
|
||||
//
|
||||
// The generalized Datastore interface does not impose a value type,
|
||||
// allowing various datastore middleware implementations (which do not
|
||||
// handle the values directly) to be composed together.
|
||||
//
|
||||
// Ultimately, the lowest-level datastore will need to do some value checking
|
||||
// or risk getting incorrect values. It may also be useful to expose a more
|
||||
// type-safe interface to your application, and do the checking up-front.
|
||||
Put(key Key, value interface{}) error
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
// Get will return ErrNotFound if the key is not mapped to a value.
|
||||
Get(key Key) (value interface{}, err error)
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
// In some contexts, it may be much cheaper only to check for existence of
|
||||
// a value, rather than retrieving the value itself. (e.g. HTTP HEAD).
|
||||
// The default implementation is found in `GetBackedHas`.
|
||||
Has(key Key) (exists bool, err error)
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
Delete(key Key) error
|
||||
|
||||
// Query searches the datastore and returns a query result. This function
|
||||
// may return before the query actually runs. To wait for the query:
|
||||
//
|
||||
// result, _ := ds.Query(q)
|
||||
//
|
||||
// // use the channel interface; result may come in at different times
|
||||
// for entry := range result.Entries() { ... }
|
||||
//
|
||||
// // or wait for the query to be completely done
|
||||
// result.Wait()
|
||||
// result.AllEntries()
|
||||
//
|
||||
Query(q query.Query) (query.Results, error)
|
||||
}
|
||||
|
||||
type Batching interface {
|
||||
Datastore
|
||||
|
||||
Batch() (Batch, error)
|
||||
}
|
||||
|
||||
var ErrBatchUnsupported = errors.New("this datastore does not support batching")
|
||||
|
||||
// ThreadSafeDatastore is an interface that all threadsafe datastore should
|
||||
// implement to leverage type safety checks.
|
||||
type ThreadSafeDatastore interface {
|
||||
Datastore
|
||||
IsThreadSafe()
|
||||
}
|
||||
|
||||
// Errors
|
||||
|
||||
// ErrNotFound is returned by Get, Has, and Delete when a datastore does not
|
||||
// map the given key to a value.
|
||||
var ErrNotFound = errors.New("datastore: key not found")
|
||||
|
||||
// ErrInvalidType is returned by Put when a given value is incopatible with
|
||||
// the type the datastore supports. This means a conversion (or serialization)
|
||||
// is needed beforehand.
|
||||
var ErrInvalidType = errors.New("datastore: invalid type error")
|
||||
|
||||
// GetBackedHas provides a default Datastore.Has implementation.
|
||||
// It exists so Datastore.Has implementations can use it, like so:
|
||||
//
|
||||
// func (*d SomeDatastore) Has(key Key) (exists bool, err error) {
|
||||
// return GetBackedHas(d, key)
|
||||
// }
|
||||
func GetBackedHas(ds Datastore, key Key) (bool, error) {
|
||||
_, err := ds.Get(key)
|
||||
switch err {
|
||||
case nil:
|
||||
return true, nil
|
||||
case ErrNotFound:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
type Batch interface {
|
||||
Put(key Key, val interface{}) error
|
||||
|
||||
Delete(key Key) error
|
||||
|
||||
Commit() error
|
||||
}
|
||||
128
Godeps/_workspace/src/github.com/ipfs/go-datastore/elastigo/datastore.go
generated
vendored
128
Godeps/_workspace/src/github.com/ipfs/go-datastore/elastigo/datastore.go
generated
vendored
@ -1,128 +0,0 @@
|
||||
package elastigo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
|
||||
"github.com/codahale/blake2"
|
||||
"github.com/mattbaird/elastigo/api"
|
||||
"github.com/mattbaird/elastigo/core"
|
||||
)
|
||||
|
||||
// Currently, elastigo does not allow connecting to multiple elasticsearch
|
||||
// instances. The elastigo API uses global static variables (ugh).
|
||||
// See https://github.com/mattbaird/elastigo/issues/22
|
||||
//
|
||||
// Thus, we use a global static variable (GlobalInstance), and return an
|
||||
// error if NewDatastore is called twice with different addresses.
|
||||
var GlobalInstance string
|
||||
|
||||
// Datastore uses a standard Go map for internal storage.
|
||||
type Datastore struct {
|
||||
url string
|
||||
index string
|
||||
|
||||
// Elastic search does not allow slashes in their object ids,
|
||||
// so we hash the key. By default, we use the provided BlakeKeyHash
|
||||
KeyHash func(ds.Key) string
|
||||
}
|
||||
|
||||
func NewDatastore(urlstr string) (*Datastore, error) {
|
||||
if GlobalInstance != "" && GlobalInstance != urlstr {
|
||||
return nil, fmt.Errorf("elastigo only allows one client. See godoc.")
|
||||
}
|
||||
|
||||
uf := "http://<host>:<port>/<index>"
|
||||
u, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing url: %s (%s)", urlstr, uf)
|
||||
}
|
||||
|
||||
host := strings.Split(u.Host, ":")
|
||||
api.Domain = host[0]
|
||||
if len(host) > 1 {
|
||||
api.Port = host[1]
|
||||
}
|
||||
|
||||
index := strings.Trim(u.Path, "/")
|
||||
if strings.Contains(index, "/") {
|
||||
e := "elastigo index cannot have slashes: %s (%s -> %s)"
|
||||
return nil, fmt.Errorf(e, index, urlstr, uf)
|
||||
}
|
||||
|
||||
GlobalInstance = urlstr
|
||||
return &Datastore{
|
||||
url: urlstr,
|
||||
index: index,
|
||||
KeyHash: BlakeKeyHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Returns the ElasticSearch index for given key. If the datastore specifies
|
||||
// an index, use that. Else, key.Parent
|
||||
func (d *Datastore) Index(key ds.Key) string {
|
||||
if len(d.index) > 0 {
|
||||
return d.index
|
||||
}
|
||||
return key.Parent().BaseNamespace()
|
||||
}
|
||||
|
||||
// value should be JSON serializable.
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
id := d.KeyHash(key)
|
||||
res, err := core.Index(false, d.Index(key), key.Type(), id, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !res.Ok {
|
||||
return fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
id := d.KeyHash(key)
|
||||
res, err := core.Get(false, d.Index(key), key.Type(), id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !res.Ok {
|
||||
return nil, fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
|
||||
}
|
||||
return res.Source, nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
id := d.KeyHash(key)
|
||||
return core.Exists(false, d.Index(key), key.Type(), id)
|
||||
}
|
||||
|
||||
func (d *Datastore) Delete(key ds.Key) (err error) {
|
||||
id := d.KeyHash(key)
|
||||
res, err := core.Delete(false, d.Index(key), key.Type(), id, 0, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !res.Ok {
|
||||
return fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Query(query.Query) (query.Results, error) {
|
||||
return nil, errors.New("Not yet implemented!")
|
||||
}
|
||||
|
||||
// Hash a key and return the first 16 hex chars of its blake2b hash.
|
||||
// basically: Blake2b(key).HexString[:16]
|
||||
func BlakeKeyHash(key ds.Key) string {
|
||||
h := blake2.NewBlake2B()
|
||||
h.Write(key.Bytes())
|
||||
d := h.Sum(nil)
|
||||
return fmt.Sprintf("%x", d)[:16]
|
||||
}
|
||||
392
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/flatfs.go
generated
vendored
392
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/flatfs.go
generated
vendored
@ -1,392 +0,0 @@
|
||||
// Package flatfs is a Datastore implementation that stores all
|
||||
// objects in a two-level directory structure in the local file
|
||||
// system, regardless of the hierarchy of the keys.
|
||||
package flatfs
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename"
|
||||
|
||||
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
|
||||
)
|
||||
|
||||
var log = logging.Logger("flatfs")
|
||||
|
||||
const (
|
||||
extension = ".data"
|
||||
maxPrefixLen = 16
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadPrefixLen = errors.New("bad prefix length")
|
||||
)
|
||||
|
||||
type Datastore struct {
|
||||
path string
|
||||
// length of the dir splay prefix, in bytes of hex digits
|
||||
hexPrefixLen int
|
||||
|
||||
// sychronize all writes and directory changes for added safety
|
||||
sync bool
|
||||
}
|
||||
|
||||
var _ datastore.Datastore = (*Datastore)(nil)
|
||||
|
||||
func New(path string, prefixLen int, sync bool) (*Datastore, error) {
|
||||
if prefixLen <= 0 || prefixLen > maxPrefixLen {
|
||||
return nil, ErrBadPrefixLen
|
||||
}
|
||||
fs := &Datastore{
|
||||
path: path,
|
||||
// convert from binary bytes to bytes of hex encoding
|
||||
hexPrefixLen: prefixLen * hex.EncodedLen(1),
|
||||
sync: sync,
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1))
|
||||
|
||||
func (fs *Datastore) encode(key datastore.Key) (dir, file string) {
|
||||
safe := hex.EncodeToString(key.Bytes()[1:])
|
||||
prefix := (safe + padding)[:fs.hexPrefixLen]
|
||||
dir = path.Join(fs.path, prefix)
|
||||
file = path.Join(dir, safe+extension)
|
||||
return dir, file
|
||||
}
|
||||
|
||||
func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) {
|
||||
if path.Ext(file) != extension {
|
||||
return datastore.Key{}, false
|
||||
}
|
||||
name := file[:len(file)-len(extension)]
|
||||
k, err := hex.DecodeString(name)
|
||||
if err != nil {
|
||||
return datastore.Key{}, false
|
||||
}
|
||||
return datastore.NewKey(string(k)), true
|
||||
}
|
||||
|
||||
func (fs *Datastore) makePrefixDir(dir string) error {
|
||||
if err := fs.makePrefixDirNoSync(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// In theory, if we create a new prefix dir and add a file to
|
||||
// it, the creation of the prefix dir itself might not be
|
||||
// durable yet. Sync the root dir after a successful mkdir of
|
||||
// a prefix dir, just to be paranoid.
|
||||
if fs.sync {
|
||||
if err := syncDir(fs.path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) makePrefixDirNoSync(dir string) error {
|
||||
if err := os.Mkdir(dir, 0777); err != nil {
|
||||
// EEXIST is safe to ignore here, that just means the prefix
|
||||
// directory already existed.
|
||||
if !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var putMaxRetries = 3
|
||||
|
||||
func (fs *Datastore) Put(key datastore.Key, value interface{}) error {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return datastore.ErrInvalidType
|
||||
}
|
||||
|
||||
var err error
|
||||
for i := 0; i < putMaxRetries; i++ {
|
||||
err = fs.doPut(key, val)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "too many open files") {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Error("too many open files, retrying in %dms", 100*i)
|
||||
time.Sleep(time.Millisecond * 100 * time.Duration(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (fs *Datastore) doPut(key datastore.Key, val []byte) error {
|
||||
dir, path := fs.encode(key)
|
||||
if err := fs.makePrefixDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := ioutil.TempFile(dir, "put-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
closed := false
|
||||
removed := false
|
||||
defer func() {
|
||||
if !closed {
|
||||
// silence errcheck
|
||||
_ = tmp.Close()
|
||||
}
|
||||
if !removed {
|
||||
// silence errcheck
|
||||
_ = os.Remove(tmp.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := tmp.Write(val); err != nil {
|
||||
return err
|
||||
}
|
||||
if fs.sync {
|
||||
if err := tmp.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tmp.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
closed = true
|
||||
|
||||
err = osrename.Rename(tmp.Name(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
removed = true
|
||||
|
||||
if fs.sync {
|
||||
if err := syncDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error {
|
||||
var dirsToSync []string
|
||||
files := make(map[*os.File]string)
|
||||
|
||||
for key, value := range data {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return datastore.ErrInvalidType
|
||||
}
|
||||
dir, path := fs.encode(key)
|
||||
if err := fs.makePrefixDirNoSync(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
dirsToSync = append(dirsToSync, dir)
|
||||
|
||||
tmp, err := ioutil.TempFile(dir, "put-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tmp.Write(val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files[tmp] = path
|
||||
}
|
||||
|
||||
ops := make(map[*os.File]int)
|
||||
|
||||
defer func() {
|
||||
for fi, _ := range files {
|
||||
val, _ := ops[fi]
|
||||
switch val {
|
||||
case 0:
|
||||
_ = fi.Close()
|
||||
fallthrough
|
||||
case 1:
|
||||
_ = os.Remove(fi.Name())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Now we sync everything
|
||||
// sync and close files
|
||||
for fi, _ := range files {
|
||||
if fs.sync {
|
||||
if err := fi.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := fi.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// signify closed
|
||||
ops[fi] = 1
|
||||
}
|
||||
|
||||
// move files to their proper places
|
||||
for fi, path := range files {
|
||||
if err := osrename.Rename(fi.Name(), path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// signify removed
|
||||
ops[fi] = 2
|
||||
}
|
||||
|
||||
// now sync the dirs for those files
|
||||
if fs.sync {
|
||||
for _, dir := range dirsToSync {
|
||||
if err := syncDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// sync top flatfs dir
|
||||
if err := syncDir(fs.path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) {
|
||||
_, path := fs.encode(key)
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, datastore.ErrNotFound
|
||||
}
|
||||
// no specific error to return, so just pass it through
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) {
|
||||
_, path := fs.encode(key)
|
||||
switch _, err := os.Stat(path); {
|
||||
case err == nil:
|
||||
return true, nil
|
||||
case os.IsNotExist(err):
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *Datastore) Delete(key datastore.Key) error {
|
||||
_, path := fs.encode(key)
|
||||
switch err := os.Remove(path); {
|
||||
case err == nil:
|
||||
return nil
|
||||
case os.IsNotExist(err):
|
||||
return datastore.ErrNotFound
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
if (q.Prefix != "" && q.Prefix != "/") ||
|
||||
len(q.Filters) > 0 ||
|
||||
len(q.Orders) > 0 ||
|
||||
q.Limit > 0 ||
|
||||
q.Offset > 0 ||
|
||||
!q.KeysOnly {
|
||||
// TODO this is overly simplistic, but the only caller is
|
||||
// `ipfs refs local` for now, and this gets us moving.
|
||||
return nil, errors.New("flatfs only supports listing all keys in random order")
|
||||
}
|
||||
|
||||
reschan := make(chan query.Result)
|
||||
go func() {
|
||||
defer close(reschan)
|
||||
err := filepath.Walk(fs.path, func(path string, info os.FileInfo, err error) error {
|
||||
|
||||
if !info.Mode().IsRegular() || info.Name()[0] == '.' {
|
||||
return nil
|
||||
}
|
||||
|
||||
key, ok := fs.decode(info.Name())
|
||||
if !ok {
|
||||
log.Warning("failed to decode entry in flatfs")
|
||||
return nil
|
||||
}
|
||||
|
||||
reschan <- query.Result{
|
||||
Entry: query.Entry{
|
||||
Key: key.String(),
|
||||
},
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Warning("walk failed: ", err)
|
||||
}
|
||||
}()
|
||||
return query.ResultsWithChan(q, reschan), nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type flatfsBatch struct {
|
||||
puts map[datastore.Key]interface{}
|
||||
deletes map[datastore.Key]struct{}
|
||||
|
||||
ds *Datastore
|
||||
}
|
||||
|
||||
func (fs *Datastore) Batch() (datastore.Batch, error) {
|
||||
return &flatfsBatch{
|
||||
puts: make(map[datastore.Key]interface{}),
|
||||
deletes: make(map[datastore.Key]struct{}),
|
||||
ds: fs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bt *flatfsBatch) Put(key datastore.Key, val interface{}) error {
|
||||
bt.puts[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *flatfsBatch) Delete(key datastore.Key) error {
|
||||
bt.deletes[key] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *flatfsBatch) Commit() error {
|
||||
if err := bt.ds.putMany(bt.puts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, _ := range bt.deletes {
|
||||
if err := bt.ds.Delete(k); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ datastore.ThreadSafeDatastore = (*Datastore)(nil)
|
||||
|
||||
func (*Datastore) IsThreadSafe() {}
|
||||
17
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_std.go
generated
vendored
17
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_std.go
generated
vendored
@ -1,17 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package flatfs
|
||||
|
||||
import "os"
|
||||
|
||||
// syncDir fsyncs the directory itself so that recently created or
// renamed entries within it become durable.
func syncDir(dir string) error {
	dirF, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer dirF.Close()
	return dirF.Sync()
}
|
||||
5
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_windows.go
generated
vendored
5
Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_windows.go
generated
vendored
@ -1,5 +0,0 @@
|
||||
package flatfs
|
||||
|
||||
// syncDir is a no-op on Windows, where directory handles cannot be
// fsynced via os.File.Sync.
func syncDir(dir string) error {
	return nil
}
|
||||
159
Godeps/_workspace/src/github.com/ipfs/go-datastore/fs/fs.go
generated
vendored
159
Godeps/_workspace/src/github.com/ipfs/go-datastore/fs/fs.go
generated
vendored
@ -1,159 +0,0 @@
|
||||
// Package fs is a simple Datastore implementation that stores keys
|
||||
// are directories and files, mirroring the key. That is, the key
|
||||
// "/foo/bar" is stored as file "PATH/foo/bar/.dsobject".
|
||||
//
|
||||
// This means key some segments will not work. For example, the
|
||||
// following keys will result in unwanted behavior:
|
||||
//
|
||||
// - "/foo/./bar"
|
||||
// - "/foo/../bar"
|
||||
// - "/foo\x00bar"
|
||||
//
|
||||
// Keys that only differ in case may be confused with each other on
|
||||
// case insensitive file systems, for example in OS X.
|
||||
//
|
||||
// This package is intended for exploratory use, where the user would
|
||||
// examine the file system manually, and should only be used with
|
||||
// human-friendly, trusted keys. You have been warned.
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// ObjectKeySuffix is the filename used to store the value of a key.
var ObjectKeySuffix = ".dsobject"

// Datastore stores each key's value in its own file under path.
type Datastore struct {
	path string
}
|
||||
|
||||
// NewDatastore returns a new fs Datastore at given `path`
|
||||
func NewDatastore(path string) (ds.Datastore, error) {
|
||||
if !isDir(path) {
|
||||
return nil, fmt.Errorf("Failed to find directory at: %v (file? perms?)", path)
|
||||
}
|
||||
|
||||
return &Datastore{path: path}, nil
|
||||
}
|
||||
|
||||
// KeyFilename returns the filename associated with `key`
|
||||
func (d *Datastore) KeyFilename(key ds.Key) string {
|
||||
return filepath.Join(d.path, key.String(), ObjectKeySuffix)
|
||||
}
|
||||
|
||||
// Put stores the given value.
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
|
||||
// TODO: maybe use io.Readers/Writers?
|
||||
// r, err := dsio.CastAsReader(value)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return ds.ErrInvalidType
|
||||
}
|
||||
|
||||
fn := d.KeyFilename(key)
|
||||
|
||||
// mkdirall above.
|
||||
err = os.MkdirAll(filepath.Dir(fn), 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(fn, val, 0666)
|
||||
}
|
||||
|
||||
// Get returns the value for given key
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
fn := d.KeyFilename(key)
|
||||
if !isFile(fn) {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
|
||||
return ioutil.ReadFile(fn)
|
||||
}
|
||||
|
||||
// Has returns whether the datastore has a value for a given key
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
return ds.GetBackedHas(d, key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given key
|
||||
func (d *Datastore) Delete(key ds.Key) (err error) {
|
||||
fn := d.KeyFilename(key)
|
||||
if !isFile(fn) {
|
||||
return ds.ErrNotFound
|
||||
}
|
||||
|
||||
return os.Remove(fn)
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
|
||||
results := make(chan query.Result)
|
||||
|
||||
walkFn := func(path string, info os.FileInfo, err error) error {
|
||||
// remove ds path prefix
|
||||
if strings.HasPrefix(path, d.path) {
|
||||
path = path[len(d.path):]
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
if strings.HasSuffix(path, ObjectKeySuffix) {
|
||||
path = path[:len(path)-len(ObjectKeySuffix)]
|
||||
}
|
||||
key := ds.NewKey(path)
|
||||
entry := query.Entry{Key: key.String(), Value: query.NotFetched}
|
||||
results <- query.Result{Entry: entry}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
filepath.Walk(d.path, walkFn)
|
||||
close(results)
|
||||
}()
|
||||
r := query.ResultsWithChan(q, results)
|
||||
r = query.NaiveQueryApply(q, r)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// isDir returns whether given path is a directory
|
||||
func isDir(path string) bool {
|
||||
finfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return finfo.IsDir()
|
||||
}
|
||||
|
||||
// isFile returns whether given path is a file
|
||||
func isFile(path string) bool {
|
||||
finfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return !finfo.IsDir()
|
||||
}
|
||||
|
||||
func (d *Datastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Batch() (ds.Batch, error) {
|
||||
return ds.NewBasicBatch(d), nil
|
||||
}
|
||||
252
Godeps/_workspace/src/github.com/ipfs/go-datastore/key.go
generated
vendored
252
Godeps/_workspace/src/github.com/ipfs/go-datastore/key.go
generated
vendored
@ -1,252 +0,0 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"gx/ipfs/QmcyaFHbyiZfoX5GTpcqqCPYmbjYNAhRDekXSJPFHdYNSV/go.uuid"
|
||||
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
/*
A Key represents the unique identifier of an object.
Our Key scheme is inspired by file systems and Google App Engine key model.

Keys are meant to be unique across a system. Keys are hierarchical,
incorporating more and more specific namespaces. Thus keys can be deemed
'children' or 'ancestors' of other keys::

    Key("/Comedy")
    Key("/Comedy/MontyPython")

Also, every namespace can be parametrized to embed relevant object
information. For example, the Key `name` (most specific namespace) could
include the object type::

    Key("/Comedy/MontyPython/Actor:JohnCleese")
    Key("/Comedy/MontyPython/Sketch:CheeseShop")
    Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")

*/
type Key struct {
	string
}

// NewKey constructs a key from string, cleaning the value first.
func NewKey(s string) Key {
	k := Key{s}
	k.Clean()
	return k
}

// KeyWithNamespaces constructs a key out of a namespace slice.
func KeyWithNamespaces(ns []string) Key {
	return NewKey(strings.Join(ns, "/"))
}

// Clean normalizes the key in place using path.Clean.
func (k *Key) Clean() {
	k.string = path.Clean("/" + k.string)
}

// String is the string value of the Key.
func (k Key) String() string {
	return k.string
}

// Bytes returns the string value of the Key as a []byte.
func (k Key) Bytes() []byte {
	return []byte(k.string)
}

// Equal reports whether two keys are identical.
func (k Key) Equal(k2 Key) bool {
	return k.string == k2.string
}
|
||||
|
||||
// Less checks whether this key is sorted lower than another.
|
||||
func (k Key) Less(k2 Key) bool {
|
||||
list1 := k.List()
|
||||
list2 := k2.List()
|
||||
for i, c1 := range list1 {
|
||||
if len(list2) < (i + 1) {
|
||||
return false
|
||||
}
|
||||
|
||||
c2 := list2[i]
|
||||
if c1 < c2 {
|
||||
return true
|
||||
} else if c1 > c2 {
|
||||
return false
|
||||
}
|
||||
// c1 == c2, continue
|
||||
}
|
||||
|
||||
// list1 is shorter or exactly the same.
|
||||
return len(list1) < len(list2)
|
||||
}
|
||||
|
||||
// List returns the `list` representation of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
|
||||
func (k Key) List() []string {
|
||||
return strings.Split(k.string, "/")[1:]
|
||||
}
|
||||
|
||||
// Reverse returns the reverse of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
|
||||
// NewKey("/Actor:JohnCleese/MontyPython/Comedy")
|
||||
func (k Key) Reverse() Key {
|
||||
l := k.List()
|
||||
r := make([]string, len(l), len(l))
|
||||
for i, e := range l {
|
||||
r[len(l)-i-1] = e
|
||||
}
|
||||
return KeyWithNamespaces(r)
|
||||
}
|
||||
|
||||
// Namespaces returns the `namespaces` making up this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
|
||||
func (k Key) Namespaces() []string {
|
||||
return k.List()
|
||||
}
|
||||
|
||||
// BaseNamespace returns the "base" namespace of this key (path.Base(filename))
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
|
||||
// "Actor:JohnCleese"
|
||||
func (k Key) BaseNamespace() string {
|
||||
n := k.Namespaces()
|
||||
return n[len(n)-1]
|
||||
}
|
||||
|
||||
// Type returns the "type" of this key (value of last namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// "Actor"
|
||||
func (k Key) Type() string {
|
||||
return NamespaceType(k.BaseNamespace())
|
||||
}
|
||||
|
||||
// Name returns the "name" of this key (field of last namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// "Actor"
|
||||
func (k Key) Name() string {
|
||||
return NamespaceValue(k.BaseNamespace())
|
||||
}
|
||||
|
||||
// Instance returns an "instance" of this type key (appends value to namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// "JohnCleese"
|
||||
func (k Key) Instance(s string) Key {
|
||||
return NewKey(k.string + ":" + s)
|
||||
}
|
||||
|
||||
// Path returns the "path" of this key (parent + type).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
|
||||
// NewKey("/Comedy/MontyPython/Actor")
|
||||
func (k Key) Path() Key {
|
||||
s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
|
||||
return NewKey(s)
|
||||
}
|
||||
|
||||
// Parent returns the `parent` Key of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
|
||||
// NewKey("/Comedy/MontyPython")
|
||||
func (k Key) Parent() Key {
|
||||
n := k.List()
|
||||
if len(n) == 1 {
|
||||
return NewKey("/")
|
||||
}
|
||||
return NewKey(strings.Join(n[:len(n)-1], "/"))
|
||||
}
|
||||
|
||||
// Child returns the `child` Key of this Key.
|
||||
// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese")
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
func (k Key) Child(k2 Key) Key {
|
||||
return NewKey(k.string + "/" + k2.string)
|
||||
}
|
||||
|
||||
// ChildString returns the `child` Key of this Key -- string helper.
|
||||
// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese")
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
func (k Key) ChildString(s string) Key {
|
||||
return NewKey(k.string + "/" + s)
|
||||
}
|
||||
|
||||
// IsAncestorOf returns whether this key is a prefix of `other`
|
||||
// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
|
||||
// true
|
||||
func (k Key) IsAncestorOf(other Key) bool {
|
||||
if other.string == k.string {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(other.string, k.string)
|
||||
}
|
||||
|
||||
// IsDescendantOf returns whether this key contains another as a prefix.
|
||||
// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
|
||||
// true
|
||||
func (k Key) IsDescendantOf(other Key) bool {
|
||||
if other.string == k.string {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(k.string, other.string)
|
||||
}
|
||||
|
||||
// IsTopLevel returns whether this key has only one namespace.
|
||||
func (k Key) IsTopLevel() bool {
|
||||
return len(k.List()) == 1
|
||||
}
|
||||
|
||||
// RandomKey returns a randomly (uuid) generated key.
|
||||
// RandomKey()
|
||||
// NewKey("/f98719ea086343f7b71f32ea9d9d521d")
|
||||
func RandomKey() Key {
|
||||
return NewKey(strings.Replace(uuid.NewV4().String(), "-", "", -1))
|
||||
}
|
||||
|
||||
/*
A Key Namespace is like a path element.
A namespace can optionally include a type (delimited by ':')

    > NamespaceValue("Song:PhilosopherSong")
    PhilosopherSong
    > NamespaceType("Song:PhilosopherSong")
    Song
    > NamespaceType("Music:Song:PhilosopherSong")
    Music:Song
*/

// NamespaceType returns everything before the final ':' of a
// namespace ("" when there is no ':'). `foo` in `foo:bar`.
func NamespaceType(namespace string) string {
	parts := strings.Split(namespace, ":")
	if len(parts) < 2 {
		return ""
	}
	return strings.Join(parts[:len(parts)-1], ":")
}

// NamespaceValue returns the component after the final ':' of a
// namespace (the whole namespace when there is no ':'). `baz` in `f:b:baz`.
func NamespaceValue(namespace string) string {
	parts := strings.Split(namespace, ":")
	return parts[len(parts)-1]
}
|
||||
|
||||
// KeySlice attaches the methods of sort.Interface to []Key,
|
||||
// sorting in increasing order.
|
||||
type KeySlice []Key
|
||||
|
||||
func (p KeySlice) Len() int { return len(p) }
|
||||
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
|
||||
func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// EntryKeys
|
||||
func EntryKeys(e []dsq.Entry) []Key {
|
||||
ks := make([]Key, len(e))
|
||||
for i, e := range e {
|
||||
ks[i] = NewKey(e.Key)
|
||||
}
|
||||
return ks
|
||||
}
|
||||
25
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/doc.go
generated
vendored
25
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/doc.go
generated
vendored
@ -1,25 +0,0 @@
|
||||
// Package keytransform introduces a Datastore Shim that transforms keys before
|
||||
// passing them to its child. It can be used to manipulate what keys look like
|
||||
// to the user, for example namespacing keys, reversing them, etc.
|
||||
//
|
||||
// Use the Wrap function to wrap a datastore with any KeyTransform.
|
||||
// A KeyTransform is simply an interface with two functions, a conversion and
|
||||
// its inverse. For example:
|
||||
//
|
||||
// import (
|
||||
// ktds "github.com/ipfs/go-datastore/keytransform"
|
||||
// ds "github.com/ipfs/go-datastore"
|
||||
// )
|
||||
//
|
||||
// func reverseKey(k ds.Key) ds.Key {
|
||||
// return k.Reverse()
|
||||
// }
|
||||
//
|
||||
// func invertKeys(d ds.Datastore) {
|
||||
// return ktds.Wrap(d, &ktds.Pair{
|
||||
// Convert: reverseKey,
|
||||
// Invert: reverseKey, // reverse is its own inverse.
|
||||
// })
|
||||
// }
|
||||
//
|
||||
package keytransform
|
||||
34
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/interface.go
generated
vendored
34
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/interface.go
generated
vendored
@ -1,34 +0,0 @@
|
||||
package keytransform
|
||||
|
||||
import ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
|
||||
// KeyMapping is a function that maps one key to annother
|
||||
type KeyMapping func(ds.Key) ds.Key
|
||||
|
||||
// KeyTransform is an object with a pair of functions for (invertibly)
|
||||
// transforming keys
|
||||
type KeyTransform interface {
|
||||
ConvertKey(ds.Key) ds.Key
|
||||
InvertKey(ds.Key) ds.Key
|
||||
}
|
||||
|
||||
// Datastore is a keytransform.Datastore
|
||||
type Datastore interface {
|
||||
ds.Shim
|
||||
KeyTransform
|
||||
}
|
||||
|
||||
// Wrap wraps a given datastore with a KeyTransform function.
|
||||
// The resulting wrapped datastore will use the transform on all Datastore
|
||||
// operations.
|
||||
func Wrap(child ds.Datastore, t KeyTransform) *ktds {
|
||||
if t == nil {
|
||||
panic("t (KeyTransform) is nil")
|
||||
}
|
||||
|
||||
if child == nil {
|
||||
panic("child (ds.Datastore) is nil")
|
||||
}
|
||||
|
||||
return &ktds{child: child, KeyTransform: t}
|
||||
}
|
||||
118
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/keytransform.go
generated
vendored
118
Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/keytransform.go
generated
vendored
@ -1,118 +0,0 @@
|
||||
package keytransform
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
type Pair struct {
|
||||
Convert KeyMapping
|
||||
Invert KeyMapping
|
||||
}
|
||||
|
||||
func (t *Pair) ConvertKey(k ds.Key) ds.Key {
|
||||
return t.Convert(k)
|
||||
}
|
||||
|
||||
func (t *Pair) InvertKey(k ds.Key) ds.Key {
|
||||
return t.Invert(k)
|
||||
}
|
||||
|
||||
// ktds keeps a KeyTransform function
|
||||
type ktds struct {
|
||||
child ds.Datastore
|
||||
|
||||
KeyTransform
|
||||
}
|
||||
|
||||
// Children implements ds.Shim
|
||||
func (d *ktds) Children() []ds.Datastore {
|
||||
return []ds.Datastore{d.child}
|
||||
}
|
||||
|
||||
// Put stores the given value, transforming the key first.
|
||||
func (d *ktds) Put(key ds.Key, value interface{}) (err error) {
|
||||
return d.child.Put(d.ConvertKey(key), value)
|
||||
}
|
||||
|
||||
// Get returns the value for given key, transforming the key first.
|
||||
func (d *ktds) Get(key ds.Key) (value interface{}, err error) {
|
||||
return d.child.Get(d.ConvertKey(key))
|
||||
}
|
||||
|
||||
// Has returns whether the datastore has a value for a given key, transforming
|
||||
// the key first.
|
||||
func (d *ktds) Has(key ds.Key) (exists bool, err error) {
|
||||
return d.child.Has(d.ConvertKey(key))
|
||||
}
|
||||
|
||||
// Delete removes the value for given key
|
||||
func (d *ktds) Delete(key ds.Key) (err error) {
|
||||
return d.child.Delete(d.ConvertKey(key))
|
||||
}
|
||||
|
||||
// Query implements Query, inverting keys on the way back out.
|
||||
func (d *ktds) Query(q dsq.Query) (dsq.Results, error) {
|
||||
qr, err := d.child.Query(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch := make(chan dsq.Result)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
for r := range qr.Next() {
|
||||
if r.Error == nil {
|
||||
r.Entry.Key = d.InvertKey(ds.NewKey(r.Entry.Key)).String()
|
||||
}
|
||||
ch <- r
|
||||
}
|
||||
}()
|
||||
|
||||
return dsq.DerivedResults(qr, ch), nil
|
||||
}
|
||||
|
||||
func (d *ktds) Close() error {
|
||||
if c, ok := d.child.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *ktds) Batch() (ds.Batch, error) {
|
||||
bds, ok := d.child.(ds.Batching)
|
||||
if !ok {
|
||||
return nil, ds.ErrBatchUnsupported
|
||||
}
|
||||
|
||||
childbatch, err := bds.Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &transformBatch{
|
||||
dst: childbatch,
|
||||
f: d.ConvertKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type transformBatch struct {
|
||||
dst ds.Batch
|
||||
|
||||
f KeyMapping
|
||||
}
|
||||
|
||||
func (t *transformBatch) Put(key ds.Key, val interface{}) error {
|
||||
return t.dst.Put(t.f(key), val)
|
||||
}
|
||||
|
||||
func (t *transformBatch) Delete(key ds.Key) error {
|
||||
return t.dst.Delete(t.f(key))
|
||||
}
|
||||
|
||||
func (t *transformBatch) Commit() error {
|
||||
return t.dst.Commit()
|
||||
}
|
||||
155
Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go
generated
vendored
155
Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go
generated
vendored
@ -1,155 +0,0 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb"
|
||||
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt"
|
||||
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/util"
|
||||
|
||||
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
|
||||
)
|
||||
|
||||
type datastore struct {
|
||||
DB *leveldb.DB
|
||||
}
|
||||
|
||||
type Options opt.Options
|
||||
|
||||
func NewDatastore(path string, opts *Options) (*datastore, error) {
|
||||
var nopts opt.Options
|
||||
if opts != nil {
|
||||
nopts = opt.Options(*opts)
|
||||
}
|
||||
db, err := leveldb.OpenFile(path, &nopts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &datastore{
|
||||
DB: db,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Returns ErrInvalidType if value is not of type []byte.
|
||||
//
|
||||
// NOTE: Using sync = false.
|
||||
// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return ds.ErrInvalidType
|
||||
}
|
||||
return d.DB.Put(key.Bytes(), val, nil)
|
||||
}
|
||||
|
||||
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
val, err := d.DB.Get(key.Bytes(), nil)
|
||||
if err != nil {
|
||||
if err == leveldb.ErrNotFound {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
return d.DB.Has(key.Bytes(), nil)
|
||||
}
|
||||
|
||||
func (d *datastore) Delete(key ds.Key) (err error) {
|
||||
err = d.DB.Delete(key.Bytes(), nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
return ds.ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
|
||||
// we can use multiple iterators concurrently. see:
|
||||
// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator
|
||||
// advance the iterator only if the reader reads
|
||||
//
|
||||
// run query in own sub-process tied to Results.Process(), so that
|
||||
// it waits for us to finish AND so that clients can signal to us
|
||||
// that resources should be reclaimed.
|
||||
qrb := dsq.NewResultBuilder(q)
|
||||
qrb.Process.Go(func(worker goprocess.Process) {
|
||||
d.runQuery(worker, qrb)
|
||||
})
|
||||
|
||||
// go wait on the worker (without signaling close)
|
||||
go qrb.Process.CloseAfterChildren()
|
||||
|
||||
// Now, apply remaining things (filters, order)
|
||||
qr := qrb.Results()
|
||||
for _, f := range q.Filters {
|
||||
qr = dsq.NaiveFilter(qr, f)
|
||||
}
|
||||
for _, o := range q.Orders {
|
||||
qr = dsq.NaiveOrder(qr, o)
|
||||
}
|
||||
return qr, nil
|
||||
}
|
||||
|
||||
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
|
||||
|
||||
var rnge *util.Range
|
||||
if qrb.Query.Prefix != "" {
|
||||
rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
|
||||
}
|
||||
i := d.DB.NewIterator(rnge, nil)
|
||||
defer i.Release()
|
||||
|
||||
// advance iterator for offset
|
||||
if qrb.Query.Offset > 0 {
|
||||
for j := 0; j < qrb.Query.Offset; j++ {
|
||||
i.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// iterate, and handle limit, too
|
||||
for sent := 0; i.Next(); sent++ {
|
||||
// end early if we hit the limit
|
||||
if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
|
||||
break
|
||||
}
|
||||
|
||||
k := ds.NewKey(string(i.Key())).String()
|
||||
e := dsq.Entry{Key: k}
|
||||
|
||||
if !qrb.Query.KeysOnly {
|
||||
buf := make([]byte, len(i.Value()))
|
||||
copy(buf, i.Value())
|
||||
e.Value = buf
|
||||
}
|
||||
|
||||
select {
|
||||
case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
|
||||
case <-worker.Closing(): // client told us to end early.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := i.Error(); err != nil {
|
||||
select {
|
||||
case qrb.Output <- dsq.Result{Error: err}: // client read our error
|
||||
case <-worker.Closing(): // client told us to end.
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *datastore) Batch() (ds.Batch, error) {
|
||||
// TODO: implement batch on leveldb
|
||||
return nil, ds.ErrBatchUnsupported
|
||||
}
|
||||
|
||||
// LevelDB needs to be closed.
|
||||
func (d *datastore) Close() (err error) {
|
||||
return d.DB.Close()
|
||||
}
|
||||
|
||||
func (d *datastore) IsThreadSafe() {}
|
||||
64
Godeps/_workspace/src/github.com/ipfs/go-datastore/lru/datastore.go
generated
vendored
64
Godeps/_workspace/src/github.com/ipfs/go-datastore/lru/datastore.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// Datastore uses golang-lru for internal storage.
|
||||
type Datastore struct {
|
||||
cache *lru.Cache
|
||||
}
|
||||
|
||||
// NewDatastore constructs a new LRU Datastore with given capacity.
|
||||
func NewDatastore(capacity int) (*Datastore, error) {
|
||||
cache, err := lru.New(capacity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Datastore{cache: cache}, nil
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
d.cache.Add(key, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
val, ok := d.cache.Get(key)
|
||||
if !ok {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
return ds.GetBackedHas(d, key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *Datastore) Delete(key ds.Key) (err error) {
|
||||
d.cache.Remove(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyList returns a list of keys in the datastore
|
||||
func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return nil, errors.New("KeyList not implemented.")
|
||||
}
|
||||
|
||||
func (d *Datastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Batch() (ds.Batch, error) {
|
||||
return nil, ds.ErrBatchUnsupported
|
||||
}
|
||||
248
Godeps/_workspace/src/github.com/ipfs/go-datastore/measure/measure.go
generated
vendored
248
Godeps/_workspace/src/github.com/ipfs/go-datastore/measure/measure.go
generated
vendored
@ -1,248 +0,0 @@
|
||||
// Package measure provides a Datastore wrapper that records metrics
|
||||
// using github.com/codahale/metrics.
|
||||
package measure
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// Histogram measurements exceeding these limits are dropped. TODO
|
||||
// maybe it would be better to cap the value? Should we keep track of
|
||||
// drops?
|
||||
const (
|
||||
maxLatency = int64(1 * time.Second)
|
||||
maxSize = int64(1 << 32)
|
||||
)
|
||||
|
||||
// New wraps the datastore, providing metrics on the operations. The
|
||||
// metrics are registered with names starting with prefix and a dot.
|
||||
//
|
||||
// If prefix is not unique, New will panic. Call Close to release the
|
||||
// prefix.
|
||||
func New(prefix string, ds datastore.Datastore) *measure {
|
||||
m := &measure{
|
||||
backend: ds,
|
||||
|
||||
putNum: metrics.Counter(prefix + ".Put.num"),
|
||||
putErr: metrics.Counter(prefix + ".Put.err"),
|
||||
putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
|
||||
putSize: metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),
|
||||
|
||||
getNum: metrics.Counter(prefix + ".Get.num"),
|
||||
getErr: metrics.Counter(prefix + ".Get.err"),
|
||||
getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
|
||||
getSize: metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),
|
||||
|
||||
hasNum: metrics.Counter(prefix + ".Has.num"),
|
||||
hasErr: metrics.Counter(prefix + ".Has.err"),
|
||||
hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),
|
||||
|
||||
deleteNum: metrics.Counter(prefix + ".Delete.num"),
|
||||
deleteErr: metrics.Counter(prefix + ".Delete.err"),
|
||||
deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),
|
||||
|
||||
queryNum: metrics.Counter(prefix + ".Query.num"),
|
||||
queryErr: metrics.Counter(prefix + ".Query.err"),
|
||||
queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type measure struct {
|
||||
backend datastore.Datastore
|
||||
|
||||
putNum metrics.Counter
|
||||
putErr metrics.Counter
|
||||
putLatency *metrics.Histogram
|
||||
putSize *metrics.Histogram
|
||||
|
||||
getNum metrics.Counter
|
||||
getErr metrics.Counter
|
||||
getLatency *metrics.Histogram
|
||||
getSize *metrics.Histogram
|
||||
|
||||
hasNum metrics.Counter
|
||||
hasErr metrics.Counter
|
||||
hasLatency *metrics.Histogram
|
||||
|
||||
deleteNum metrics.Counter
|
||||
deleteErr metrics.Counter
|
||||
deleteLatency *metrics.Histogram
|
||||
|
||||
queryNum metrics.Counter
|
||||
queryErr metrics.Counter
|
||||
queryLatency *metrics.Histogram
|
||||
}
|
||||
|
||||
var _ datastore.Datastore = (*measure)(nil)
|
||||
|
||||
func recordLatency(h *metrics.Histogram, start time.Time) {
|
||||
elapsed := time.Now().Sub(start) / time.Microsecond
|
||||
_ = h.RecordValue(int64(elapsed))
|
||||
}
|
||||
|
||||
func (m *measure) Put(key datastore.Key, value interface{}) error {
|
||||
defer recordLatency(m.putLatency, time.Now())
|
||||
m.putNum.Add()
|
||||
if b, ok := value.([]byte); ok {
|
||||
_ = m.putSize.RecordValue(int64(len(b)))
|
||||
}
|
||||
err := m.backend.Put(key, value)
|
||||
if err != nil {
|
||||
m.putErr.Add()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *measure) Get(key datastore.Key) (value interface{}, err error) {
|
||||
defer recordLatency(m.getLatency, time.Now())
|
||||
m.getNum.Add()
|
||||
value, err = m.backend.Get(key)
|
||||
if err != nil {
|
||||
m.getErr.Add()
|
||||
} else {
|
||||
if b, ok := value.([]byte); ok {
|
||||
_ = m.getSize.RecordValue(int64(len(b)))
|
||||
}
|
||||
}
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (m *measure) Has(key datastore.Key) (exists bool, err error) {
|
||||
defer recordLatency(m.hasLatency, time.Now())
|
||||
m.hasNum.Add()
|
||||
exists, err = m.backend.Has(key)
|
||||
if err != nil {
|
||||
m.hasErr.Add()
|
||||
}
|
||||
return exists, err
|
||||
}
|
||||
|
||||
func (m *measure) Delete(key datastore.Key) error {
|
||||
defer recordLatency(m.deleteLatency, time.Now())
|
||||
m.deleteNum.Add()
|
||||
err := m.backend.Delete(key)
|
||||
if err != nil {
|
||||
m.deleteErr.Add()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *measure) Query(q query.Query) (query.Results, error) {
|
||||
defer recordLatency(m.queryLatency, time.Now())
|
||||
m.queryNum.Add()
|
||||
res, err := m.backend.Query(q)
|
||||
if err != nil {
|
||||
m.queryErr.Add()
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
type measuredBatch struct {
|
||||
puts int
|
||||
deletes int
|
||||
|
||||
putts datastore.Batch
|
||||
delts datastore.Batch
|
||||
|
||||
m *measure
|
||||
}
|
||||
|
||||
func (m *measure) Batch() (datastore.Batch, error) {
|
||||
bds, ok := m.backend.(datastore.Batching)
|
||||
if !ok {
|
||||
return nil, datastore.ErrBatchUnsupported
|
||||
}
|
||||
pb, err := bds.Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := bds.Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &measuredBatch{
|
||||
putts: pb,
|
||||
delts: db,
|
||||
|
||||
m: m,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mt *measuredBatch) Put(key datastore.Key, val interface{}) error {
|
||||
mt.puts++
|
||||
valb, ok := val.([]byte)
|
||||
if !ok {
|
||||
return datastore.ErrInvalidType
|
||||
}
|
||||
_ = mt.m.putSize.RecordValue(int64(len(valb)))
|
||||
return mt.putts.Put(key, val)
|
||||
}
|
||||
|
||||
func (mt *measuredBatch) Delete(key datastore.Key) error {
|
||||
mt.deletes++
|
||||
return mt.delts.Delete(key)
|
||||
}
|
||||
|
||||
func (mt *measuredBatch) Commit() error {
|
||||
err := logBatchCommit(mt.delts, mt.deletes, mt.m.deleteNum, mt.m.deleteErr, mt.m.deleteLatency)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = logBatchCommit(mt.putts, mt.puts, mt.m.putNum, mt.m.putErr, mt.m.putLatency)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBatchCommit(b datastore.Batch, n int, num, errs metrics.Counter, lat *metrics.Histogram) error {
|
||||
if n > 0 {
|
||||
before := time.Now()
|
||||
err := b.Commit()
|
||||
took := int(time.Now().Sub(before)/time.Microsecond) / n
|
||||
num.AddN(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
_ = lat.RecordValue(int64(took))
|
||||
}
|
||||
if err != nil {
|
||||
errs.Add()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *measure) Close() error {
|
||||
m.putNum.Remove()
|
||||
m.putErr.Remove()
|
||||
m.putLatency.Remove()
|
||||
m.putSize.Remove()
|
||||
m.getNum.Remove()
|
||||
m.getErr.Remove()
|
||||
m.getLatency.Remove()
|
||||
m.getSize.Remove()
|
||||
m.hasNum.Remove()
|
||||
m.hasErr.Remove()
|
||||
m.hasLatency.Remove()
|
||||
m.deleteNum.Remove()
|
||||
m.deleteErr.Remove()
|
||||
m.deleteLatency.Remove()
|
||||
m.queryNum.Remove()
|
||||
m.queryErr.Remove()
|
||||
m.queryLatency.Remove()
|
||||
|
||||
if c, ok := m.backend.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
188
Godeps/_workspace/src/github.com/ipfs/go-datastore/mount/mount.go
generated
vendored
188
Godeps/_workspace/src/github.com/ipfs/go-datastore/mount/mount.go
generated
vendored
@ -1,188 +0,0 @@
|
||||
// Package mount provides a Datastore that has other Datastores
|
||||
// mounted at various key prefixes.
|
||||
package mount
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoMount = errors.New("no datastore mounted for this key")
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
Prefix datastore.Key
|
||||
Datastore datastore.Datastore
|
||||
}
|
||||
|
||||
func New(mounts []Mount) *Datastore {
|
||||
// make a copy so we're sure it doesn't mutate
|
||||
m := make([]Mount, len(mounts))
|
||||
for i, v := range mounts {
|
||||
m[i] = v
|
||||
}
|
||||
return &Datastore{mounts: m}
|
||||
}
|
||||
|
||||
type Datastore struct {
|
||||
mounts []Mount
|
||||
}
|
||||
|
||||
var _ datastore.Datastore = (*Datastore)(nil)
|
||||
|
||||
func (d *Datastore) lookup(key datastore.Key) (ds datastore.Datastore, mountpoint, rest datastore.Key) {
|
||||
for _, m := range d.mounts {
|
||||
if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
|
||||
s := strings.TrimPrefix(key.String(), m.Prefix.String())
|
||||
k := datastore.NewKey(s)
|
||||
return m.Datastore, m.Prefix, k
|
||||
}
|
||||
}
|
||||
return nil, datastore.NewKey("/"), key
|
||||
}
|
||||
|
||||
func (d *Datastore) Put(key datastore.Key, value interface{}) error {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return ErrNoMount
|
||||
}
|
||||
return ds.Put(k, value)
|
||||
}
|
||||
|
||||
func (d *Datastore) Get(key datastore.Key) (value interface{}, err error) {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return nil, datastore.ErrNotFound
|
||||
}
|
||||
return ds.Get(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Has(key datastore.Key) (exists bool, err error) {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return false, nil
|
||||
}
|
||||
return ds.Has(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Delete(key datastore.Key) error {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return datastore.ErrNotFound
|
||||
}
|
||||
return ds.Delete(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
if len(q.Filters) > 0 ||
|
||||
len(q.Orders) > 0 ||
|
||||
q.Limit > 0 ||
|
||||
q.Offset > 0 {
|
||||
// TODO this is overly simplistic, but the only caller is
|
||||
// `ipfs refs local` for now, and this gets us moving.
|
||||
return nil, errors.New("mount only supports listing all prefixed keys in random order")
|
||||
}
|
||||
key := datastore.NewKey(q.Prefix)
|
||||
ds, mount, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return nil, errors.New("mount only supports listing a mount point")
|
||||
}
|
||||
// TODO support listing cross mount points too
|
||||
|
||||
// delegate the query to the mounted datastore, while adjusting
|
||||
// keys in and out
|
||||
q2 := q
|
||||
q2.Prefix = k.String()
|
||||
wrapDS := keytransform.Wrap(ds, &keytransform.Pair{
|
||||
Convert: func(datastore.Key) datastore.Key {
|
||||
panic("this should never be called")
|
||||
},
|
||||
Invert: func(k datastore.Key) datastore.Key {
|
||||
return mount.Child(k)
|
||||
},
|
||||
})
|
||||
|
||||
r, err := wrapDS.Query(q2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r = query.ResultsReplaceQuery(r, q)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (d *Datastore) Close() error {
|
||||
for _, d := range d.mounts {
|
||||
if c, ok := d.Datastore.(io.Closer); ok {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type mountBatch struct {
|
||||
mounts map[string]datastore.Batch
|
||||
|
||||
d *Datastore
|
||||
}
|
||||
|
||||
func (d *Datastore) Batch() (datastore.Batch, error) {
|
||||
return &mountBatch{
|
||||
mounts: make(map[string]datastore.Batch),
|
||||
d: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mt *mountBatch) lookupBatch(key datastore.Key) (datastore.Batch, datastore.Key, error) {
|
||||
child, loc, rest := mt.d.lookup(key)
|
||||
t, ok := mt.mounts[loc.String()]
|
||||
if !ok {
|
||||
bds, ok := child.(datastore.Batching)
|
||||
if !ok {
|
||||
return nil, datastore.NewKey(""), datastore.ErrBatchUnsupported
|
||||
}
|
||||
var err error
|
||||
t, err = bds.Batch()
|
||||
if err != nil {
|
||||
return nil, datastore.NewKey(""), err
|
||||
}
|
||||
mt.mounts[loc.String()] = t
|
||||
}
|
||||
return t, rest, nil
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Put(key datastore.Key, val interface{}) error {
|
||||
t, rest, err := mt.lookupBatch(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Put(rest, val)
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Delete(key datastore.Key) error {
|
||||
t, rest, err := mt.lookupBatch(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Delete(rest)
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Commit() error {
|
||||
for _, t := range mt.mounts {
|
||||
err := t.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
24
Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/doc.go
generated
vendored
24
Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/doc.go
generated
vendored
@ -1,24 +0,0 @@
|
||||
// Package namespace introduces a namespace Datastore Shim, which basically
|
||||
// mounts the entire child datastore under a prefix.
|
||||
//
|
||||
// Use the Wrap function to wrap a datastore with any Key prefix. For example:
|
||||
//
|
||||
// import (
|
||||
// "fmt"
|
||||
//
|
||||
// ds "github.com/ipfs/go-datastore"
|
||||
// nsds "github.com/ipfs/go-datastore/namespace"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// mp := ds.NewMapDatastore()
|
||||
// ns := nsds.Wrap(mp, ds.NewKey("/foo/bar"))
|
||||
//
|
||||
// // in the Namespace Datastore:
|
||||
// ns.Put(ds.NewKey("/beep"), "boop")
|
||||
// v2, _ := ns.Get(ds.NewKey("/beep")) // v2 == "boop"
|
||||
//
|
||||
// // and, in the underlying MapDatastore:
|
||||
// v3, _ := mp.Get(ds.NewKey("/foo/bar/beep")) // v3 == "boop"
|
||||
// }
|
||||
package namespace
|
||||
91
Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/namespace.go
generated
vendored
91
Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/namespace.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ktds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// PrefixTransform constructs a KeyTransform with a pair of functions that
|
||||
// add or remove the given prefix key.
|
||||
//
|
||||
// Warning: Will panic if prefix not found when it should be there. This is
|
||||
// to avoid insidious data inconsistency errors.
|
||||
func PrefixTransform(prefix ds.Key) ktds.KeyTransform {
|
||||
return &ktds.Pair{
|
||||
|
||||
// Convert adds the prefix
|
||||
Convert: func(k ds.Key) ds.Key {
|
||||
return prefix.Child(k)
|
||||
},
|
||||
|
||||
// Invert removes the prefix. panics if prefix not found.
|
||||
Invert: func(k ds.Key) ds.Key {
|
||||
if !prefix.IsAncestorOf(k) {
|
||||
fmt.Errorf("Expected prefix (%s) in key (%s)", prefix, k)
|
||||
panic("expected prefix not found")
|
||||
}
|
||||
|
||||
s := strings.TrimPrefix(k.String(), prefix.String())
|
||||
return ds.NewKey(s)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap wraps a given datastore with a key-prefix.
|
||||
func Wrap(child ds.Datastore, prefix ds.Key) *datastore {
|
||||
if child == nil {
|
||||
panic("child (ds.Datastore) is nil")
|
||||
}
|
||||
|
||||
d := ktds.Wrap(child, PrefixTransform(prefix))
|
||||
return &datastore{Datastore: d, raw: child, prefix: prefix}
|
||||
}
|
||||
|
||||
type datastore struct {
|
||||
prefix ds.Key
|
||||
raw ds.Datastore
|
||||
ktds.Datastore
|
||||
}
|
||||
|
||||
// Query implements Query, inverting keys on the way back out.
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
qr, err := d.raw.Query(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch := make(chan dsq.Result)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
for r := range qr.Next() {
|
||||
if r.Error != nil {
|
||||
ch <- r
|
||||
continue
|
||||
}
|
||||
|
||||
k := ds.NewKey(r.Entry.Key)
|
||||
if !d.prefix.IsAncestorOf(k) {
|
||||
continue
|
||||
}
|
||||
|
||||
r.Entry.Key = d.Datastore.InvertKey(k).String()
|
||||
ch <- r
|
||||
}
|
||||
}()
|
||||
|
||||
return dsq.DerivedResults(qr, ch), nil
|
||||
}
|
||||
|
||||
func (d *datastore) Batch() (ds.Batch, error) {
|
||||
if bds, ok := d.Datastore.(ds.Batching); ok {
|
||||
return bds.Batch()
|
||||
}
|
||||
|
||||
return nil, ds.ErrBatchUnsupported
|
||||
}
|
||||
120
Godeps/_workspace/src/github.com/ipfs/go-datastore/panic/panic.go
generated
vendored
120
Godeps/_workspace/src/github.com/ipfs/go-datastore/panic/panic.go
generated
vendored
@ -1,120 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
type datastore struct {
|
||||
child ds.Datastore
|
||||
}
|
||||
|
||||
// Wrap shims a datastore such than _any_ operation failing triggers a panic
|
||||
// This is useful for debugging invariants.
|
||||
func Wrap(d ds.Datastore) ds.Shim {
|
||||
return &datastore{child: d}
|
||||
}
|
||||
|
||||
func (d *datastore) Children() []ds.Datastore {
|
||||
return []ds.Datastore{d.child}
|
||||
}
|
||||
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) error {
|
||||
err := d.child.Put(key, value)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Put failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *datastore) Get(key ds.Key) (interface{}, error) {
|
||||
val, err := d.child.Get(key)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Get failed")
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (d *datastore) Has(key ds.Key) (bool, error) {
|
||||
e, err := d.child.Has(key)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Has failed")
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func (d *datastore) Delete(key ds.Key) error {
|
||||
err := d.child.Delete(key)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Delete failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
r, err := d.child.Query(q)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Query failed")
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (d *datastore) Close() error {
|
||||
if c, ok := d.child.(io.Closer); ok {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: Close failed")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *datastore) Batch() (ds.Batch, error) {
|
||||
b, err := d.child.(ds.Batching).Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &panicBatch{b}, nil
|
||||
}
|
||||
|
||||
type panicBatch struct {
|
||||
t ds.Batch
|
||||
}
|
||||
|
||||
func (p *panicBatch) Put(key ds.Key, val interface{}) error {
|
||||
err := p.t.Put(key, val)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: transaction put failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *panicBatch) Delete(key ds.Key) error {
|
||||
err := p.t.Delete(key)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: transaction delete failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *panicBatch) Commit() error {
|
||||
err := p.t.Commit()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
|
||||
panic("panic datastore: transaction commit failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
86
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/filter.go
generated
vendored
86
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/filter.go
generated
vendored
@ -1,86 +0,0 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Filter is an object that tests ResultEntries
|
||||
type Filter interface {
|
||||
// Filter returns whether an entry passes the filter
|
||||
Filter(e Entry) bool
|
||||
}
|
||||
|
||||
// Op is a comparison operator
|
||||
type Op string
|
||||
|
||||
var (
|
||||
Equal = Op("==")
|
||||
NotEqual = Op("!=")
|
||||
GreaterThan = Op(">")
|
||||
GreaterThanOrEqual = Op(">=")
|
||||
LessThan = Op("<")
|
||||
LessThanOrEqual = Op("<=")
|
||||
)
|
||||
|
||||
// FilterValueCompare is used to signal to datastores they
|
||||
// should apply internal comparisons. unfortunately, there
|
||||
// is no way to apply comparisons* to interface{} types in
|
||||
// Go, so if the datastore doesnt have a special way to
|
||||
// handle these comparisons, you must provided the
|
||||
// TypedFilter to actually do filtering.
|
||||
//
|
||||
// [*] other than == and !=, which use reflect.DeepEqual.
|
||||
type FilterValueCompare struct {
|
||||
Op Op
|
||||
Value interface{}
|
||||
TypedFilter Filter
|
||||
}
|
||||
|
||||
func (f FilterValueCompare) Filter(e Entry) bool {
|
||||
if f.TypedFilter != nil {
|
||||
return f.TypedFilter.Filter(e)
|
||||
}
|
||||
|
||||
switch f.Op {
|
||||
case Equal:
|
||||
return reflect.DeepEqual(f.Value, e.Value)
|
||||
case NotEqual:
|
||||
return !reflect.DeepEqual(f.Value, e.Value)
|
||||
default:
|
||||
panic(fmt.Errorf("cannot apply op '%s' to interface{}.", f.Op))
|
||||
}
|
||||
}
|
||||
|
||||
type FilterKeyCompare struct {
|
||||
Op Op
|
||||
Key string
|
||||
}
|
||||
|
||||
func (f FilterKeyCompare) Filter(e Entry) bool {
|
||||
switch f.Op {
|
||||
case Equal:
|
||||
return e.Key == f.Key
|
||||
case NotEqual:
|
||||
return e.Key != f.Key
|
||||
case GreaterThan:
|
||||
return e.Key > f.Key
|
||||
case GreaterThanOrEqual:
|
||||
return e.Key >= f.Key
|
||||
case LessThan:
|
||||
return e.Key < f.Key
|
||||
case LessThanOrEqual:
|
||||
return e.Key <= f.Key
|
||||
default:
|
||||
panic(fmt.Errorf("unknown op '%s'", f.Op))
|
||||
}
|
||||
}
|
||||
|
||||
type FilterKeyPrefix struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func (f FilterKeyPrefix) Filter(e Entry) bool {
|
||||
return strings.HasPrefix(e.Key, f.Prefix)
|
||||
}
|
||||
66
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/order.go
generated
vendored
66
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/order.go
generated
vendored
@ -1,66 +0,0 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Order is an object used to order objects
|
||||
type Order interface {
|
||||
|
||||
// Sort sorts the Entry slice according to
|
||||
// the Order criteria.
|
||||
Sort([]Entry)
|
||||
}
|
||||
|
||||
// OrderByValue is used to signal to datastores they
|
||||
// should apply internal orderings. unfortunately, there
|
||||
// is no way to apply order comparisons to interface{} types
|
||||
// in Go, so if the datastore doesnt have a special way to
|
||||
// handle these comparisons, you must provide an Order
|
||||
// implementation that casts to the correct type.
|
||||
type OrderByValue struct {
|
||||
TypedOrder Order
|
||||
}
|
||||
|
||||
func (o OrderByValue) Sort(res []Entry) {
|
||||
if o.TypedOrder == nil {
|
||||
panic("cannot order interface{} by value. see query docs.")
|
||||
}
|
||||
o.TypedOrder.Sort(res)
|
||||
}
|
||||
|
||||
// OrderByValueDescending is used to signal to datastores they
|
||||
// should apply internal orderings. unfortunately, there
|
||||
// is no way to apply order comparisons to interface{} types
|
||||
// in Go, so if the datastore doesnt have a special way to
|
||||
// handle these comparisons, you are SOL.
|
||||
type OrderByValueDescending struct {
|
||||
TypedOrder Order
|
||||
}
|
||||
|
||||
func (o OrderByValueDescending) Sort(res []Entry) {
|
||||
if o.TypedOrder == nil {
|
||||
panic("cannot order interface{} by value. see query docs.")
|
||||
}
|
||||
o.TypedOrder.Sort(res)
|
||||
}
|
||||
|
||||
// OrderByKey
|
||||
type OrderByKey struct{}
|
||||
|
||||
func (o OrderByKey) Sort(res []Entry) {
|
||||
sort.Stable(reByKey(res))
|
||||
}
|
||||
|
||||
// OrderByKeyDescending
|
||||
type OrderByKeyDescending struct{}
|
||||
|
||||
func (o OrderByKeyDescending) Sort(res []Entry) {
|
||||
sort.Stable(sort.Reverse(reByKey(res)))
|
||||
}
|
||||
|
||||
type reByKey []Entry
|
||||
|
||||
func (s reByKey) Len() int { return len(s) }
|
||||
func (s reByKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s reByKey) Less(i, j int) bool { return s[i].Key < s[j].Key }
|
||||
250
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query.go
generated
vendored
250
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query.go
generated
vendored
@ -1,250 +0,0 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
goprocess "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
|
||||
)
|
||||
|
||||
/*
|
||||
Query represents storage for any key-value pair.
|
||||
|
||||
tl;dr:
|
||||
|
||||
queries are supported across datastores.
|
||||
Cheap on top of relational dbs, and expensive otherwise.
|
||||
Pick the right tool for the job!
|
||||
|
||||
In addition to the key-value store get and set semantics, datastore
|
||||
provides an interface to retrieve multiple records at a time through
|
||||
the use of queries. The datastore Query model gleans a common set of
|
||||
operations performed when querying. To avoid pasting here years of
|
||||
database research, let’s summarize the operations datastore supports.
|
||||
|
||||
Query Operations:
|
||||
|
||||
* namespace - scope the query, usually by object type
|
||||
* filters - select a subset of values by applying constraints
|
||||
* orders - sort the results by applying sort conditions
|
||||
* limit - impose a numeric limit on the number of results
|
||||
* offset - skip a number of results (for efficient pagination)
|
||||
|
||||
datastore combines these operations into a simple Query class that allows
|
||||
applications to define their constraints in a simple, generic, way without
|
||||
introducing datastore specific calls, languages, etc.
|
||||
|
||||
Of course, different datastores provide relational query support across a
|
||||
wide spectrum, from full support in traditional databases to none at all in
|
||||
most key-value stores. Datastore aims to provide a common, simple interface
|
||||
for the sake of application evolution over time and keeping large code bases
|
||||
free of tool-specific code. It would be ridiculous to claim to support high-
|
||||
performance queries on architectures that obviously do not. Instead, datastore
|
||||
provides the interface, ideally translating queries to their native form
|
||||
(e.g. into SQL for MySQL).
|
||||
|
||||
However, on the wrong datastore, queries can potentially incur the high cost
|
||||
of performing the aforemantioned query operations on the data set directly in
|
||||
Go. It is the client’s responsibility to select the right tool for the job:
|
||||
pick a data storage solution that fits the application’s needs now, and wrap
|
||||
it with a datastore implementation. As the needs change, swap out datastore
|
||||
implementations to support your new use cases. Some applications, particularly
|
||||
in early development stages, can afford to incurr the cost of queries on non-
|
||||
relational databases (e.g. using a FSDatastore and not worry about a database
|
||||
at all). When it comes time to switch the tool for performance, updating the
|
||||
application code can be as simple as swapping the datastore in one place, not
|
||||
all over the application code base. This gain in engineering time, both at
|
||||
initial development and during later iterations, can significantly offset the
|
||||
cost of the layer of abstraction.
|
||||
|
||||
*/
|
||||
type Query struct {
|
||||
Prefix string // namespaces the query to results whose keys have Prefix
|
||||
Filters []Filter // filter results. apply sequentially
|
||||
Orders []Order // order results. apply sequentially
|
||||
Limit int // maximum number of results
|
||||
Offset int // skip given number of results
|
||||
KeysOnly bool // return only keys.
|
||||
}
|
||||
|
||||
// NotFetched is a special type that signals whether or not the value
|
||||
// of an Entry has been fetched or not. This is needed because
|
||||
// datastore implementations get to decide whether Query returns values
|
||||
// or only keys. nil is not a good signal, as real values may be nil.
|
||||
const NotFetched int = iota
|
||||
|
||||
// Entry is a query result entry.
|
||||
type Entry struct {
|
||||
Key string // cant be ds.Key because circular imports ...!!!
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Result is a special entry that includes an error, so that the client
|
||||
// may be warned about internal errors.
|
||||
type Result struct {
|
||||
Entry
|
||||
|
||||
Error error
|
||||
}
|
||||
|
||||
// Results is a set of Query results. This is the interface for clients.
|
||||
// Example:
|
||||
//
|
||||
// qr, _ := myds.Query(q)
|
||||
// for r := range qr.Next() {
|
||||
// if r.Error != nil {
|
||||
// // handle.
|
||||
// break
|
||||
// }
|
||||
//
|
||||
// fmt.Println(r.Entry.Key, r.Entry.Value)
|
||||
// }
|
||||
//
|
||||
// or, wait on all results at once:
|
||||
//
|
||||
// qr, _ := myds.Query(q)
|
||||
// es, _ := qr.Rest()
|
||||
// for _, e := range es {
|
||||
// fmt.Println(e.Key, e.Value)
|
||||
// }
|
||||
//
|
||||
type Results interface {
|
||||
Query() Query // the query these Results correspond to
|
||||
Next() <-chan Result // returns a channel to wait for the next result
|
||||
Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once.
|
||||
Close() error // client may call Close to signal early exit
|
||||
|
||||
// Process returns a goprocess.Process associated with these results.
|
||||
// most users will not need this function (Close is all they want),
|
||||
// but it's here in case you want to connect the results to other
|
||||
// goprocess-friendly things.
|
||||
Process() goprocess.Process
|
||||
}
|
||||
|
||||
// results implements Results
|
||||
type results struct {
|
||||
query Query
|
||||
proc goprocess.Process
|
||||
res <-chan Result
|
||||
}
|
||||
|
||||
func (r *results) Next() <-chan Result {
|
||||
return r.res
|
||||
}
|
||||
|
||||
func (r *results) Rest() ([]Entry, error) {
|
||||
var es []Entry
|
||||
for e := range r.res {
|
||||
if e.Error != nil {
|
||||
return es, e.Error
|
||||
}
|
||||
es = append(es, e.Entry)
|
||||
}
|
||||
<-r.proc.Closed() // wait till the processing finishes.
|
||||
return es, nil
|
||||
}
|
||||
|
||||
func (r *results) Process() goprocess.Process {
|
||||
return r.proc
|
||||
}
|
||||
|
||||
func (r *results) Close() error {
|
||||
return r.proc.Close()
|
||||
}
|
||||
|
||||
func (r *results) Query() Query {
|
||||
return r.query
|
||||
}
|
||||
|
||||
// ResultBuilder is what implementors use to construct results
|
||||
// Implementors of datastores and their clients must respect the
|
||||
// Process of the Request:
|
||||
//
|
||||
// * clients must call r.Process().Close() on an early exit, so
|
||||
// implementations can reclaim resources.
|
||||
// * if the Entries are read to completion (channel closed), Process
|
||||
// should be closed automatically.
|
||||
// * datastores must respect <-Process.Closing(), which intermediates
|
||||
// an early close signal from the client.
|
||||
//
|
||||
type ResultBuilder struct {
|
||||
Query Query
|
||||
Process goprocess.Process
|
||||
Output chan Result
|
||||
}
|
||||
|
||||
// Results returns a Results to to this builder.
|
||||
func (rb *ResultBuilder) Results() Results {
|
||||
return &results{
|
||||
query: rb.Query,
|
||||
proc: rb.Process,
|
||||
res: rb.Output,
|
||||
}
|
||||
}
|
||||
|
||||
func NewResultBuilder(q Query) *ResultBuilder {
|
||||
b := &ResultBuilder{
|
||||
Query: q,
|
||||
Output: make(chan Result),
|
||||
}
|
||||
b.Process = goprocess.WithTeardown(func() error {
|
||||
close(b.Output)
|
||||
return nil
|
||||
})
|
||||
return b
|
||||
}
|
||||
|
||||
// ResultsWithChan returns a Results object from a channel
|
||||
// of Result entries. Respects its own Close()
|
||||
func ResultsWithChan(q Query, res <-chan Result) Results {
|
||||
b := NewResultBuilder(q)
|
||||
|
||||
// go consume all the entries and add them to the results.
|
||||
b.Process.Go(func(worker goprocess.Process) {
|
||||
for {
|
||||
select {
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
case e, more := <-res:
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case b.Output <- e:
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
go b.Process.CloseAfterChildren()
|
||||
return b.Results()
|
||||
}
|
||||
|
||||
// ResultsWithEntries returns a Results object from a list of entries
|
||||
func ResultsWithEntries(q Query, res []Entry) Results {
|
||||
b := NewResultBuilder(q)
|
||||
|
||||
// go consume all the entries and add them to the results.
|
||||
b.Process.Go(func(worker goprocess.Process) {
|
||||
for _, e := range res {
|
||||
select {
|
||||
case b.Output <- Result{Entry: e}:
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
go b.Process.CloseAfterChildren()
|
||||
return b.Results()
|
||||
}
|
||||
|
||||
func ResultsReplaceQuery(r Results, q Query) Results {
|
||||
return &results{
|
||||
query: q,
|
||||
proc: r.Process(),
|
||||
res: r.Next(),
|
||||
}
|
||||
}
|
||||
127
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query_impl.go
generated
vendored
127
Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query_impl.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
package query
|
||||
|
||||
func DerivedResults(qr Results, ch <-chan Result) Results {
|
||||
return &results{
|
||||
query: qr.Query(),
|
||||
proc: qr.Process(),
|
||||
res: ch,
|
||||
}
|
||||
}
|
||||
|
||||
// NaiveFilter applies a filter to the results.
|
||||
func NaiveFilter(qr Results, filter Filter) Results {
|
||||
ch := make(chan Result)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
for e := range qr.Next() {
|
||||
if e.Error != nil || filter.Filter(e.Entry) {
|
||||
ch <- e
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return DerivedResults(qr, ch)
|
||||
}
|
||||
|
||||
// NaiveLimit truncates the results to a given int limit
|
||||
func NaiveLimit(qr Results, limit int) Results {
|
||||
ch := make(chan Result)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
l := 0
|
||||
for e := range qr.Next() {
|
||||
if e.Error != nil {
|
||||
ch <- e
|
||||
continue
|
||||
}
|
||||
ch <- e
|
||||
l++
|
||||
if limit > 0 && l >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return DerivedResults(qr, ch)
|
||||
}
|
||||
|
||||
// NaiveOffset skips a given number of results
|
||||
func NaiveOffset(qr Results, offset int) Results {
|
||||
ch := make(chan Result)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
sent := 0
|
||||
for e := range qr.Next() {
|
||||
if e.Error != nil {
|
||||
ch <- e
|
||||
}
|
||||
|
||||
if sent < offset {
|
||||
sent++
|
||||
continue
|
||||
}
|
||||
ch <- e
|
||||
}
|
||||
}()
|
||||
|
||||
return DerivedResults(qr, ch)
|
||||
}
|
||||
|
||||
// NaiveOrder reorders results according to given Order.
|
||||
// WARNING: this is the only non-stream friendly operation!
|
||||
func NaiveOrder(qr Results, o Order) Results {
|
||||
ch := make(chan Result)
|
||||
var entries []Entry
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer qr.Close()
|
||||
|
||||
for e := range qr.Next() {
|
||||
if e.Error != nil {
|
||||
ch <- e
|
||||
}
|
||||
|
||||
entries = append(entries, e.Entry)
|
||||
}
|
||||
|
||||
o.Sort(entries)
|
||||
for _, e := range entries {
|
||||
ch <- Result{Entry: e}
|
||||
}
|
||||
}()
|
||||
|
||||
return DerivedResults(qr, ch)
|
||||
}
|
||||
|
||||
func NaiveQueryApply(q Query, qr Results) Results {
|
||||
if q.Prefix != "" {
|
||||
qr = NaiveFilter(qr, FilterKeyPrefix{q.Prefix})
|
||||
}
|
||||
for _, f := range q.Filters {
|
||||
qr = NaiveFilter(qr, f)
|
||||
}
|
||||
for _, o := range q.Orders {
|
||||
qr = NaiveOrder(qr, o)
|
||||
}
|
||||
if q.Offset != 0 {
|
||||
qr = NaiveOffset(qr, q.Offset)
|
||||
}
|
||||
if q.Limit != 0 {
|
||||
qr = NaiveLimit(qr, q.Offset)
|
||||
}
|
||||
return qr
|
||||
}
|
||||
|
||||
func ResultEntriesFrom(keys []string, vals []interface{}) []Entry {
|
||||
re := make([]Entry, len(keys))
|
||||
for i, k := range keys {
|
||||
re[i] = Entry{Key: k, Value: vals[i]}
|
||||
}
|
||||
return re
|
||||
}
|
||||
92
Godeps/_workspace/src/github.com/ipfs/go-datastore/redis/redis.go
generated
vendored
92
Godeps/_workspace/src/github.com/ipfs/go-datastore/redis/redis.go
generated
vendored
@ -1,92 +0,0 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fzzy/radix/redis"
|
||||
datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
var _ datastore.Datastore = &Datastore{}
|
||||
var _ datastore.ThreadSafeDatastore = &Datastore{}
|
||||
|
||||
var ErrInvalidType = errors.New("redis datastore: invalid type error. this datastore only supports []byte values")
|
||||
|
||||
func NewExpiringDatastore(client *redis.Client, ttl time.Duration) (*Datastore, error) {
|
||||
return &Datastore{
|
||||
client: client,
|
||||
ttl: ttl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewDatastore(client *redis.Client) (*Datastore, error) {
|
||||
return &Datastore{
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Datastore struct {
|
||||
mu sync.Mutex
|
||||
client *redis.Client
|
||||
ttl time.Duration
|
||||
}
|
||||
|
||||
func (ds *Datastore) Put(key datastore.Key, value interface{}) error {
|
||||
ds.mu.Lock()
|
||||
defer ds.mu.Unlock()
|
||||
|
||||
data, ok := value.([]byte)
|
||||
if !ok {
|
||||
return ErrInvalidType
|
||||
}
|
||||
|
||||
ds.client.Append("SET", key.String(), data)
|
||||
if ds.ttl != 0 {
|
||||
ds.client.Append("EXPIRE", key.String(), ds.ttl.Seconds())
|
||||
}
|
||||
if err := ds.client.GetReply().Err; err != nil {
|
||||
return fmt.Errorf("failed to put value: %s", err)
|
||||
}
|
||||
if ds.ttl != 0 {
|
||||
if err := ds.client.GetReply().Err; err != nil {
|
||||
return fmt.Errorf("failed to set expiration: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *Datastore) Get(key datastore.Key) (value interface{}, err error) {
|
||||
ds.mu.Lock()
|
||||
defer ds.mu.Unlock()
|
||||
return ds.client.Cmd("GET", key.String()).Bytes()
|
||||
}
|
||||
|
||||
func (ds *Datastore) Has(key datastore.Key) (exists bool, err error) {
|
||||
ds.mu.Lock()
|
||||
defer ds.mu.Unlock()
|
||||
return ds.client.Cmd("EXISTS", key.String()).Bool()
|
||||
}
|
||||
|
||||
func (ds *Datastore) Delete(key datastore.Key) (err error) {
|
||||
ds.mu.Lock()
|
||||
defer ds.mu.Unlock()
|
||||
return ds.client.Cmd("DEL", key.String()).Err
|
||||
}
|
||||
|
||||
func (ds *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
return nil, errors.New("TODO implement query for redis datastore?")
|
||||
}
|
||||
|
||||
func (ds *Datastore) IsThreadSafe() {}
|
||||
|
||||
func (ds *Datastore) Batch() (datastore.Batch, error) {
|
||||
return nil, datastore.ErrBatchUnsupported
|
||||
}
|
||||
|
||||
func (ds *Datastore) Close() error {
|
||||
return ds.client.Close()
|
||||
}
|
||||
116
Godeps/_workspace/src/github.com/ipfs/go-datastore/sync/sync.go
generated
vendored
116
Godeps/_workspace/src/github.com/ipfs/go-datastore/sync/sync.go
generated
vendored
@ -1,116 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// MutexDatastore contains a child datastire and a mutex.
|
||||
// used for coarse sync
|
||||
type MutexDatastore struct {
|
||||
sync.RWMutex
|
||||
|
||||
child ds.Datastore
|
||||
}
|
||||
|
||||
// MutexWrap constructs a datastore with a coarse lock around
|
||||
// the entire datastore, for every single operation
|
||||
func MutexWrap(d ds.Datastore) *MutexDatastore {
|
||||
return &MutexDatastore{child: d}
|
||||
}
|
||||
|
||||
// Children implements Shim
|
||||
func (d *MutexDatastore) Children() []ds.Datastore {
|
||||
return []ds.Datastore{d.child}
|
||||
}
|
||||
|
||||
// IsThreadSafe implements ThreadSafeDatastore
|
||||
func (d *MutexDatastore) IsThreadSafe() {}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *MutexDatastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
return d.child.Put(key, value)
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *MutexDatastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
return d.child.Get(key)
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *MutexDatastore) Has(key ds.Key) (exists bool, err error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
return d.child.Has(key)
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *MutexDatastore) Delete(key ds.Key) (err error) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
return d.child.Delete(key)
|
||||
}
|
||||
|
||||
// KeyList implements Datastore.KeyList
|
||||
func (d *MutexDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
return d.child.Query(q)
|
||||
}
|
||||
|
||||
func (d *MutexDatastore) Batch() (ds.Batch, error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
bds, ok := d.child.(ds.Batching)
|
||||
if !ok {
|
||||
return nil, ds.ErrBatchUnsupported
|
||||
}
|
||||
|
||||
b, err := bds.Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &syncBatch{
|
||||
batch: b,
|
||||
mds: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *MutexDatastore) Close() error {
|
||||
d.RWMutex.Lock()
|
||||
defer d.RWMutex.Unlock()
|
||||
if c, ok := d.child.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type syncBatch struct {
|
||||
batch ds.Batch
|
||||
mds *MutexDatastore
|
||||
}
|
||||
|
||||
func (b *syncBatch) Put(key ds.Key, val interface{}) error {
|
||||
b.mds.Lock()
|
||||
defer b.mds.Unlock()
|
||||
return b.batch.Put(key, val)
|
||||
}
|
||||
|
||||
func (b *syncBatch) Delete(key ds.Key) error {
|
||||
b.mds.Lock()
|
||||
defer b.mds.Unlock()
|
||||
return b.batch.Delete(key)
|
||||
}
|
||||
|
||||
func (b *syncBatch) Commit() error {
|
||||
b.mds.Lock()
|
||||
defer b.mds.Unlock()
|
||||
return b.batch.Commit()
|
||||
}
|
||||
198
Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount/mount.go
generated
vendored
198
Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount/mount.go
generated
vendored
@ -1,198 +0,0 @@
|
||||
// Package mount provides a Datastore that has other Datastores
|
||||
// mounted at various key prefixes and is threadsafe
|
||||
package syncmount
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoMount = errors.New("no datastore mounted for this key")
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
Prefix ds.Key
|
||||
Datastore ds.Datastore
|
||||
}
|
||||
|
||||
func New(mounts []Mount) *Datastore {
|
||||
// make a copy so we're sure it doesn't mutate
|
||||
m := make([]Mount, len(mounts))
|
||||
for i, v := range mounts {
|
||||
m[i] = v
|
||||
}
|
||||
return &Datastore{mounts: m}
|
||||
}
|
||||
|
||||
type Datastore struct {
|
||||
mounts []Mount
|
||||
lk sync.Mutex
|
||||
}
|
||||
|
||||
var _ ds.Datastore = (*Datastore)(nil)
|
||||
|
||||
func (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) {
|
||||
d.lk.Lock()
|
||||
defer d.lk.Unlock()
|
||||
for _, m := range d.mounts {
|
||||
if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
|
||||
s := strings.TrimPrefix(key.String(), m.Prefix.String())
|
||||
k := ds.NewKey(s)
|
||||
return m.Datastore, m.Prefix, k
|
||||
}
|
||||
}
|
||||
return nil, ds.NewKey("/"), key
|
||||
}
|
||||
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) error {
|
||||
cds, _, k := d.lookup(key)
|
||||
if cds == nil {
|
||||
return ErrNoMount
|
||||
}
|
||||
return cds.Put(k, value)
|
||||
}
|
||||
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
cds, _, k := d.lookup(key)
|
||||
if cds == nil {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
return cds.Get(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
cds, _, k := d.lookup(key)
|
||||
if cds == nil {
|
||||
return false, nil
|
||||
}
|
||||
return cds.Has(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Delete(key ds.Key) error {
|
||||
cds, _, k := d.lookup(key)
|
||||
if cds == nil {
|
||||
return ds.ErrNotFound
|
||||
}
|
||||
return cds.Delete(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
if len(q.Filters) > 0 ||
|
||||
len(q.Orders) > 0 ||
|
||||
q.Limit > 0 ||
|
||||
q.Offset > 0 {
|
||||
// TODO this is overly simplistic, but the only caller is
|
||||
// `ipfs refs local` for now, and this gets us moving.
|
||||
return nil, errors.New("mount only supports listing all prefixed keys in random order")
|
||||
}
|
||||
key := ds.NewKey(q.Prefix)
|
||||
cds, mount, k := d.lookup(key)
|
||||
if cds == nil {
|
||||
return nil, errors.New("mount only supports listing a mount point")
|
||||
}
|
||||
// TODO support listing cross mount points too
|
||||
|
||||
// delegate the query to the mounted datastore, while adjusting
|
||||
// keys in and out
|
||||
q2 := q
|
||||
q2.Prefix = k.String()
|
||||
wrapDS := keytransform.Wrap(cds, &keytransform.Pair{
|
||||
Convert: func(ds.Key) ds.Key {
|
||||
panic("this should never be called")
|
||||
},
|
||||
Invert: func(k ds.Key) ds.Key {
|
||||
return mount.Child(k)
|
||||
},
|
||||
})
|
||||
|
||||
r, err := wrapDS.Query(q2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r = query.ResultsReplaceQuery(r, q)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (d *Datastore) IsThreadSafe() {}
|
||||
|
||||
func (d *Datastore) Close() error {
|
||||
for _, d := range d.mounts {
|
||||
if c, ok := d.Datastore.(io.Closer); ok {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type mountBatch struct {
|
||||
mounts map[string]ds.Batch
|
||||
lk sync.Mutex
|
||||
|
||||
d *Datastore
|
||||
}
|
||||
|
||||
func (d *Datastore) Batch() (ds.Batch, error) {
|
||||
return &mountBatch{
|
||||
mounts: make(map[string]ds.Batch),
|
||||
d: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) {
|
||||
mt.lk.Lock()
|
||||
defer mt.lk.Unlock()
|
||||
|
||||
child, loc, rest := mt.d.lookup(key)
|
||||
t, ok := mt.mounts[loc.String()]
|
||||
if !ok {
|
||||
bds, ok := child.(ds.Batching)
|
||||
if !ok {
|
||||
return nil, ds.NewKey(""), ds.ErrBatchUnsupported
|
||||
}
|
||||
var err error
|
||||
t, err = bds.Batch()
|
||||
if err != nil {
|
||||
return nil, ds.NewKey(""), err
|
||||
}
|
||||
mt.mounts[loc.String()] = t
|
||||
}
|
||||
return t, rest, nil
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Put(key ds.Key, val interface{}) error {
|
||||
t, rest, err := mt.lookupBatch(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Put(rest, val)
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Delete(key ds.Key) error {
|
||||
t, rest, err := mt.lookupBatch(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Delete(rest)
|
||||
}
|
||||
|
||||
func (mt *mountBatch) Commit() error {
|
||||
for _, t := range mt.mounts {
|
||||
err := t.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
25
Godeps/_workspace/src/github.com/ipfs/go-datastore/test/assert.go
generated
vendored
25
Godeps/_workspace/src/github.com/ipfs/go-datastore/test/assert.go
generated
vendored
@ -1,25 +0,0 @@
|
||||
package dstest
|
||||
|
||||
import "testing"
|
||||
|
||||
func Nil(err error, t *testing.T, msgs ...string) {
|
||||
if err != nil {
|
||||
t.Fatal(msgs, "error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func True(v bool, t *testing.T, msgs ...string) {
|
||||
if !v {
|
||||
t.Fatal(msgs)
|
||||
}
|
||||
}
|
||||
|
||||
func False(v bool, t *testing.T, msgs ...string) {
|
||||
True(!v, t, msgs...)
|
||||
}
|
||||
|
||||
func Err(err error, t *testing.T, msgs ...string) {
|
||||
if err == nil {
|
||||
t.Fatal(msgs, "error:", err)
|
||||
}
|
||||
}
|
||||
99
Godeps/_workspace/src/github.com/ipfs/go-datastore/test/test_util.go
generated
vendored
99
Godeps/_workspace/src/github.com/ipfs/go-datastore/test/test_util.go
generated
vendored
@ -1,99 +0,0 @@
|
||||
package dstest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"testing"
|
||||
|
||||
rand "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/randbo"
|
||||
dstore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
)
|
||||
|
||||
func RunBatchTest(t *testing.T, ds dstore.Batching) {
|
||||
batch, err := ds.Batch()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := rand.New()
|
||||
var blocks [][]byte
|
||||
var keys []dstore.Key
|
||||
for i := 0; i < 20; i++ {
|
||||
blk := make([]byte, 256*1024)
|
||||
r.Read(blk)
|
||||
blocks = append(blocks, blk)
|
||||
|
||||
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
|
||||
keys = append(keys, key)
|
||||
|
||||
err := batch.Put(key, blk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure they are not in the datastore before comitting
|
||||
for _, k := range keys {
|
||||
_, err := ds.Get(k)
|
||||
if err == nil {
|
||||
t.Fatal("should not have found this block")
|
||||
}
|
||||
}
|
||||
|
||||
// commit, write them to the datastore
|
||||
err = batch.Commit()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i, k := range keys {
|
||||
blk, err := ds.Get(k)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(blk.([]byte), blocks[i]) {
|
||||
t.Fatal("blocks not correct!")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
|
||||
r := rand.New()
|
||||
var keys []dstore.Key
|
||||
for i := 0; i < 20; i++ {
|
||||
blk := make([]byte, 16)
|
||||
r.Read(blk)
|
||||
|
||||
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
|
||||
keys = append(keys, key)
|
||||
|
||||
err := ds.Put(key, blk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
batch, err := ds.Batch()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
err := batch.Delete(k)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
err = batch.Commit()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
_, err := ds.Get(k)
|
||||
if err == nil {
|
||||
t.Fatal("shouldnt have found block")
|
||||
}
|
||||
}
|
||||
}
|
||||
94
Godeps/_workspace/src/github.com/ipfs/go-datastore/tiered/tiered.go
generated
vendored
94
Godeps/_workspace/src/github.com/ipfs/go-datastore/tiered/tiered.go
generated
vendored
@ -1,94 +0,0 @@
|
||||
package tiered
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
type tiered []ds.Datastore
|
||||
|
||||
// New returns a tiered datastore. Puts and Deletes will write-through to
|
||||
// all datastores, Has and Get will try each datastore sequentially, and
|
||||
// Query will always try the last one (most complete) first.
|
||||
func New(dses ...ds.Datastore) tiered {
|
||||
return tiered(dses)
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d tiered) Put(key ds.Key, value interface{}) (err error) {
|
||||
errs := make(chan error, len(d))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, cd := range d {
|
||||
wg.Add(1)
|
||||
go func(cd ds.Datastore) {
|
||||
defer wg.Done()
|
||||
if err := cd.Put(key, value); err != nil {
|
||||
errs <- err
|
||||
}
|
||||
}(cd)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errs)
|
||||
for err := range errs {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d tiered) Get(key ds.Key) (value interface{}, err error) {
|
||||
err = fmt.Errorf("no datastores")
|
||||
for _, cd := range d {
|
||||
value, err = cd.Get(key)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d tiered) Has(key ds.Key) (exists bool, err error) {
|
||||
err = fmt.Errorf("no datastores")
|
||||
for _, cd := range d {
|
||||
exists, err = cd.Has(key)
|
||||
if err == nil && exists {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d tiered) Delete(key ds.Key) (err error) {
|
||||
errs := make(chan error, len(d))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, cd := range d {
|
||||
wg.Add(1)
|
||||
go func(cd ds.Datastore) {
|
||||
defer wg.Done()
|
||||
if err := cd.Delete(key); err != nil {
|
||||
errs <- err
|
||||
}
|
||||
}(cd)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errs)
|
||||
for err := range errs {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d tiered) Query(q dsq.Query) (dsq.Results, error) {
|
||||
// query always the last (most complete) one
|
||||
return d[len(d)-1].Query(q)
|
||||
}
|
||||
104
Godeps/_workspace/src/github.com/ipfs/go-datastore/timecache/timecache.go
generated
vendored
104
Godeps/_workspace/src/github.com/ipfs/go-datastore/timecache/timecache.go
generated
vendored
@ -1,104 +0,0 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// op keys
|
||||
var (
|
||||
putKey = "put"
|
||||
getKey = "get"
|
||||
hasKey = "has"
|
||||
deleteKey = "delete"
|
||||
)
|
||||
|
||||
type datastore struct {
|
||||
cache ds.Datastore
|
||||
ttl time.Duration
|
||||
|
||||
ttlmu sync.Mutex
|
||||
ttls map[ds.Key]time.Time
|
||||
}
|
||||
|
||||
func WithTTL(ttl time.Duration) *datastore {
|
||||
return WithCache(ds.NewMapDatastore(), ttl)
|
||||
}
|
||||
|
||||
// WithCache wraps a given datastore as a timecache.
|
||||
// Get + Has requests are considered expired after a TTL.
|
||||
func WithCache(d ds.Datastore, ttl time.Duration) *datastore {
|
||||
return &datastore{cache: d, ttl: ttl, ttls: make(map[ds.Key]time.Time)}
|
||||
}
|
||||
|
||||
func (d *datastore) gc() {
|
||||
var now = time.Now()
|
||||
var del []ds.Key
|
||||
|
||||
// remove all expired ttls.
|
||||
d.ttlmu.Lock()
|
||||
for k, ttl := range d.ttls {
|
||||
if now.After(ttl) {
|
||||
delete(d.ttls, k)
|
||||
del = append(del, k)
|
||||
}
|
||||
}
|
||||
d.ttlmu.Unlock()
|
||||
|
||||
for _, k := range del {
|
||||
d.cache.Delete(k)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *datastore) ttlPut(key ds.Key) {
|
||||
d.ttlmu.Lock()
|
||||
d.ttls[key] = time.Now().Add(d.ttl)
|
||||
d.ttlmu.Unlock()
|
||||
}
|
||||
|
||||
func (d *datastore) ttlDelete(key ds.Key) {
|
||||
d.ttlmu.Lock()
|
||||
delete(d.ttls, key)
|
||||
d.ttlmu.Unlock()
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
err = d.cache.Put(key, value)
|
||||
d.ttlPut(key)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
d.gc()
|
||||
return d.cache.Get(key)
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
d.gc()
|
||||
return d.cache.Has(key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *datastore) Delete(key ds.Key) (err error) {
|
||||
d.ttlDelete(key)
|
||||
return d.cache.Delete(key)
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return d.cache.Query(q)
|
||||
}
|
||||
|
||||
func (d *datastore) Close() error {
|
||||
if c, ok := d.cache.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -7,9 +7,9 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsns "github.com/ipfs/go-datastore/namespace"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
|
||||
|
||||
@ -5,9 +5,9 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
ds_sync "github.com/ipfs/go-datastore/sync"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
|
||||
@ -3,9 +3,9 @@ package blockstore
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/blocks"
|
||||
)
|
||||
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58"
|
||||
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
|
||||
)
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil"
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsync "github.com/ipfs/go-datastore/sync"
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
|
||||
@ -16,7 +16,7 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
diag "github.com/ipfs/go-ipfs/diagnostics"
|
||||
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
|
||||
discovery "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/discovery"
|
||||
|
||||
@ -3,7 +3,7 @@ package corerouting
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
repo "github.com/ipfs/go-ipfs/repo"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
|
||||
@ -7,8 +7,8 @@ import (
|
||||
"os"
|
||||
gopath "path"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
|
||||
@ -3,8 +3,8 @@ package coremock
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
commands "github.com/ipfs/go-ipfs/commands"
|
||||
|
||||
@ -8,8 +8,8 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
message "github.com/ipfs/go-ipfs/exchange/bitswap/message"
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package bitswap
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
|
||||
mockrouting "github.com/ipfs/go-ipfs/routing/mock"
|
||||
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
|
||||
|
||||
@ -3,8 +3,8 @@ package bitswap
|
||||
import (
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
ds_sync "github.com/ipfs/go-datastore/sync"
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
|
||||
datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
|
||||
|
||||
@ -3,8 +3,8 @@ package offline
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
ds_sync "github.com/ipfs/go-datastore/sync"
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
"github.com/ipfs/go-ipfs/blocks/blocksutil"
|
||||
|
||||
@ -3,8 +3,8 @@ package reprovide_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
blocks "github.com/ipfs/go-ipfs/blocks"
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
mock "github.com/ipfs/go-ipfs/routing/mock"
|
||||
|
||||
@ -10,8 +10,8 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
package mdutils
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bsrv "github.com/ipfs/go-ipfs/blockservice"
|
||||
"github.com/ipfs/go-ipfs/exchange/offline"
|
||||
|
||||
@ -3,8 +3,8 @@ package dagutils
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
|
||||
@ -13,8 +13,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/path"
|
||||
randbo "gx/ipfs/QmYvsG72GsfLgUeSojXArjnU6L4Wmwk7wuAxtNLuyXcc1T/randbo"
|
||||
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
path "github.com/ipfs/go-ipfs/path"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
ci "gx/ipfs/QmUEUu1CM8bxBJxc3ZLojAi8evhTr4byQogWstABet79oY/go-libp2p-crypto"
|
||||
|
||||
@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
|
||||
@ -12,7 +12,7 @@ import (
|
||||
"github.com/ipfs/go-ipfs/routing"
|
||||
dhtpb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
|
||||
goprocess "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
|
||||
gpctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context"
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
path "github.com/ipfs/go-ipfs/path"
|
||||
mockrouting "github.com/ipfs/go-ipfs/routing/mock"
|
||||
|
||||
@ -7,7 +7,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
"github.com/ipfs/go-ipfs/blocks/set"
|
||||
mdag "github.com/ipfs/go-ipfs/merkledag"
|
||||
|
||||
@ -6,8 +6,8 @@ import (
|
||||
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
bs "github.com/ipfs/go-ipfs/blockservice"
|
||||
|
||||
@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
"github.com/ipfs/go-ipfs/blocks/key"
|
||||
"github.com/ipfs/go-ipfs/blockservice"
|
||||
|
||||
@ -4,11 +4,11 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs"
|
||||
levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure"
|
||||
mount "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/flatfs"
|
||||
levelds "github.com/ipfs/go-datastore/leveldb"
|
||||
"github.com/ipfs/go-datastore/measure"
|
||||
mount "github.com/ipfs/go-datastore/syncmount"
|
||||
repo "github.com/ipfs/go-ipfs/repo"
|
||||
config "github.com/ipfs/go-ipfs/repo/config"
|
||||
"github.com/ipfs/go-ipfs/thirdparty/dir"
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure"
|
||||
"github.com/ipfs/go-datastore/measure"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir"
|
||||
repo "github.com/ipfs/go-ipfs/repo"
|
||||
"github.com/ipfs/go-ipfs/repo/common"
|
||||
|
||||
@ -7,7 +7,7 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
datastore "github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/repo/config"
|
||||
"github.com/ipfs/go-ipfs/thirdparty/assert"
|
||||
)
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
config "github.com/ipfs/go-ipfs/repo/config"
|
||||
)
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ import (
|
||||
kb "github.com/ipfs/go-ipfs/routing/kbucket"
|
||||
record "github.com/ipfs/go-ipfs/routing/record"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
|
||||
host "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/host"
|
||||
protocol "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/protocol"
|
||||
|
||||
@ -9,8 +9,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
record "github.com/ipfs/go-ipfs/routing/record"
|
||||
|
||||
@ -6,8 +6,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
pb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
|
||||
@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
pb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
lgbl "github.com/ipfs/go-ipfs/thirdparty/loggables"
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
dhtpb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
"github.com/ipfs/go-ipfs/thirdparty/testutil"
|
||||
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
package mockrouting
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
sync "github.com/ipfs/go-datastore/sync"
|
||||
dht "github.com/ipfs/go-ipfs/routing/dht"
|
||||
"github.com/ipfs/go-ipfs/thirdparty/testutil"
|
||||
mocknet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net/mock"
|
||||
|
||||
@ -5,7 +5,7 @@
|
||||
package mockrouting
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
delay "github.com/ipfs/go-ipfs/thirdparty/delay"
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
routing "github.com/ipfs/go-ipfs/routing"
|
||||
pb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
datastore "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
dhtpb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
record "github.com/ipfs/go-ipfs/routing/record"
|
||||
|
||||
@ -3,7 +3,7 @@ package supernode
|
||||
import (
|
||||
"testing"
|
||||
|
||||
datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
datastore "github.com/ipfs/go-datastore"
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
dhtpb "github.com/ipfs/go-ipfs/routing/dht/pb"
|
||||
)
|
||||
|
||||
@ -8,8 +8,8 @@ import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
|
||||
key "github.com/ipfs/go-ipfs/blocks/key"
|
||||
|
||||
@ -12,8 +12,8 @@ import (
|
||||
gopath "path"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
random "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-random"
|
||||
commands "github.com/ipfs/go-ipfs/commands"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
|
||||
2
thirdparty/datastore2/datastore_closer.go
vendored
2
thirdparty/datastore2/datastore_closer.go
vendored
@ -3,7 +3,7 @@ package datastore2
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore"
|
||||
)
|
||||
|
||||
type ThreadSafeDatastoreCloser interface {
|
||||
|
||||
4
thirdparty/datastore2/delayed.go
vendored
4
thirdparty/datastore2/delayed.go
vendored
@ -1,8 +1,8 @@
|
||||
package datastore2
|
||||
|
||||
import (
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
|
||||
delay "github.com/ipfs/go-ipfs/thirdparty/delay"
|
||||
)
|
||||
|
||||
2
thirdparty/datastore2/threadsafe.go
vendored
2
thirdparty/datastore2/threadsafe.go
vendored
@ -3,7 +3,7 @@ package datastore2
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore"
|
||||
)
|
||||
|
||||
// ClaimThreadSafe claims that a Datastore is threadsafe, even when
|
||||
|
||||
4
thirdparty/testutil/datastore.go
vendored
4
thirdparty/testutil/datastore.go
vendored
@ -1,8 +1,8 @@
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore"
|
||||
syncds "github.com/ipfs/go-datastore/sync"
|
||||
ds2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
|
||||
)
|
||||
|
||||
|
||||
@ -7,7 +7,7 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-datastore/sync"
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bs "github.com/ipfs/go-ipfs/blockservice"
|
||||
"github.com/ipfs/go-ipfs/exchange/offline"
|
||||
@ -20,7 +20,7 @@ import (
|
||||
uio "github.com/ipfs/go-ipfs/unixfs/io"
|
||||
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
|
||||
|
||||
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
|
||||
)
|
||||
|
||||
|
||||
Loading…
Reference in New Issue
Block a user