From 7eb4a92aba06dfbf8b1cb5174a8ec7826cdf6d35 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Fri, 20 Mar 2015 06:53:18 -0700 Subject: [PATCH 01/15] 0-to-1 fsrepo migration --- Godeps/Godeps.json | 4 ++ .../src/github.com/jbenet/go-migrate/LICENSE | 21 ++++++ .../github.com/jbenet/go-migrate/README.md | 11 +++ .../src/github.com/jbenet/go-migrate/cli.go | 51 ++++++++++++++ .../src/github.com/jbenet/go-migrate/doc.go | 2 + .../github.com/jbenet/go-migrate/migrate.go | 37 ++++++++++ repo/fsrepo/migrations/0-to-1/main.go | 69 +++++++++++++++++++ repo/fsrepo/migrations/mfsr.go | 56 +++++++++++++++ 8 files changed, 251 insertions(+) create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-migrate/LICENSE create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go create mode 100644 repo/fsrepo/migrations/0-to-1/main.go create mode 100644 repo/fsrepo/migrations/mfsr.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b8b28109e..cbd1e7967 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -159,6 +159,10 @@ "ImportPath": "github.com/jbenet/go-logging", "Rev": "74bec4b83f6d45d1402c1e9d94c0c29e39f6e0ea" }, + { + "ImportPath": "github.com/jbenet/go-migrate", + "Rev": "593be6b4b24a87e4d380e54339721ad4b4c6543c" + }, { "ImportPath": "github.com/jbenet/go-msgio", "Rev": "dbae89193876910c736b2ce1291fa8bbcf299d77" diff --git a/Godeps/_workspace/src/github.com/jbenet/go-migrate/LICENSE b/Godeps/_workspace/src/github.com/jbenet/go-migrate/LICENSE new file mode 100644 index 000000000..c7386b3c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-migrate/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md b/Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md new file mode 100644 index 000000000..8ddf5a6d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md @@ -0,0 +1,11 @@ +# go-migrate + +This is a very simple migration framework. See "Migrations" in https://github.com/jbenet/random-ideas/issues/33 + +This package includes: + +- `migrate` package -- lib to write migration programs + +## The model + +The idea here is that we have some thing -- usually a directory -- that needs to be migrated between different representation versions. This may be because there has been an upgrade. 
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go b/Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go new file mode 100644 index 000000000..66f1f3b2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go @@ -0,0 +1,51 @@ +package migrate + +import ( + "flag" + "fmt" + "os" +) + +type Flags struct { + Force bool + Revert bool + Path string // file path to migrate for fs based migrations +} + +func (f *Flags) Setup() { + flag.BoolVar(&f.Force, "f", false, "whether to force a migration (ignores warnings)") + flag.BoolVar(&f.Revert, "revert", false, "whether to apply the migration backwards") + flag.StringVar(&f.Path, "path", "", "file path to migrate for fs based migrations") +} + +func (f *Flags) Parse() { + flag.Parse() +} + +func Run(m Migration) error { + f := Flags{} + f.Setup() + f.Parse() + + if !m.Reversible() { + if f.Revert { + return fmt.Errorf("migration %d is irreversible", m.Versions()) + } + if !f.Force { + return fmt.Errorf("migration %d is irreversible (use -f to proceed)", m.Versions()) + } + } + + if f.Revert { + return m.Revert(Options{f}) + } else { + return m.Apply(Options{f}) + } +} + +func Main(m Migration) { + if err := Run(m); err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err) + os.Exit(1) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go b/Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go new file mode 100644 index 000000000..29e607a13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go @@ -0,0 +1,2 @@ +// Package migrate is used to write migrations between representations of things. 
+package migrate diff --git a/Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go b/Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go new file mode 100644 index 000000000..f374da273 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go @@ -0,0 +1,37 @@ +package migrate + +import ( + "fmt" +) + +// Options are migration options. For now all flags are options. +type Options struct { + Flags +} + +// Migration represents +type Migration interface { + + // Versions is the "v-to-v" version string. + Versions() string + + // Reversible returns whether this migration can be reverted. + // Endeavor to make them all reversible. This is here only to warn users + // in case this is not the case. + Reversible() bool + + // Apply applies the migration in question. + Apply(Options) error + + // Revert un-applies the migration in question. This should be best-effort. + // Some migrations are definitively one-way. If so, return an error. + Revert(Options) error +} + +func SplitVersion(s string) (from int, to int) { + _, err := fmt.Scanf(s, "%d-to-%d", &from, &to) + if err != nil { + panic(err.Error()) + } + return +} diff --git a/repo/fsrepo/migrations/0-to-1/main.go b/repo/fsrepo/migrations/0-to-1/main.go new file mode 100644 index 000000000..cc0c5ef1c --- /dev/null +++ b/repo/fsrepo/migrations/0-to-1/main.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + "os" + "strings" + + migrate "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-migrate" + mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" +) + +type migration struct { +} + +// Version is the int version number. This could be a string +// in future versions +func (m migration) Versions() string { + return "0-to-1" +} + +// Reversible returns whether this migration can be reverted. +// Endeavor to make them all reversible. This is here only to warn users +// in case this is not the case. 
+func (m migration) Reversible() bool { + return true +} + +// Apply applies the migration in question. +// This migration merely adds a version file. +func (m migration) Apply(opts migrate.Options) error { + repo := mfsr.RepoPath(opts.Path) + + // first, check if there is a version file. + // if there is, bail out. + if v, err := repo.Version(); err == nil { + return fmt.Errorf("repo at %s is version %s (not 0)", opts.Path, v) + } else if !strings.Contains(err.Error(), "no version file in repo") { + return err + } + + // add the version file + if err := repo.WriteVersion("1"); err != nil { + return err + } + + return nil +} + +// Revert un-applies the migration in question. This should be best-effort. +// Some migrations are definitively one-way. If so, return an error. +func (m migration) Revert(opts migrate.Options) error { + repo := mfsr.RepoPath(opts.Path) + + if err := repo.CheckVersion("1"); err != nil { + return err + } + + // remove the version file + if err := os.Remove(repo.VersionFile()); err != nil { + return err + } + + return nil +} + +func main() { + m := migration{} + migrate.Main(&m) +} diff --git a/repo/fsrepo/migrations/mfsr.go b/repo/fsrepo/migrations/mfsr.go new file mode 100644 index 000000000..21b81f1a3 --- /dev/null +++ b/repo/fsrepo/migrations/mfsr.go @@ -0,0 +1,56 @@ +package mfsr + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" +) + +const VersionFile = "version" + +type RepoPath string + +func (rp RepoPath) VersionFile() string { + return path.Join(string(rp), VersionFile) +} + +func (rp RepoPath) Version() (string, error) { + if rp == "" { + return "", fmt.Errorf("invalid repo path \"%s\"", rp) + } + + fn := rp.VersionFile() + if _, err := os.Stat(fn); os.IsNotExist(err) { + return "", errors.New("no version file in repo at " + string(rp)) + } + + c, err := ioutil.ReadFile(fn) + if err != nil { + return "", err + } + + s := string(c) + s = strings.TrimSpace(s) + return s, nil +} + +func (rp RepoPath) 
CheckVersion(version string) error { + v, err := rp.Version() + if err != nil { + return err + } + + if v != version { + return fmt.Errorf("versions differ (expected: %s, actual:%s)", version, v) + } + + return nil +} + +func (rp RepoPath) WriteVersion(version string) error { + fn := rp.VersionFile() + return ioutil.WriteFile(fn, []byte(version+"\n"), 0644) +} From 5cb8d80bb6ac0f0ae150c61c2de9c19c680fd015 Mon Sep 17 00:00:00 2001 From: Tommi Virtanen Date: Mon, 16 Mar 2015 13:49:51 -0700 Subject: [PATCH 02/15] LevelDB is no longer "the" datastore, adjust identifiers --- repo/fsrepo/fsrepo.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 84664ab5f..9b5d7949c 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -25,7 +25,7 @@ import ( ) const ( - defaultDataStoreDirectory = "datastore" + leveldbDirectory = "datastore" ) var ( @@ -190,8 +190,8 @@ func Init(repoPath string, conf *config.Config) error { // The actual datastore contents are initialized lazily when Opened. // During Init, we merely check that the directory is writeable. - p := path.Join(repoPath, defaultDataStoreDirectory) - if err := dir.Writable(p); err != nil { + leveldbPath := path.Join(repoPath, leveldbDirectory) + if err := dir.Writable(leveldbPath); err != nil { return fmt.Errorf("datastore: %s", err) } @@ -236,8 +236,8 @@ func (r *FSRepo) openConfig() error { // openDatastore returns an error if the config file is not present. 
func (r *FSRepo) openDatastore() error { - dsPath := path.Join(r.path, defaultDataStoreDirectory) - ds, err := levelds.NewDatastore(dsPath, &levelds.Options{ + leveldbPath := path.Join(r.path, leveldbDirectory) + ds, err := levelds.NewDatastore(leveldbPath, &levelds.Options{ Compression: ldbopts.NoCompression, }) if err != nil { @@ -430,7 +430,7 @@ func isInitializedUnsynced(repoPath string) bool { if !configIsInitialized(repoPath) { return false } - if !util.FileExists(path.Join(repoPath, defaultDataStoreDirectory)) { + if !util.FileExists(path.Join(repoPath, leveldbDirectory)) { return false } return true From 26cebac6d82d886cb59477c9eb4b9d78f4526a74 Mon Sep 17 00:00:00 2001 From: Tommi Virtanen Date: Mon, 16 Mar 2015 13:50:48 -0700 Subject: [PATCH 03/15] Remove comment referring to old code FSRepo.Open is dead since fdd1cd8dc045db90b06497a15df7f6f232f76bda --- repo/fsrepo/fsrepo.go | 1 - 1 file changed, 1 deletion(-) diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 9b5d7949c..203426446 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -60,7 +60,6 @@ type FSRepo struct { // the same fsrepo path concurrently lockfile io.Closer config *config.Config - // ds is set on Open ds ds2.ThreadSafeDatastoreCloser } From 59aa209164fefbdfd2f6a50608a30da68c0d0b4a Mon Sep 17 00:00:00 2001 From: Tommi Virtanen Date: Mon, 16 Mar 2015 13:55:33 -0700 Subject: [PATCH 04/15] Let FSRepo Close know explicitly about LevelDB This allows replacing the datastore without needing to write Close through to every wrapped datastore. 
--- repo/fsrepo/fsrepo.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 203426446..a9e750d54 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -60,7 +60,9 @@ type FSRepo struct { // the same fsrepo path concurrently lockfile io.Closer config *config.Config - ds ds2.ThreadSafeDatastoreCloser + ds ds.ThreadSafeDatastore + // tracked separately for use in Close; do not use directly. + leveldbDS levelds.Datastore } var _ repo.Repo = (*FSRepo)(nil) @@ -236,13 +238,15 @@ func (r *FSRepo) openConfig() error { // openDatastore returns an error if the config file is not present. func (r *FSRepo) openDatastore() error { leveldbPath := path.Join(r.path, leveldbDirectory) - ds, err := levelds.NewDatastore(leveldbPath, &levelds.Options{ + var err error + // save leveldb reference so it can be neatly closed afterward + r.leveldbDS, err = levelds.NewDatastore(leveldbPath, &levelds.Options{ Compression: ldbopts.NoCompression, }) if err != nil { return errors.New("unable to open leveldb datastore") } - r.ds = ds + r.ds = r.leveldbDS return nil } @@ -267,7 +271,7 @@ func (r *FSRepo) Close() error { return errors.New("repo is closed") } - if err := r.ds.Close(); err != nil { + if err := r.leveldbDS.Close(); err != nil { return err } From 4f4b894627d56e0c1a88f1dd3905b0d017fc10ae Mon Sep 17 00:00:00 2001 From: Tommi Virtanen Date: Mon, 16 Mar 2015 13:59:26 -0700 Subject: [PATCH 05/15] Update vendored go-datastore, now has flatfs and mount --- Godeps/Godeps.json | 2 +- .../jbenet/go-datastore/Godeps/Godeps.json | 4 +- .../github.com/jbenet/go-datastore/Makefile | 4 +- .../github.com/jbenet/go-datastore/README.md | 2 +- .../jbenet/go-datastore/basic_ds.go | 3 + .../jbenet/go-datastore/callback/callback.go | 42 +++ .../jbenet/go-datastore/coalesce/coalesce.go | 126 +++++++ .../go-datastore/coalesce/coalesce_test.go | 299 +++++++++++++++++ .../jbenet/go-datastore/datastore.go | 4 +- 
.../jbenet/go-datastore/flatfs/flatfs.go | 241 ++++++++++++++ .../jbenet/go-datastore/flatfs/flatfs_test.go | 315 ++++++++++++++++++ .../github.com/jbenet/go-datastore/fs/fs.go | 19 +- .../jbenet/go-datastore/lru/datastore_test.go | 1 - .../jbenet/go-datastore/mount/mount.go | 116 +++++++ .../jbenet/go-datastore/mount/mount_test.go | 241 ++++++++++++++ .../jbenet/go-datastore/tiered/tiered.go | 94 ++++++ .../jbenet/go-datastore/tiered/tiered_test.go | 79 +++++ .../go-datastore/timecache/timecache.go | 96 ++++++ .../go-datastore/timecache/timecache_test.go | 64 ++++ 19 files changed, 1742 insertions(+), 10 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/callback/callback.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index cbd1e7967..b777b8db2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -141,7 +141,7 @@ }, { "ImportPath": "github.com/jbenet/go-datastore", - "Rev": "35738aceb35505bd3c77c2a618fb1947ca3f72da" + "Rev": "f1a0a0fd88f23b67589957f02b7500372aca186f" }, { "ImportPath": 
"github.com/jbenet/go-detect-race", diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json index 6dc670b20..0b0fc49ba 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/jbenet/go-datastore", - "GoVersion": "go1.3", + "GoVersion": "go1.4", "Packages": [ "./..." ], @@ -20,7 +20,7 @@ }, { "ImportPath": "github.com/jbenet/goprocess", - "Rev": "b4b4178efcf2404ce9db72438c9c49db2fb399d8" + "Rev": "5b02f8d275a2dd882fb06f8bbdf74347795ff3b1" }, { "ImportPath": "github.com/mattbaird/elastigo/api", diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/Makefile b/Godeps/_workspace/src/github.com/jbenet/go-datastore/Makefile index c03d51b20..100d402cb 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/Makefile +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/Makefile @@ -1,8 +1,8 @@ build: go build -test: - go test ./... +test: build + go test -race -cpu=5 -v ./... # saves/vendors third-party dependencies to Godeps/_workspace # -r flag rewrites import paths to use the vendored path diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/README.md b/Godeps/_workspace/src/github.com/jbenet/go-datastore/README.md index 3230090a8..2baf4b2a2 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/README.md +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/README.md @@ -8,7 +8,7 @@ Based on [datastore.py](https://github.com/datastore/datastore). 
### Documentation -https://godoc.org/github.com/datastore/go-datastore +https://godoc.org/github.com/jbenet/go-datastore ### License diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/basic_ds.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/basic_ds.go index 9949775d5..c627a1d56 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/basic_ds.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/basic_ds.go @@ -45,6 +45,9 @@ func (d *MapDatastore) Has(key Key) (exists bool, err error) { // Delete implements Datastore.Delete func (d *MapDatastore) Delete(key Key) (err error) { + if _, found := d.values[key]; !found { + return ErrNotFound + } delete(d.values, key) return nil } diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback/callback.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback/callback.go new file mode 100644 index 000000000..302b4b33b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback/callback.go @@ -0,0 +1,42 @@ +package callback + +import ( + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +type Datastore struct { + D ds.Datastore + F func() +} + +func Wrap(ds ds.Datastore, f func()) ds.Datastore { + return &Datastore{ds, f} +} + +func (c *Datastore) SetFunc(f func()) { c.F = f } + +func (c *Datastore) Put(key ds.Key, value interface{}) (err error) { + c.F() + return c.D.Put(key, value) +} + +func (c *Datastore) Get(key ds.Key) (value interface{}, err error) { + c.F() + return c.D.Get(key) +} + +func (c *Datastore) Has(key ds.Key) (exists bool, err error) { + c.F() + return c.D.Has(key) +} + +func (c *Datastore) Delete(key ds.Key) (err error) { + c.F() + return c.D.Delete(key) +} + +func (c *Datastore) Query(q dsq.Query) (dsq.Results, error) { + c.F() + return c.D.Query(q) +} diff --git 
a/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go new file mode 100644 index 000000000..c583e5615 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go @@ -0,0 +1,126 @@ +package coalesce + +import ( + "sync" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +var ( + putKey = "put" + getKey = // parent keys + "get" + hasKey = "has" + deleteKey = "delete" +) + +type keySync struct { + op string + k ds.Key + value interface{} +} + +type valSync struct { + val interface{} + err error + done chan struct{} +} + +// Datastore uses golang-lru for internal storage. +type datastore struct { + child ds.Datastore + + reqmu sync.Mutex + req map[keySync]*valSync +} + +// Wrap wraps a given datastore with a coalescing datastore. +// All simultaenous requests which have the same keys will +// yield the exact same result. Note that this shares +// memory. It is not possible to copy a generic interface{} +func Wrap(d ds.Datastore) ds.Datastore { + return &datastore{child: d, req: make(map[keySync]*valSync)} +} + +// sync synchronizes requests for a given key. +func (d *datastore) sync(k keySync) (vs *valSync, found bool) { + d.reqmu.Lock() + vs, found = d.req[k] + if !found { + vs = &valSync{done: make(chan struct{})} + d.req[k] = vs + } + d.reqmu.Unlock() + + // if we did find one, wait till it's done. + if found { + <-vs.done + } + return vs, found +} + +// sync synchronizes requests for a given key. +func (d *datastore) syncDone(k keySync) { + + d.reqmu.Lock() + vs, found := d.req[k] + if !found { + panic("attempt to syncDone non-existent request") + } + delete(d.req, k) + d.reqmu.Unlock() + + // release all the waiters. + close(vs.done) +} + +// Put stores the object `value` named by `key`. 
+func (d *datastore) Put(key ds.Key, value interface{}) (err error) { + ks := keySync{putKey, key, value} + vs, found := d.sync(ks) + if !found { + vs.err = d.child.Put(key, value) + d.syncDone(ks) + } + return err +} + +// Get retrieves the object `value` named by `key`. +func (d *datastore) Get(key ds.Key) (value interface{}, err error) { + ks := keySync{getKey, key, nil} + vs, found := d.sync(ks) + if !found { + vs.val, vs.err = d.child.Get(key) + d.syncDone(ks) + } + return vs.val, vs.err +} + +// Has returns whether the `key` is mapped to a `value`. +func (d *datastore) Has(key ds.Key) (exists bool, err error) { + ks := keySync{hasKey, key, nil} + vs, found := d.sync(ks) + if !found { + vs.val, vs.err = d.child.Has(key) + d.syncDone(ks) + } + return vs.val.(bool), vs.err +} + +// Delete removes the value for given `key`. +func (d *datastore) Delete(key ds.Key) (err error) { + ks := keySync{deleteKey, key, nil} + vs, found := d.sync(ks) + if !found { + vs.err = d.child.Delete(key) + d.syncDone(ks) + } + return vs.err +} + +// Query returns a list of keys in the datastore +func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { + // query not coalesced yet. 
+ return d.child.Query(q) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce_test.go new file mode 100644 index 000000000..d116520cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce_test.go @@ -0,0 +1,299 @@ +package coalesce + +import ( + "fmt" + "sync" + "testing" + "time" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dscb "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback" + dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" +) + +type mock struct { + sync.Mutex + + inside int + outside int + ds ds.Datastore +} + +func setup() *mock { + m := &mock{} + + mp := ds.NewMapDatastore() + ts := dssync.MutexWrap(mp) + cb1 := dscb.Wrap(ts, func() { + m.Lock() + m.inside++ + m.Unlock() + <-time.After(20 * time.Millisecond) + }) + cd := Wrap(cb1) + cb2 := dscb.Wrap(cd, func() { + m.Lock() + m.outside++ + m.Unlock() + }) + + m.ds = cb2 + return m +} + +func TestCoalesceSamePut(t *testing.T) { + m := setup() + done := make(chan struct{}) + + go func() { + m.ds.Put(ds.NewKey("foo"), "bar") + done <- struct{}{} + }() + go func() { + m.ds.Put(ds.NewKey("foo"), "bar") + done <- struct{}{} + }() + go func() { + m.ds.Put(ds.NewKey("foo"), "bar") + done <- struct{}{} + }() + + <-done + <-done + <-done + + if m.inside != 1 { + t.Error("incalls should be 1", m.inside) + } + + if m.outside != 3 { + t.Error("outcalls should be 3", m.outside) + } +} + +func TestCoalesceSamePutDiffPut(t *testing.T) { + m := setup() + done := make(chan struct{}) + + go func() { + m.ds.Put(ds.NewKey("foo"), "bar") + done <- struct{}{} + }() + go func() { + m.ds.Put(ds.NewKey("foo"), "bar") + done <- struct{}{} + }() + go func() { + m.ds.Put(ds.NewKey("foo"), "bar2") + done <- struct{}{} + }() + go func() { + 
m.ds.Put(ds.NewKey("foo"), "bar3") + done <- struct{}{} + }() + + <-done + <-done + <-done + <-done + + if m.inside != 3 { + t.Error("incalls should be 3", m.inside) + } + + if m.outside != 4 { + t.Error("outcalls should be 4", m.outside) + } +} + +func TestCoalesceSameGet(t *testing.T) { + m := setup() + done := make(chan struct{}) + errs := make(chan error, 30) + + m.ds.Put(ds.NewKey("foo1"), "bar") + m.ds.Put(ds.NewKey("foo2"), "baz") + + for i := 0; i < 10; i++ { + go func() { + v, err := m.ds.Get(ds.NewKey("foo1")) + if err != nil { + errs <- err + } + if v != "bar" { + errs <- fmt.Errorf("v is not bar", v) + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + v, err := m.ds.Get(ds.NewKey("foo2")) + if err != nil { + errs <- err + } + if v != "baz" { + errs <- fmt.Errorf("v is not baz", v) + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + _, err := m.ds.Get(ds.NewKey("foo3")) + if err == nil { + errs <- fmt.Errorf("no error") + } + done <- struct{}{} + }() + } + + for i := 0; i < 30; i++ { + <-done + } + + if m.inside != 5 { + t.Error("incalls should be 3", m.inside) + } + + if m.outside != 32 { + t.Error("outcalls should be 30", m.outside) + } +} + +func TestCoalesceHas(t *testing.T) { + m := setup() + done := make(chan struct{}) + errs := make(chan error, 30) + + m.ds.Put(ds.NewKey("foo1"), "bar") + m.ds.Put(ds.NewKey("foo2"), "baz") + + for i := 0; i < 10; i++ { + go func() { + v, err := m.ds.Has(ds.NewKey("foo1")) + if err != nil { + errs <- err + } + if !v { + errs <- fmt.Errorf("should have foo1") + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + v, err := m.ds.Has(ds.NewKey("foo2")) + if err != nil { + errs <- err + } + if !v { + errs <- fmt.Errorf("should have foo2") + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + v, err := m.ds.Has(ds.NewKey("foo3")) + if err != nil { + errs <- err + } + if v { + errs <- fmt.Errorf("should not have foo3") 
+ } + done <- struct{}{} + }() + } + + for i := 0; i < 30; i++ { + <-done + } + + if m.inside != 5 { + t.Error("incalls should be 3", m.inside) + } + + if m.outside != 32 { + t.Error("outcalls should be 30", m.outside) + } +} + +func TestCoalesceDelete(t *testing.T) { + m := setup() + done := make(chan struct{}) + errs := make(chan error, 30) + + m.ds.Put(ds.NewKey("foo1"), "bar1") + m.ds.Put(ds.NewKey("foo2"), "bar2") + m.ds.Put(ds.NewKey("foo3"), "bar3") + + for i := 0; i < 10; i++ { + go func() { + err := m.ds.Delete(ds.NewKey("foo1")) + if err != nil { + errs <- err + } + has, err := m.ds.Has(ds.NewKey("foo1")) + if err != nil { + errs <- err + } + if has { + t.Error("still have it after deleting") + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + err := m.ds.Delete(ds.NewKey("foo2")) + if err != nil { + errs <- err + } + has, err := m.ds.Has(ds.NewKey("foo2")) + if err != nil { + errs <- err + } + if has { + t.Error("still have it after deleting") + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + has, err := m.ds.Has(ds.NewKey("foo3")) + if err != nil { + errs <- err + } + if !has { + t.Error("should still have foo3") + } + done <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + go func() { + has, err := m.ds.Has(ds.NewKey("foo4")) + if err != nil { + errs <- err + } + if has { + t.Error("should not have foo4") + } + done <- struct{}{} + }() + } + + for i := 0; i < 40; i++ { + <-done + } + + if m.inside != 9 { + t.Error("incalls should be 9", m.inside) + } + + if m.outside != 63 { + t.Error("outcalls should be 63", m.outside) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/datastore.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/datastore.go index ea6b7e108..e6c94e1e9 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/datastore.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/datastore.go @@ -39,7 +39,7 @@ type Datastore interface { // 
Ultimately, the lowest-level datastore will need to do some value checking // or risk getting incorrect values. It may also be useful to expose a more // type-safe interface to your application, and do the checking up-front. - Put(key Key, value interface{}) (err error) + Put(key Key, value interface{}) error // Get retrieves the object `value` named by `key`. // Get will return ErrNotFound if the key is not mapped to a value. @@ -52,7 +52,7 @@ type Datastore interface { Has(key Key) (exists bool, err error) // Delete removes the value for given `key`. - Delete(key Key) (err error) + Delete(key Key) error // Query searches the datastore and returns a query result. This function // may return before the query actually runs. To wait for the query: diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go new file mode 100644 index 000000000..c3bc96d4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go @@ -0,0 +1,241 @@ +// Package flatfs is a Datastore implementation that stores all +// objects in a two-level directory structure in the local file +// system, regardless of the hierarchy of the keys. 
+package flatfs + +import ( + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +const ( + extension = ".data" + maxPrefixLen = 16 +) + +var ( + ErrBadPrefixLen = errors.New("bad prefix length") +) + +type Datastore struct { + path string + // length of the dir splay prefix, in bytes of hex digits + hexPrefixLen int +} + +var _ datastore.Datastore = (*Datastore)(nil) + +func New(path string, prefixLen int) (*Datastore, error) { + if prefixLen <= 0 || prefixLen > maxPrefixLen { + return nil, ErrBadPrefixLen + } + fs := &Datastore{ + path: path, + // convert from binary bytes to bytes of hex encoding + hexPrefixLen: prefixLen * hex.EncodedLen(1), + } + return fs, nil +} + +var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1)) + +func (fs *Datastore) encode(key datastore.Key) (dir, file string) { + safe := hex.EncodeToString(key.Bytes()) + prefix := (safe + padding)[:fs.hexPrefixLen] + dir = path.Join(fs.path, prefix) + file = path.Join(dir, safe+extension) + return dir, file +} + +func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) { + if path.Ext(file) != extension { + return datastore.Key{}, false + } + name := file[:len(file)-len(extension)] + k, err := hex.DecodeString(name) + if err != nil { + return datastore.Key{}, false + } + return datastore.NewKey(string(k)), true +} + +func (fs *Datastore) makePrefixDir(dir string) error { + if err := os.Mkdir(dir, 0777); err != nil { + // EEXIST is safe to ignore here, that just means the prefix + // directory already existed. + if !os.IsExist(err) { + return err + } + } + + // In theory, if we create a new prefix dir and add a file to + // it, the creation of the prefix dir itself might not be + // durable yet. Sync the root dir after a successful mkdir of + // a prefix dir, just to be paranoid. 
+ f, err := os.Open(fs.path) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} + +func (fs *Datastore) Put(key datastore.Key, value interface{}) error { + val, ok := value.([]byte) + if !ok { + return datastore.ErrInvalidType + } + + dir, path := fs.encode(key) + if err := fs.makePrefixDir(dir); err != nil { + return err + } + + dirF, err := os.Open(dir) + if err != nil { + return err + } + defer dirF.Close() + + tmp, err := ioutil.TempFile(dir, "put-") + if err != nil { + return err + } + closed := false + removed := false + defer func() { + if !closed { + // silence errcheck + _ = tmp.Close() + } + if !removed { + // silence errcheck + _ = os.Remove(tmp.Name()) + } + }() + + if _, err := tmp.Write(val); err != nil { + return err + } + if err := tmp.Sync(); err != nil { + return err + } + if err := tmp.Close(); err != nil { + return err + } + closed = true + + err = os.Rename(tmp.Name(), path) + if err != nil { + return err + } + removed = true + + if err := dirF.Sync(); err != nil { + return err + } + + return nil +} + +func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) { + _, path := fs.encode(key) + data, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, datastore.ErrNotFound + } + // no specific error to return, so just pass it through + return nil, err + } + return data, nil +} + +func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) { + _, path := fs.encode(key) + switch _, err := os.Stat(path); { + case err == nil: + return true, nil + case os.IsNotExist(err): + return false, nil + default: + return false, err + } +} + +func (fs *Datastore) Delete(key datastore.Key) error { + _, path := fs.encode(key) + switch err := os.Remove(path); { + case err == nil: + return nil + case os.IsNotExist(err): + return datastore.ErrNotFound + default: + return err + } +} + +func (fs *Datastore) Query(q query.Query) 
(query.Results, error) { + if (q.Prefix != "" && q.Prefix != "/") || + len(q.Filters) > 0 || + len(q.Orders) > 0 || + q.Limit > 0 || + q.Offset > 0 || + !q.KeysOnly { + // TODO this is overly simplistic, but the only caller is + // `ipfs refs local` for now, and this gets us moving. + return nil, errors.New("flatfs only supports listing all keys in random order") + } + + // TODO this dumb implementation gathers all keys into a single slice. + root, err := os.Open(fs.path) + if err != nil { + return nil, err + } + defer root.Close() + + var res []query.Entry + prefixes, err := root.Readdir(0) + if err != nil { + return nil, err + } + for _, fi := range prefixes { + if !fi.IsDir() || fi.Name()[0] == '.' { + continue + } + child, err := os.Open(path.Join(fs.path, fi.Name())) + if err != nil { + return nil, err + } + defer child.Close() + objs, err := child.Readdir(0) + if err != nil { + return nil, err + } + for _, fi := range objs { + if !fi.Mode().IsRegular() || fi.Name()[0] == '.' { + continue + } + key, ok := fs.decode(fi.Name()) + if !ok { + continue + } + res = append(res, query.Entry{Key: key.String()}) + } + } + return query.ResultsWithEntries(q, res), nil +} + +var _ datastore.ThreadSafeDatastore = (*Datastore)(nil) + +func (*Datastore) IsThreadSafe() {} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go new file mode 100644 index 000000000..d5c5fcf18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go @@ -0,0 +1,315 @@ +package flatfs_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +func tempdir(t testing.TB) (path 
string, cleanup func()) {
+	path, err := ioutil.TempDir("", "test-datastore-flatfs-")
+	if err != nil {
+		t.Fatalf("cannot create temp directory: %v", err)
+	}
+
+	cleanup = func() {
+		if err := os.RemoveAll(path); err != nil {
+			t.Errorf("tempdir cleanup failed: %v", err)
+		}
+	}
+	return path, cleanup
+}
+
+func TestBadPrefixLen(t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	for i := 0; i > -3; i-- {
+		_, err := flatfs.New(temp, i)
+		if g, e := err, flatfs.ErrBadPrefixLen; g != e {
+			t.Errorf("expected ErrBadPrefixLen, got: %v", g)
+		}
+	}
+}
+
+func TestPutBadValueType(t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	fs, err := flatfs.New(temp, 2)
+	if err != nil {
+		t.Fatalf("New fail: %v\n", err)
+	}
+
+	err = fs.Put(datastore.NewKey("quux"), 22)
+	if g, e := err, datastore.ErrInvalidType; g != e {
+		t.Fatalf("expected ErrInvalidType, got: %v\n", g)
+	}
+}
+
+func TestPut(t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	fs, err := flatfs.New(temp, 2)
+	if err != nil {
+		t.Fatalf("New fail: %v\n", err)
+	}
+
+	err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
+	if err != nil {
+		t.Fatalf("Put fail: %v\n", err)
+	}
+}
+
+func TestGet(t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	fs, err := flatfs.New(temp, 2)
+	if err != nil {
+		t.Fatalf("New fail: %v\n", err)
+	}
+
+	const input = "foobar"
+	err = fs.Put(datastore.NewKey("quux"), []byte(input))
+	if err != nil {
+		t.Fatalf("Put fail: %v\n", err)
+	}
+
+	data, err := fs.Get(datastore.NewKey("quux"))
+	if err != nil {
+		t.Fatalf("Get failed: %v", err)
+	}
+	buf, ok := data.([]byte)
+	if !ok {
+		t.Fatalf("expected []byte from Get, got %T: %v", data, data)
+	}
+	if g, e := string(buf), input; g != e {
+		t.Fatalf("Get gave wrong content: %q != %q", g, e)
+	}
+}
+
+func TestPutOverwrite(t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	fs, err := flatfs.New(temp, 2)
+	if err != nil {
+		t.Fatalf("New fail: %v\n",
err) + } + + const ( + loser = "foobar" + winner = "xyzzy" + ) + err = fs.Put(datastore.NewKey("quux"), []byte(loser)) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), []byte(winner)) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + data, err := fs.Get(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Get failed: %v", err) + } + if g, e := string(data.([]byte)), winner; g != e { + t.Fatalf("Get gave wrong content: %q != %q", g, e) + } +} + +func TestGetNotFoundError(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + _, err = fs.Get(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestStorage(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + const prefixLen = 2 + const prefix = "2f71" + const target = prefix + "/2f71757578.data" + fs, err := flatfs.New(temp, prefixLen) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + seen := false + walk := func(absPath string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + path, err := filepath.Rel(temp, absPath) + if err != nil { + return err + } + switch path { + case ".", "..": + // ignore + case prefix: + if !fi.IsDir() { + t.Errorf("prefix directory is not a file? 
%v", fi.Mode()) + } + // we know it's there if we see the file, nothing more to + // do here + case target: + seen = true + if !fi.Mode().IsRegular() { + t.Errorf("expected a regular file, mode: %04o", fi.Mode()) + } + if g, e := fi.Mode()&os.ModePerm&0007, os.FileMode(0000); g != e { + t.Errorf("file should not be world accessible: %04o", fi.Mode()) + } + default: + t.Errorf("saw unexpected directory entry: %q %v", path, fi.Mode()) + } + return nil + } + if err := filepath.Walk(temp, walk); err != nil { + t.Fatal("walk: %v", err) + } + if !seen { + t.Error("did not see the data file") + } +} + +func TestHasNotFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + found, err := fs.Has(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Has fail: %v\n", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong Has: %v != %v", g, e) + } +} + +func TestHasFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + found, err := fs.Has(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Has fail: %v\n", err) + } + if g, e := found, true; g != e { + t.Fatalf("wrong Has: %v != %v", g, e) + } +} + +func TestDeleteNotFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Delete(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestDeleteFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + err = 
fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + err = fs.Delete(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Delete fail: %v\n", err) + } + + // check that it's gone + _, err = fs.Get(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected Get after Delete to give ErrNotFound, got: %v\n", g) + } +} + +func TestQuerySimple(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + const myKey = "quux" + err = fs.Put(datastore.NewKey(myKey), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + res, err := fs.Query(query.Query{KeysOnly: true}) + if err != nil { + t.Fatalf("Query fail: %v\n", err) + } + entries, err := res.Rest() + if err != nil { + t.Fatalf("Query Results.Rest fail: %v\n", err) + } + seen := false + for _, e := range entries { + switch e.Key { + case datastore.NewKey(myKey).String(): + seen = true + default: + t.Errorf("saw unexpected key: %q", e.Key) + } + } + if !seen { + t.Errorf("did not see wanted key %q in %+v", myKey, entries) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/fs/fs.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/fs/fs.go index 87166c8e0..f56896936 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/fs/fs.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/fs/fs.go @@ -1,3 +1,20 @@ +// Package fs is a simple Datastore implementation that stores keys +// are directories and files, mirroring the key. That is, the key +// "/foo/bar" is stored as file "PATH/foo/bar/.dsobject". +// +// This means key some segments will not work. 
For example, the
+// following keys will result in unwanted behavior:
+//
+//     - "/foo/./bar"
+//     - "/foo/../bar"
+//     - "/foo\x00bar"
+//
+// Keys that only differ in case may be confused with each other on
+// case insensitive file systems, for example in OS X.
+//
+// This package is intended for exploratory use, where the user would
+// examine the file system manually, and should only be used with
+// human-friendly, trusted keys. You have been warned.
 package fs
 
 import (
@@ -13,7 +30,7 @@ import (
 
 var ObjectKeySuffix = ".dsobject"
 
-// Datastore uses a standard Go map for internal storage.
+// Datastore uses a file per key to store values.
 type Datastore struct {
 	path string
 }
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go
index dc31b19a1..7f7732d18 100644
--- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go
+++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go
@@ -9,7 +9,6 @@ import (
 	. "gopkg.in/check.v1"
 )
 
-// Hook up gocheck into the "go test" runner.
 func Test(t *testing.T) { TestingT(t) }
 
 type DSSuite struct{}
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go
new file mode 100644
index 000000000..c066364a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go
@@ -0,0 +1,116 @@
+// Package mount provides a Datastore that has other Datastores
+// mounted at various key prefixes.
+package mount + +import ( + "errors" + "strings" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/keytransform" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +var ( + ErrNoMount = errors.New("no datastore mounted for this key") +) + +type Mount struct { + Prefix datastore.Key + Datastore datastore.Datastore +} + +func New(mounts []Mount) *Datastore { + // make a copy so we're sure it doesn't mutate + m := make([]Mount, len(mounts)) + for i, v := range mounts { + m[i] = v + } + return &Datastore{mounts: m} +} + +type Datastore struct { + mounts []Mount +} + +var _ datastore.Datastore = (*Datastore)(nil) + +func (d *Datastore) lookup(key datastore.Key) (ds datastore.Datastore, mountpoint, rest datastore.Key) { + for _, m := range d.mounts { + if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) { + s := strings.TrimPrefix(key.String(), m.Prefix.String()) + k := datastore.NewKey(s) + return m.Datastore, m.Prefix, k + } + } + return nil, datastore.NewKey("/"), key +} + +func (d *Datastore) Put(key datastore.Key, value interface{}) error { + ds, _, k := d.lookup(key) + if ds == nil { + return ErrNoMount + } + return ds.Put(k, value) +} + +func (d *Datastore) Get(key datastore.Key) (value interface{}, err error) { + ds, _, k := d.lookup(key) + if ds == nil { + return nil, datastore.ErrNotFound + } + return ds.Get(k) +} + +func (d *Datastore) Has(key datastore.Key) (exists bool, err error) { + ds, _, k := d.lookup(key) + if ds == nil { + return false, nil + } + return ds.Has(k) +} + +func (d *Datastore) Delete(key datastore.Key) error { + ds, _, k := d.lookup(key) + if ds == nil { + return datastore.ErrNotFound + } + return ds.Delete(k) +} + +func (d *Datastore) Query(q query.Query) (query.Results, error) { + if len(q.Filters) > 0 || + len(q.Orders) > 0 || + q.Limit > 0 || + q.Offset > 0 { + // TODO this is 
overly simplistic, but the only caller is + // `ipfs refs local` for now, and this gets us moving. + return nil, errors.New("mount only supports listing all prefixed keys in random order") + } + key := datastore.NewKey(q.Prefix) + ds, mount, k := d.lookup(key) + if ds == nil { + return nil, errors.New("mount only supports listing a mount point") + } + // TODO support listing cross mount points too + + // delegate the query to the mounted datastore, while adjusting + // keys in and out + q2 := q + q2.Prefix = k.String() + wrapDS := keytransform.Wrap(ds, &keytransform.Pair{ + Convert: func(datastore.Key) datastore.Key { + panic("this should never be called") + }, + Invert: func(k datastore.Key) datastore.Key { + return mount.Child(k) + }, + }) + + r, err := wrapDS.Query(q2) + if err != nil { + return nil, err + } + r = query.ResultsReplaceQuery(r, q) + return r, nil +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount_test.go new file mode 100644 index 000000000..0d38d6962 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount_test.go @@ -0,0 +1,241 @@ +package mount_test + +import ( + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +func TestPutBadNothing(t *testing.T) { + m := mount.New(nil) + + err := m.Put(datastore.NewKey("quux"), []byte("foobar")) + if g, e := err, mount.ErrNoMount; g != e { + t.Fatalf("Put got wrong error: %v != %v", g, e) + } +} + +func TestPutBadNoMount(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/redherring"), Datastore: mapds}, + }) + + err := m.Put(datastore.NewKey("/quux/thud"), []byte("foobar")) + if g, e := 
err, mount.ErrNoMount; g != e { + t.Fatalf("expected ErrNoMount, got: %v\n", g) + } +} + +func TestPut(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + if err := m.Put(datastore.NewKey("/quux/thud"), []byte("foobar")); err != nil { + t.Fatalf("Put error: %v", err) + } + + val, err := mapds.Get(datastore.NewKey("/thud")) + if err != nil { + t.Fatalf("Get error: %v", err) + } + buf, ok := val.([]byte) + if !ok { + t.Fatalf("Get value is not []byte: %T %v", val, val) + } + if g, e := string(buf), "foobar"; g != e { + t.Errorf("wrong value: %q != %q", g, e) + } +} + +func TestGetBadNothing(t *testing.T) { + m := mount.New([]mount.Mount{}) + + _, err := m.Get(datastore.NewKey("/quux/thud")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestGetBadNoMount(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/redherring"), Datastore: mapds}, + }) + + _, err := m.Get(datastore.NewKey("/quux/thud")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestGetNotFound(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + _, err := m.Get(datastore.NewKey("/quux/thud")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestGet(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil { + t.Fatalf("Get error: %v", err) + } + + val, err := m.Get(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Put error: %v", err) + } + + buf, ok := val.([]byte) + 
if !ok { + t.Fatalf("Get value is not []byte: %T %v", val, val) + } + if g, e := string(buf), "foobar"; g != e { + t.Errorf("wrong value: %q != %q", g, e) + } +} + +func TestHasBadNothing(t *testing.T) { + m := mount.New([]mount.Mount{}) + + found, err := m.Has(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Has error: %v", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong value: %v != %v", g, e) + } +} + +func TestHasBadNoMount(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/redherring"), Datastore: mapds}, + }) + + found, err := m.Has(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Has error: %v", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong value: %v != %v", g, e) + } +} + +func TestHasNotFound(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + found, err := m.Has(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Has error: %v", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong value: %v != %v", g, e) + } +} + +func TestHas(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil { + t.Fatalf("Put error: %v", err) + } + + found, err := m.Has(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Has error: %v", err) + } + if g, e := found, true; g != e { + t.Fatalf("wrong value: %v != %v", g, e) + } +} + +func TestDeleteNotFound(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + err := m.Delete(datastore.NewKey("/quux/thud")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + 
+func TestDelete(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil { + t.Fatalf("Put error: %v", err) + } + + err := m.Delete(datastore.NewKey("/quux/thud")) + if err != nil { + t.Fatalf("Delete error: %v", err) + } + + // make sure it disappeared + found, err := mapds.Has(datastore.NewKey("/thud")) + if err != nil { + t.Fatalf("Has error: %v", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong value: %v != %v", g, e) + } +} + +func TestQuerySimple(t *testing.T) { + mapds := datastore.NewMapDatastore() + m := mount.New([]mount.Mount{ + {Prefix: datastore.NewKey("/quux"), Datastore: mapds}, + }) + + const myKey = "/quux/thud" + if err := m.Put(datastore.NewKey(myKey), []byte("foobar")); err != nil { + t.Fatalf("Put error: %v", err) + } + + res, err := m.Query(query.Query{Prefix: "/quux"}) + if err != nil { + t.Fatalf("Query fail: %v\n", err) + } + entries, err := res.Rest() + if err != nil { + t.Fatalf("Query Results.Rest fail: %v\n", err) + } + seen := false + for _, e := range entries { + switch e.Key { + case datastore.NewKey(myKey).String(): + seen = true + default: + t.Errorf("saw unexpected key: %q", e.Key) + } + } + if !seen { + t.Errorf("did not see wanted key %q in %+v", myKey, entries) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered.go new file mode 100644 index 000000000..1e112d6ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered.go @@ -0,0 +1,94 @@ +package tiered + +import ( + "fmt" + "sync" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +type tiered []ds.Datastore + +// New returns 
a tiered datastore. Puts and Deletes will write-through to +// all datastores, Has and Get will try each datastore sequentially, and +// Query will always try the last one (most complete) first. +func New(dses ...ds.Datastore) ds.Datastore { + return tiered(dses) +} + +// Put stores the object `value` named by `key`. +func (d tiered) Put(key ds.Key, value interface{}) (err error) { + errs := make(chan error, len(d)) + + var wg sync.WaitGroup + for _, cd := range d { + wg.Add(1) + go func(cd ds.Datastore) { + defer wg.Done() + if err := cd.Put(key, value); err != nil { + errs <- err + } + }(cd) + } + wg.Wait() + + close(errs) + for err := range errs { + return err + } + return nil +} + +// Get retrieves the object `value` named by `key`. +func (d tiered) Get(key ds.Key) (value interface{}, err error) { + err = fmt.Errorf("no datastores") + for _, cd := range d { + value, err = cd.Get(key) + if err == nil { + break + } + } + return +} + +// Has returns whether the `key` is mapped to a `value`. +func (d tiered) Has(key ds.Key) (exists bool, err error) { + err = fmt.Errorf("no datastores") + for _, cd := range d { + exists, err = cd.Has(key) + if err == nil && exists { + break + } + } + return +} + +// Delete removes the value for given `key`. 
+func (d tiered) Delete(key ds.Key) (err error) { + errs := make(chan error, len(d)) + + var wg sync.WaitGroup + for _, cd := range d { + wg.Add(1) + go func(cd ds.Datastore) { + defer wg.Done() + if err := cd.Delete(key); err != nil { + errs <- err + } + }(cd) + } + wg.Wait() + + close(errs) + for err := range errs { + return err + } + return nil +} + +// Query returns a list of keys in the datastore +func (d tiered) Query(q dsq.Query) (dsq.Results, error) { + // query always the last (most complete) one + return d[len(d)-1].Query(q) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered_test.go new file mode 100644 index 000000000..886051435 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered_test.go @@ -0,0 +1,79 @@ +package tiered + +import ( + "testing" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dscb "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback" + dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +func testHas(t *testing.T, dses []ds.Datastore, k ds.Key, v interface{}) { + // all under should have it + for _, d := range dses { + if v2, err := d.Get(k); err != nil { + t.Error(err) + } else if v2 != v { + t.Error("value incorrect", d, k, v, v2) + } + + if has, err := d.Has(k); err != nil { + t.Error(err) + } else if !has { + t.Error("should have it", d, k, v) + } + } +} + +func testNotHas(t *testing.T, dses []ds.Datastore, k ds.Key) { + // all under should not have it + for _, d := range dses { + if _, err := d.Get(k); err == nil { + t.Error("should not have it", d, k) + } + + if has, err := d.Has(k); err != nil { + t.Error(err) + } else if has { + t.Error("should not have it", d, k) + } + } +} + +func TestTiered(t *testing.T) { + d1 := ds.NewMapDatastore() + d2 := ds.NewMapDatastore() + d3 
:= ds.NewMapDatastore() + d4 := ds.NewMapDatastore() + + td := New(d1, d2, d3, d4) + td.Put(ds.NewKey("foo"), "bar") + testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar") + testHas(t, td.(tiered), ds.NewKey("foo"), "bar") // all children + + // remove it from, say, caches. + d1.Delete(ds.NewKey("foo")) + d2.Delete(ds.NewKey("foo")) + testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar") + testHas(t, td.(tiered)[2:], ds.NewKey("foo"), "bar") + testNotHas(t, td.(tiered)[:2], ds.NewKey("foo")) + + // write it again. + td.Put(ds.NewKey("foo"), "bar2") + testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar2") + testHas(t, td.(tiered), ds.NewKey("foo"), "bar2") +} + +func TestQueryCallsLast(t *testing.T) { + var d1n, d2n, d3n int + d1 := dscb.Wrap(ds.NewMapDatastore(), func() { d1n++ }) + d2 := dscb.Wrap(ds.NewMapDatastore(), func() { d2n++ }) + d3 := dscb.Wrap(ds.NewMapDatastore(), func() { d3n++ }) + + td := New(d1, d2, d3) + + td.Query(dsq.Query{}) + if d3n < 1 { + t.Error("should call last") + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go new file mode 100644 index 000000000..6109b05f9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go @@ -0,0 +1,96 @@ +package timecache + +import ( + "sync" + "time" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" +) + +var ( + putKey = "put" + getKey = // op keys + "get" + hasKey = "has" + deleteKey = "delete" +) + +type datastore struct { + cache ds.Datastore + ttl time.Duration + + ttlmu sync.Mutex + ttls map[ds.Key]time.Time +} + +func WithTTL(ttl time.Duration) ds.Datastore { + return WithCache(ds.NewMapDatastore(), ttl) +} + +// WithCache wraps a given datastore as a timecache. 
+// Get + Has requests are considered expired after a TTL. +func WithCache(d ds.Datastore, ttl time.Duration) ds.Datastore { + return &datastore{cache: d, ttl: ttl, ttls: make(map[ds.Key]time.Time)} +} + +func (d *datastore) gc() { + var now = time.Now() + var del []ds.Key + + // remove all expired ttls. + d.ttlmu.Lock() + for k, ttl := range d.ttls { + if now.After(ttl) { + delete(d.ttls, k) + del = append(del, k) + } + } + d.ttlmu.Unlock() + + for _, k := range del { + d.cache.Delete(k) + } +} + +func (d *datastore) ttlPut(key ds.Key) { + d.ttlmu.Lock() + d.ttls[key] = time.Now().Add(d.ttl) + d.ttlmu.Unlock() +} + +func (d *datastore) ttlDelete(key ds.Key) { + d.ttlmu.Lock() + delete(d.ttls, key) + d.ttlmu.Unlock() +} + +// Put stores the object `value` named by `key`. +func (d *datastore) Put(key ds.Key, value interface{}) (err error) { + err = d.cache.Put(key, value) + d.ttlPut(key) + return err +} + +// Get retrieves the object `value` named by `key`. +func (d *datastore) Get(key ds.Key) (value interface{}, err error) { + d.gc() + return d.cache.Get(key) +} + +// Has returns whether the `key` is mapped to a `value`. +func (d *datastore) Has(key ds.Key) (exists bool, err error) { + d.gc() + return d.cache.Has(key) +} + +// Delete removes the value for given `key`. 
+func (d *datastore) Delete(key ds.Key) (err error) { + d.ttlDelete(key) + return d.cache.Delete(key) +} + +// Query returns a list of keys in the datastore +func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { + return d.cache.Query(q) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache_test.go new file mode 100644 index 000000000..20c2ea88c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache_test.go @@ -0,0 +1,64 @@ +package timecache + +import ( + "testing" + "time" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" +) + +func testHas(t *testing.T, d ds.Datastore, k ds.Key, v interface{}) { + if v2, err := d.Get(k); err != nil { + t.Error(err) + } else if v2 != v { + t.Error("value incorrect", d, k, v, v2) + } + + if has, err := d.Has(k); err != nil { + t.Error(err) + } else if !has { + t.Error("should have it", d, k, v) + } +} + +func testNotHas(t *testing.T, d ds.Datastore, k ds.Key) { + if _, err := d.Get(k); err == nil { + t.Error("should not have it", d, k) + } + + if has, err := d.Has(k); err != nil { + t.Error(err) + } else if has { + t.Error("should not have it", d, k) + } +} + +func TestTimeCache(t *testing.T) { + ttl := time.Millisecond * 100 + cache := WithTTL(ttl) + cache.Put(ds.NewKey("foo1"), "bar1") + cache.Put(ds.NewKey("foo2"), "bar2") + + <-time.After(ttl / 2) + cache.Put(ds.NewKey("foo3"), "bar3") + cache.Put(ds.NewKey("foo4"), "bar4") + testHas(t, cache, ds.NewKey("foo1"), "bar1") + testHas(t, cache, ds.NewKey("foo2"), "bar2") + testHas(t, cache, ds.NewKey("foo3"), "bar3") + testHas(t, cache, ds.NewKey("foo4"), "bar4") + + <-time.After(ttl / 2) + testNotHas(t, cache, ds.NewKey("foo1")) + testNotHas(t, cache, ds.NewKey("foo2")) + testHas(t, cache, ds.NewKey("foo3"), "bar3") + testHas(t, cache, ds.NewKey("foo4"), "bar4") + + 
cache.Delete(ds.NewKey("foo3")) + testNotHas(t, cache, ds.NewKey("foo3")) + + <-time.After(ttl / 2) + testNotHas(t, cache, ds.NewKey("foo1")) + testNotHas(t, cache, ds.NewKey("foo2")) + testNotHas(t, cache, ds.NewKey("foo3")) + testNotHas(t, cache, ds.NewKey("foo4")) +} From 24daeec70c7c600e3f1c413fe6bf11cb57d5b243 Mon Sep 17 00:00:00 2001 From: Tommi Virtanen Date: Mon, 16 Mar 2015 14:03:28 -0700 Subject: [PATCH 06/15] Use flatfs to store objects under /blocks outside of LevelDB WARNING: No migration performed! That needs to come in a separate commit, perhaps amended into this one. Migration must move keyspace "/b" from leveldb to the flatfs subdir, while removing the "b" prefix (keys should start with just "/"). --- .../jbenet/go-datastore/flatfs/flatfs.go | 4 +-- .../jbenet/go-datastore/mount/mount.go | 6 ++-- blocks/blockstore/blockstore.go | 4 ++- repo/fsrepo/fsrepo.go | 35 ++++++++++++++++++- util/datastore2/threadsafe.go | 15 ++++++++ 5 files changed, 57 insertions(+), 7 deletions(-) create mode 100644 util/datastore2/threadsafe.go diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go index c3bc96d4c..4704c3e1f 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go @@ -11,8 +11,8 @@ import ( "path" "strings" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" ) const ( diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go index c066364a2..3c66cc3cf 100644 --- 
a/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go @@ -6,9 +6,9 @@ import ( "errors" "strings" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/keytransform" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/keytransform" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" ) var ( diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go index 7f3d4d7c8..ccc7e8fc5 100644 --- a/blocks/blockstore/blockstore.go +++ b/blocks/blockstore/blockstore.go @@ -18,7 +18,7 @@ import ( var log = eventlog.Logger("blockstore") // BlockPrefix namespaces blockstore datastores -var BlockPrefix = ds.NewKey("b") +var BlockPrefix = ds.NewKey("blocks") var ValueTypeMismatch = errors.New("The retrieved value is not a Block") @@ -89,6 +89,8 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan u.Key, error) { // KeysOnly, because that would be _a lot_ of data. 
q := dsq.Query{KeysOnly: true} + // datastore/namespace does *NOT* fix up Query.Prefix + q.Prefix = BlockPrefix.String() res, err := bs.datastore.Query(q) if err != nil { return nil, err diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index a9e750d54..ce856651d 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -10,7 +10,9 @@ import ( "sync" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount" ldbopts "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" repo "github.com/ipfs/go-ipfs/repo" "github.com/ipfs/go-ipfs/repo/common" @@ -26,6 +28,7 @@ import ( const ( leveldbDirectory = "datastore" + flatfsDirectory = "blocks" ) var ( @@ -196,6 +199,11 @@ func Init(repoPath string, conf *config.Config) error { return fmt.Errorf("datastore: %s", err) } + flatfsPath := path.Join(repoPath, flatfsDirectory) + if err := dir.Writable(flatfsPath); err != nil { + return fmt.Errorf("datastore: %s", err) + } + if err := dir.Writable(path.Join(repoPath, "logs")); err != nil { return err } @@ -246,7 +254,32 @@ func (r *FSRepo) openDatastore() error { if err != nil { return errors.New("unable to open leveldb datastore") } - r.ds = r.leveldbDS + + // 4TB of 256kB objects ~=17M objects, splitting that 256-way + // leads to ~66k objects per dir, splitting 256*256-way leads to + // only 256. + // + // The keys seen by the block store have predictable prefixes, + // including "/" from datastore.Key and 2 bytes from multihash. To + // reach a uniform 256-way split, we need approximately 4 bytes of + // prefix. 
+ blocksDS, err := flatfs.New(path.Join(r.path, flatfsDirectory), 4) + if err != nil { + return errors.New("unable to open flatfs datastore") + } + + mountDS := mount.New([]mount.Mount{ + {Prefix: ds.NewKey("/blocks"), Datastore: blocksDS}, + {Prefix: ds.NewKey("/"), Datastore: r.leveldbDS}, + }) + // Make sure it's ok to claim the virtual datastore from mount as + // threadsafe. There's no clean way to make mount itself provide + // this information without copy-pasting the code into two + // variants. This is the same dilemma as the `[].byte` attempt at + // introducing const types to Go. + var _ ds.ThreadSafeDatastore = blocksDS + var _ ds.ThreadSafeDatastore = r.leveldbDS + r.ds = ds2.ClaimThreadSafe{mountDS} return nil } diff --git a/util/datastore2/threadsafe.go b/util/datastore2/threadsafe.go new file mode 100644 index 000000000..2995943ca --- /dev/null +++ b/util/datastore2/threadsafe.go @@ -0,0 +1,15 @@ +package datastore2 + +import ( + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" +) + +// ClaimThreadSafe claims that a Datastore is threadsafe, even when +// it's type does not guarantee this. Use carefully. 
+type ClaimThreadSafe struct { + datastore.Datastore +} + +var _ datastore.ThreadSafeDatastore = ClaimThreadSafe{} + +func (ClaimThreadSafe) IsThreadSafe() {} From e965c53780003a01fcbd94af96333ace4b3e2748 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 8 Apr 2015 14:51:15 -0700 Subject: [PATCH 07/15] remove / prefix and close directories properly in query --- .../jbenet/go-datastore/flatfs/flatfs.go | 49 +++++++++++-------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go index 4704c3e1f..76b1e628a 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go @@ -47,7 +47,7 @@ func New(path string, prefixLen int) (*Datastore, error) { var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1)) func (fs *Datastore) encode(key datastore.Key) (dir, file string) { - safe := hex.EncodeToString(key.Bytes()) + safe := hex.EncodeToString(key.Bytes()[1:]) prefix := (safe + padding)[:fs.hexPrefixLen] dir = path.Join(fs.path, prefix) file = path.Join(dir, safe+extension) @@ -210,32 +210,41 @@ func (fs *Datastore) Query(q query.Query) (query.Results, error) { return nil, err } for _, fi := range prefixes { - if !fi.IsDir() || fi.Name()[0] == '.' { - continue - } - child, err := os.Open(path.Join(fs.path, fi.Name())) + var err error + res, err = fs.enumerateKeys(fi, res) if err != nil { return nil, err } - defer child.Close() - objs, err := child.Readdir(0) - if err != nil { - return nil, err - } - for _, fi := range objs { - if !fi.Mode().IsRegular() || fi.Name()[0] == '.' 
{ - continue - } - key, ok := fs.decode(fi.Name()) - if !ok { - continue - } - res = append(res, query.Entry{Key: key.String()}) - } } return query.ResultsWithEntries(q, res), nil } +func (fs *Datastore) enumerateKeys(fi os.FileInfo, res []query.Entry) ([]query.Entry, error) { + if !fi.IsDir() || fi.Name()[0] == '.' { + return res, nil + } + child, err := os.Open(path.Join(fs.path, fi.Name())) + if err != nil { + return nil, err + } + defer child.Close() + objs, err := child.Readdir(0) + if err != nil { + return nil, err + } + for _, fi := range objs { + if !fi.Mode().IsRegular() || fi.Name()[0] == '.' { + return res, nil + } + key, ok := fs.decode(fi.Name()) + if !ok { + return res, nil + } + res = append(res, query.Entry{Key: key.String()}) + } + return res, nil +} + var _ datastore.ThreadSafeDatastore = (*Datastore)(nil) func (*Datastore) IsThreadSafe() {} From 2c79e5ddb51d2664a0a04deae8b97c04a6d76c6f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 8 Apr 2015 16:18:30 -0700 Subject: [PATCH 08/15] add 1-to-2 migration program --- repo/fsrepo/migrations/1-to-2/main.go | 366 ++++++++++++++++++++++++++ 1 file changed, 366 insertions(+) create mode 100644 repo/fsrepo/migrations/1-to-2/main.go diff --git a/repo/fsrepo/migrations/1-to-2/main.go b/repo/fsrepo/migrations/1-to-2/main.go new file mode 100644 index 000000000..84c0dbb33 --- /dev/null +++ b/repo/fsrepo/migrations/1-to-2/main.go @@ -0,0 +1,366 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + dstore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + flatfs "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" + leveldb "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" + dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" + migrate "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-migrate" + fsrepo 
"github.com/ipfs/go-ipfs/repo/fsrepo" + mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" +) + +var _ = context.Background + +const peerKeyName = "peer.key" + +type migration struct{} + +func (m migration) Versions() string { + return "1-to-2" +} + +func (m migration) Reversible() bool { + return true +} + +func (m migration) Apply(opts migrate.Options) error { + repo := mfsr.RepoPath(opts.Path) + + if err := repo.CheckVersion("1"); err != nil { + return err + } + + // 1) move key out of config, into its own path + err := moveKeyOutOfConfig(opts.Path) + if err != nil { + return err + } + + // 2) Transfer blocks out of leveldb into flatDB + err = transferBlocksToFlatDB(opts.Path) + if err != nil { + return err + } + + // 3) move ipfs path from .go-ipfs to .ipfs + newpath, err := moveIpfsDir(opts.Path) + if err != nil { + return err + } + + // 4) Update version number + repo = mfsr.RepoPath(newpath) + err = repo.WriteVersion("2") + if err != nil { + return err + } + + return nil +} + +func (m migration) Revert(opts migrate.Options) error { + repo := mfsr.RepoPath(opts.Path) + if err := repo.CheckVersion("2"); err != nil { + return err + } + + // 1) Move directory back to .go-ipfs + npath, err := reverseIpfsDir(opts.Path) + if err != nil { + return err + } + + // 2) move blocks back from flatfs to leveldb + err = transferBlocksFromFlatDB(npath) + if err != nil { + return err + } + + // 3) move key back into config + err = moveKeyIntoConfig(npath) + if err != nil { + return err + } + + // 4) change version number back down + repo = mfsr.RepoPath(npath) + err = repo.WriteVersion("1") + if err != nil { + return err + } + + return nil +} + +func transferBlocksToFlatDB(repopath string) error { + r, err := fsrepo.Open(repopath) + if err != nil { + return err + } + + blockspath := path.Join(repopath, "blocks") + err = os.Mkdir(blockspath, 0777) + if err != nil { + return err + } + + fds, 
err := flatfs.New(blockspath, 4) + if err != nil { + return err + } + + return transferBlocks(r.Datastore(), fds, "/b/", "") +} + +func transferBlocksFromFlatDB(repopath string) error { + + ldbpath := path.Join(repopath, "datastore") + blockspath := path.Join(repopath, "blocks") + fds, err := flatfs.New(blockspath, 4) + if err != nil { + return err + } + + ldb, err := leveldb.NewDatastore(ldbpath, nil) + if err != nil { + return err + } + + err = transferBlocks(fds, ldb, "", "/b/") + if err != nil { + return err + } + + // Now remove the blocks directory + err = os.RemoveAll(blockspath) + if err != nil { + return err + } + + return nil +} + +func transferBlocks(from, to dstore.Datastore, fpref, tpref string) error { + q := dsq.Query{Prefix: fpref, KeysOnly: true} + res, err := from.Query(q) + if err != nil { + return err + } + + fmt.Println("Starting query") + for result := range res.Next() { + nkey := fmt.Sprintf("%s%s", tpref, result.Key[len(fpref):]) + + fkey := dstore.NewKey(result.Key) + val, err := from.Get(fkey) + if err != nil { + return err + } + + err = to.Put(dstore.NewKey(nkey), val) + if err != nil { + return err + } + + err = from.Delete(fkey) + if err != nil { + return err + } + } + fmt.Println("Query done") + + return nil +} + +func moveKeyOutOfConfig(repopath string) error { + // Make keys directory + keypath := path.Join(repopath, "keys") + err := os.Mkdir(keypath, 0777) + if err != nil { + return err + } + + // Grab the config + cfg, err := loadConfigJSON(repopath) + if err != nil { + return err + } + + // get the private key from it + privKey, err := getPrivateKeyFromConfig(cfg) + if err != nil { + return err + } + + keyfilepath := path.Join(keypath, peerKeyName) + fi, err := os.OpenFile(keyfilepath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + + // Write our b64-protobuf encoded key + _, err = fi.WriteString(privKey) + if err != nil { + return err + } + + err = fi.Close() + if err != nil { + return err + } + + // Now that 
the key is safely in its own file, remove it from the config + err = clearPrivateKeyFromConfig(cfg) + if err != nil { + return err + } + + err = saveConfigJSON(repopath, cfg) + if err != nil { + return err + } + + return nil +} + +// Part of the 2-to-1 revert process +func moveKeyIntoConfig(repopath string) error { + // Make keys directory + keypath := path.Join(repopath, "keys") + + // Grab the config + cfg, err := loadConfigJSON(repopath) + if err != nil { + return err + } + + keyfilepath := path.Join(keypath, peerKeyName) + pkey, err := ioutil.ReadFile(keyfilepath) + if err != nil { + return err + } + + id, ok := cfg["Identity"] + if !ok { + return errors.New("expected to find an identity object in config") + } + identity, ok := id.(map[string]interface{}) + if !ok { + return errors.New("expected Identity in config to be an object") + } + identity["PrivKey"] = string(pkey) + + err = saveConfigJSON(repopath, cfg) + if err != nil { + return err + } + + // Now that the key is safely in the config, delete the file + err = os.RemoveAll(keypath) + if err != nil { + return err + } + + return nil +} + +func moveIpfsDir(curpath string) (string, error) { + newpath := strings.Replace(curpath, ".go-ipfs", ".ipfs", 1) + return newpath, os.Rename(curpath, newpath) +} + +func reverseIpfsDir(curpath string) (string, error) { + newpath := strings.Replace(curpath, ".ipfs", ".go-ipfs", 1) + return newpath, os.Rename(curpath, newpath) +} + +func loadConfigJSON(repoPath string) (map[string]interface{}, error) { + cfgPath := path.Join(repoPath, "config") + fi, err := os.Open(cfgPath) + if err != nil { + return nil, err + } + + var out map[string]interface{} + err = json.NewDecoder(fi).Decode(&out) + if err != nil { + return nil, err + } + + return out, nil +} + +func saveConfigJSON(repoPath string, cfg map[string]interface{}) error { + cfgPath := path.Join(repoPath, "config") + fi, err := os.Create(cfgPath) + if err != nil { + return err + } + + out, err := json.MarshalIndent(cfg, 
"", "\t") + if err != nil { + return err + } + + _, err = fi.Write(out) + if err != nil { + return err + } + + return nil +} + +func getPrivateKeyFromConfig(cfg map[string]interface{}) (string, error) { + ident, ok := cfg["Identity"] + if !ok { + return "", errors.New("no identity found in config") + } + + identMap, ok := ident.(map[string]interface{}) + if !ok { + return "", errors.New("expected Identity to be object (map)") + } + + privkey, ok := identMap["PrivKey"] + if !ok { + return "", errors.New("no PrivKey field found in Identity") + } + + privkeyStr, ok := privkey.(string) + if !ok { + return "", errors.New("expected PrivKey to be a string") + } + + return privkeyStr, nil +} + +func clearPrivateKeyFromConfig(cfg map[string]interface{}) error { + ident, ok := cfg["Identity"] + if !ok { + return errors.New("no identity found in config") + } + + identMap, ok := ident.(map[string]interface{}) + if !ok { + return errors.New("expected Identity to be object (map)") + } + + delete(identMap, "PrivKey") + return nil +} + +func main() { + m := migration{} + migrate.Main(&m) +} From f3fbedf31279dac516eb6bbdec788f9515c793e3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 8 Apr 2015 23:16:40 -0700 Subject: [PATCH 09/15] no longer worry about moving key out of config file --- cmd/ipfs/daemon.go | 2 +- repo/fsrepo/fsrepo.go | 26 ++++ repo/fsrepo/migrations/1-to-2/main.go | 172 ++++---------------------- 3 files changed, 54 insertions(+), 146 deletions(-) diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index 75b4b0b48..94e20a70e 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -119,7 +119,7 @@ func daemonFunc(req cmds.Request, res cmds.Response) { // sure we are permitted to access the resources (datastore, etc.) repo, err := fsrepo.Open(req.Context().ConfigRoot) if err != nil { - res.SetError(fmt.Errorf("Couldn't obtain lock. 
Is another daemon already running?"), cmds.ErrNormal) + res.SetError(err, cmds.ErrNormal) return } diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index ce856651d..96e9210b8 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "path" "strconv" @@ -18,6 +19,7 @@ import ( "github.com/ipfs/go-ipfs/repo/common" config "github.com/ipfs/go-ipfs/repo/config" lockfile "github.com/ipfs/go-ipfs/repo/fsrepo/lock" + mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" serialize "github.com/ipfs/go-ipfs/repo/fsrepo/serialize" dir "github.com/ipfs/go-ipfs/thirdparty/dir" "github.com/ipfs/go-ipfs/thirdparty/eventlog" @@ -26,6 +28,8 @@ import ( ds2 "github.com/ipfs/go-ipfs/util/datastore2" ) +var RepoVersion = "2" + const ( leveldbDirectory = "datastore" flatfsDirectory = "blocks" @@ -107,6 +111,24 @@ func open(repoPath string) (repo.Repo, error) { if !isInitializedUnsynced(r.path) { return nil, errors.New("ipfs not initialized, please run 'ipfs init'") } + + // Check version, and error out if not matching + ver, err := ioutil.ReadFile(path.Join(expPath, "version")) + if err != nil { + if os.IsNotExist(err) { + return nil, errors.New("version check failed, no version file found, please run 0-to-1 migration tool.") + } + return nil, err + } + + vers := string(ver)[:1] + + if vers != RepoVersion { + return nil, fmt.Errorf("Repo has incorrect version: '%s'\nProgram version is: '%s'\nPlease run the appropriate migration tool before continuing", + vers, RepoVersion) + + } + // check repo path, then check all constituent parts. 
// TODO acquire repo lock // TODO if err := initCheckDir(logpath); err != nil { // } @@ -208,6 +230,10 @@ func Init(repoPath string, conf *config.Config) error { return err } + if err := mfsr.RepoPath(repoPath).WriteVersion(RepoVersion); err != nil { + return err + } + return nil } diff --git a/repo/fsrepo/migrations/1-to-2/main.go b/repo/fsrepo/migrations/1-to-2/main.go index 84c0dbb33..28b6c0abd 100644 --- a/repo/fsrepo/migrations/1-to-2/main.go +++ b/repo/fsrepo/migrations/1-to-2/main.go @@ -2,9 +2,7 @@ package main import ( "encoding/json" - "errors" "fmt" - "io/ioutil" "os" "path" "strings" @@ -14,7 +12,6 @@ import ( leveldb "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" migrate "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-migrate" - fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" @@ -41,8 +38,8 @@ func (m migration) Apply(opts migrate.Options) error { return err } - // 1) move key out of config, into its own path - err := moveKeyOutOfConfig(opts.Path) + // 1) run some sanity checks to make sure we should even bother + err := sanityChecks(opts) if err != nil { return err } @@ -87,13 +84,7 @@ func (m migration) Revert(opts migrate.Options) error { return err } - // 3) move key back into config - err = moveKeyIntoConfig(npath) - if err != nil { - return err - } - - // 4) change version number back down + // 3) change version number back down repo = mfsr.RepoPath(npath) err = repo.WriteVersion("1") if err != nil { @@ -103,8 +94,30 @@ func (m migration) Revert(opts migrate.Options) error { return nil } +// sanityChecks performs a set of tests to make sure the migration will go +// smoothly +func sanityChecks(opts migrate.Options) error { + npath := strings.Replace(opts.Path, 
".go-ipfs", ".ipfs", 1) + + // make sure we can move the repo from .go-ipfs to .ipfs + err := os.Mkdir(npath, 0777) + if err != nil { + return err + } + + // we can? good, remove it now + err = os.Remove(npath) + if err != nil { + // this is weird... not worth continuing + return err + } + + return nil +} + func transferBlocksToFlatDB(repopath string) error { - r, err := fsrepo.Open(repopath) + ldbpath := path.Join(repopath, "datastore") + ldb, err := leveldb.NewDatastore(ldbpath, nil) if err != nil { return err } @@ -120,7 +133,7 @@ func transferBlocksToFlatDB(repopath string) error { return err } - return transferBlocks(r.Datastore(), fds, "/b/", "") + return transferBlocks(ldb, fds, "/b/", "") } func transferBlocksFromFlatDB(repopath string) error { @@ -183,98 +196,6 @@ func transferBlocks(from, to dstore.Datastore, fpref, tpref string) error { return nil } -func moveKeyOutOfConfig(repopath string) error { - // Make keys directory - keypath := path.Join(repopath, "keys") - err := os.Mkdir(keypath, 0777) - if err != nil { - return err - } - - // Grab the config - cfg, err := loadConfigJSON(repopath) - if err != nil { - return err - } - - // get the private key from it - privKey, err := getPrivateKeyFromConfig(cfg) - if err != nil { - return err - } - - keyfilepath := path.Join(keypath, peerKeyName) - fi, err := os.OpenFile(keyfilepath, os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return err - } - - // Write our b64-protobuf encoded key - _, err = fi.WriteString(privKey) - if err != nil { - return err - } - - err = fi.Close() - if err != nil { - return err - } - - // Now that the key is safely in its own file, remove it from the config - err = clearPrivateKeyFromConfig(cfg) - if err != nil { - return err - } - - err = saveConfigJSON(repopath, cfg) - if err != nil { - return err - } - - return nil -} - -// Part of the 2-to-1 revert process -func moveKeyIntoConfig(repopath string) error { - // Make keys directory - keypath := path.Join(repopath, "keys") - - // 
Grab the config - cfg, err := loadConfigJSON(repopath) - if err != nil { - return err - } - - keyfilepath := path.Join(keypath, peerKeyName) - pkey, err := ioutil.ReadFile(keyfilepath) - if err != nil { - return err - } - - id, ok := cfg["Identity"] - if !ok { - return errors.New("expected to find an identity object in config") - } - identity, ok := id.(map[string]interface{}) - if !ok { - return errors.New("expected Identity in config to be an object") - } - identity["PrivKey"] = string(pkey) - - err = saveConfigJSON(repopath, cfg) - if err != nil { - return err - } - - // Now that the key is safely in the config, delete the file - err = os.RemoveAll(keypath) - if err != nil { - return err - } - - return nil -} - func moveIpfsDir(curpath string) (string, error) { newpath := strings.Replace(curpath, ".go-ipfs", ".ipfs", 1) return newpath, os.Rename(curpath, newpath) @@ -321,45 +242,6 @@ func saveConfigJSON(repoPath string, cfg map[string]interface{}) error { return nil } -func getPrivateKeyFromConfig(cfg map[string]interface{}) (string, error) { - ident, ok := cfg["Identity"] - if !ok { - return "", errors.New("no identity found in config") - } - - identMap, ok := ident.(map[string]interface{}) - if !ok { - return "", errors.New("expected Identity to be object (map)") - } - - privkey, ok := identMap["PrivKey"] - if !ok { - return "", errors.New("no PrivKey field found in Identity") - } - - privkeyStr, ok := privkey.(string) - if !ok { - return "", errors.New("expected PrivKey to be a string") - } - - return privkeyStr, nil -} - -func clearPrivateKeyFromConfig(cfg map[string]interface{}) error { - ident, ok := cfg["Identity"] - if !ok { - return errors.New("no identity found in config") - } - - identMap, ok := ident.(map[string]interface{}) - if !ok { - return errors.New("expected Identity to be object (map)") - } - - delete(identMap, "PrivKey") - return nil -} - func main() { m := migration{} migrate.Main(&m) From 96a22c5bb1713b0ecf6502ee2bd01f383b4956ec Mon Sep 17 
00:00:00 2001 From: Christian Couder Date: Sun, 29 Mar 2015 13:25:01 +0200 Subject: [PATCH 10/15] config: change default config dir name to .ipfs This changes .go-ipfs to .ipfs everywhere. And by the way this defines a DefaultPathName const for this name. License: MIT Signed-off-by: Christian Couder --- .gitignore | 2 +- README.md | 2 +- core/commands/config.go | 2 +- docs/fuse.md | 2 +- jenkins/network-test.sh | 6 +++--- repo/config/config.go | 4 +++- repo/fsrepo/doc.go | 2 +- test/3nodetest/bootstrap/Dockerfile | 2 +- test/3nodetest/bootstrap/config | 4 ++-- test/3nodetest/client/Dockerfile | 2 +- test/3nodetest/client/config | 4 ++-- test/3nodetest/server/Dockerfile | 2 +- test/3nodetest/server/config | 4 ++-- test/bench/bench_cli_ipfs_add/main.go | 2 +- test/bench/offline_add/main.go | 2 +- test/jenkins/network-test.sh | 6 +++--- test/sharness/README.md | 10 +++++----- test/sharness/lib/test-lib.sh | 2 +- test/sharness/t0020-init.sh | 12 ++++++------ test/sharness/t0060-daemon.sh | 12 ++++++------ test/supernode_client/.gitignore | 2 +- test/supernode_client/main.go | 2 +- 22 files changed, 45 insertions(+), 43 deletions(-) diff --git a/.gitignore b/.gitignore index 1cbb7d055..c22a1447d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,6 @@ *.test *.orig *~ -.go-ipfs /test/bin +.ipfs diff --git a/README.md b/README.md index ce70bccff..cbbe25927 100644 --- a/README.md +++ b/README.md @@ -145,7 +145,7 @@ accomplished with the following command: ### Troubleshooting If you have previously installed ipfs before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere -else) your ipfs config directory (~/.go-ipfs by default) and rerunning `ipfs init`. +else) your ipfs config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries. 
diff --git a/core/commands/config.go b/core/commands/config.go index e05c71f45..64c320742 100644 --- a/core/commands/config.go +++ b/core/commands/config.go @@ -49,7 +49,7 @@ Get the value of the 'datastore.path' key: Set the value of the 'datastore.path' key: - ipfs config datastore.path ~/.go-ipfs/datastore + ipfs config datastore.path ~/.ipfs/datastore `, }, diff --git a/docs/fuse.md b/docs/fuse.md index 60d79be52..f23fd4f85 100644 --- a/docs/fuse.md +++ b/docs/fuse.md @@ -1,6 +1,6 @@ # FUSE -As a golang project, `go-ipfs` is easily downloaded and installed with `go get github.com/ipfs/go-ipfs`. All data is stored in a leveldb data store in `~/.go-ipfs/datastore`. If, however, you would like to mount the datastore (`ipfs mount /ipfs`) and use it as you would a normal filesystem, you will need to install fuse. +As a golang project, `go-ipfs` is easily downloaded and installed with `go get github.com/ipfs/go-ipfs`. All data is stored in a leveldb data store in `~/.ipfs/datastore`. If, however, you would like to mount the datastore (`ipfs mount /ipfs`) and use it as you would a normal filesystem, you will need to install fuse. As a precursor, you will have to create the `/ipfs` and `/ipns` directories explicitly. Note that modifying root requires sudo permissions. 
diff --git a/jenkins/network-test.sh b/jenkins/network-test.sh index a4caf431c..5502e80d3 100755 --- a/jenkins/network-test.sh +++ b/jenkins/network-test.sh @@ -8,6 +8,6 @@ make clean make test make save_logs -docker cp dockertest_server_1:/root/.go-ipfs/logs/events.log $(PWD)/build/server-events.log -docker cp dockertest_bootstrap_1:/root/.go-ipfs/logs/events.log $(PWD)/build/bootstrap-events.log -docker cp dockertest_client_1:/root/.go-ipfs/logs/events.log $(PWD)/build/client-events.log +docker cp dockertest_server_1:/root/.ipfs/logs/events.log $(PWD)/build/server-events.log +docker cp dockertest_bootstrap_1:/root/.ipfs/logs/events.log $(PWD)/build/bootstrap-events.log +docker cp dockertest_client_1:/root/.ipfs/logs/events.log $(PWD)/build/client-events.log diff --git a/repo/config/config.go b/repo/config/config.go index 6876f9603..83cd2abb9 100644 --- a/repo/config/config.go +++ b/repo/config/config.go @@ -29,8 +29,10 @@ type Config struct { } const ( + // DefaultPathName is the default config dir name + DefaultPathName = ".ipfs" // DefaultPathRoot is the path to the default config dir location. - DefaultPathRoot = "~/.go-ipfs" + DefaultPathRoot = "~/" + DefaultPathName // DefaultConfigFile is the filename of the configuration file DefaultConfigFile = "config" // EnvDir is the environment variable used to change the path root. diff --git a/repo/fsrepo/doc.go b/repo/fsrepo/doc.go index cbed3fd98..c565053f4 100644 --- a/repo/fsrepo/doc.go +++ b/repo/fsrepo/doc.go @@ -2,7 +2,7 @@ // // TODO explain the package roadmap... // -// .go-ipfs/ +// .ipfs/ // ├── client/ // | ├── client.lock <------ protects client/ + signals its own pid // │ ├── ipfs-client.cpuprof diff --git a/test/3nodetest/bootstrap/Dockerfile b/test/3nodetest/bootstrap/Dockerfile index 3b97d58d0..3fd42f567 100644 --- a/test/3nodetest/bootstrap/Dockerfile +++ b/test/3nodetest/bootstrap/Dockerfile @@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img RUN ipfs init -b=1024 ADD . 
/tmp/id -RUN mv -f /tmp/id/config /root/.go-ipfs/config +RUN mv -f /tmp/id/config /root/.ipfs/config RUN ipfs id ENV IPFS_PROF true diff --git a/test/3nodetest/bootstrap/config b/test/3nodetest/bootstrap/config index 58ba1abfa..9f178833c 100644 --- a/test/3nodetest/bootstrap/config +++ b/test/3nodetest/bootstrap/config @@ -5,7 +5,7 @@ }, "Datastore": { "Type": "leveldb", - "Path": "/root/.go-ipfs/datastore" + "Path": "/root/.ipfs/datastore" }, "Addresses": { "Swarm": [ @@ -30,7 +30,7 @@ "Last": "" }, "Logs": { - "Filename": "/root/.go-ipfs/logs/events.log", + "Filename": "/root/.ipfs/logs/events.log", "MaxSizeMB": 0, "MaxBackups": 0, "MaxAgeDays": 0 diff --git a/test/3nodetest/client/Dockerfile b/test/3nodetest/client/Dockerfile index 0ff89819f..c2ac8d725 100644 --- a/test/3nodetest/client/Dockerfile +++ b/test/3nodetest/client/Dockerfile @@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img RUN ipfs init -b=1024 ADD . /tmp/id -RUN mv -f /tmp/id/config /root/.go-ipfs/config +RUN mv -f /tmp/id/config /root/.ipfs/config RUN ipfs id EXPOSE 4031 4032/udp diff --git a/test/3nodetest/client/config b/test/3nodetest/client/config index f3d5e9f65..3a8484f86 100644 --- a/test/3nodetest/client/config +++ b/test/3nodetest/client/config @@ -8,7 +8,7 @@ "Bootstrap": [ ], "Datastore": { - "Path": "/root/.go-ipfs/datastore", + "Path": "/root/.ipfs/datastore", "Type": "leveldb" }, "Identity": { @@ -16,7 +16,7 @@ "PrivKey": 
"CAAS4AQwggJcAgEAAoGBANlJUjOCbPXgYUfo1Pr6nlIjJDPNwN81ACamhaoEZ9VRHXI3fPe7RVAaaXrWLHb892mRqFi1ScE2lcMTLc7WGfyc7dwPqBOZqkVvT0KpCx3Mg246+WvnG8I3HCbWyjSP9tJflOBQxVq6qT2yZSXjNTtDdO4skd4PsPqBco53guYTAgMBAAECgYEAtIcYhrdMNBSSfp5RpZxnwbJ0t52xK0HruDEOSK2UX0Ufg+/aIjEza1QmYupi0xFltg5QojMs7hyd3Q+oNXro5tKsYVeiqrLsUh9jMjaQofzSlV9Oc+bhkkl48YWvF6Y8qx88UYAX+oJqB627H4S1gxLdNEJhPjEAD6n/jql3zUECQQDmHP75wJ7nC4TlxT1SHim5syMAqWNs/SOHnvX8yLrFV9FrMRzsD5qMlIEGBrAjaESzEck6XpbqkyxB8KKGo7OjAkEA8brtEh/AMoQ/yoSWdYT2MRbJxCAn+KG2c6Hi9AMMmJ+K779HxywpUIDYIa22hzLKYumYIuRa1X++1glOAFGq0QJAPQgXwFoMSy9M8jwcBXmmi3AtqnFCw5doIwJQL9l1X/3ot0txZlLFJOAGUHjZoqp2/h+LhYWs9U5PgLW4BYnJjQJAPydY/J0y93+5ss1FCdr8/wI3IHhOORT2t+sZgiqxxcYY5F4TAKQ2/wNKdDIQN+47FfB1gNgsKw8+6mhv6oFroQJACBF2yssNVXiXa2Na/a9tKYutGvxbm3lXzOvmpkW3FukbsObKYS344J1vdg0nzM6EWQCaiBweSA5TQ27iNW6BzQ==" }, "Logs": { - "Filename": "/root/.go-ipfs/logs/events.log", + "Filename": "/root/.ipfs/logs/events.log", "MaxAgeDays": 0, "MaxBackups": 0, "MaxSizeMB": 0 diff --git a/test/3nodetest/server/Dockerfile b/test/3nodetest/server/Dockerfile index f735fe1d8..bced68354 100644 --- a/test/3nodetest/server/Dockerfile +++ b/test/3nodetest/server/Dockerfile @@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img RUN ipfs init -b=1024 ADD . 
/tmp/test -RUN mv -f /tmp/test/config /root/.go-ipfs/config +RUN mv -f /tmp/test/config /root/.ipfs/config RUN ipfs id RUN chmod +x /tmp/test/run.sh diff --git a/test/3nodetest/server/config b/test/3nodetest/server/config index 59c7fecdf..49cca26d6 100644 --- a/test/3nodetest/server/config +++ b/test/3nodetest/server/config @@ -8,7 +8,7 @@ "Bootstrap": [ ], "Datastore": { - "Path": "/root/.go-ipfs/datastore", + "Path": "/root/.ipfs/datastore", "Type": "leveldb" }, "Identity": { @@ -16,7 +16,7 @@ "PrivKey": "CAAS4AQwggJcAgEAAoGBANW3mJMmDSJbdRyykO0Ze5t6WL6jeTtpOhklxePBIkJL/Uil78Va/tODx6Mvv3GMCkbGvzWslTZXpaHa9vBmjE3MVZSmd5fLRybKT0zZ3juABKcx+WIVNw8JlkpEORihJdwb+5tRUC5pUcMzxqHSmGX+d6e9KZqLnv7piNKg2+r7AgMBAAECgYAqc6+w+wv82SHoM2gqULeG6MScCajZLkvGFwS5+vEtLh7/wUZhc3PO3AxZ0/A5Q9H+wRfWN5PkGYDjJ7WJhzUzGfTbrQ821JV6B3IUR4UHo2IgJkZO4EUB5L9KBUqvYxDJigtGBopgQh0EeDSS+9X8vaGmit5l4zcAfi+UGYPgMQJBAOCJQU8N2HW5SawBo2QX0bnCAAnu5Ilk2QaqwDZbDQaM5JWFcpRpGnjBhsZihHwVWvKCbnq83JhAGRQvKAEepMUCQQDzqjvIyM+Au42nP7SFDHoMjEnHW8Nimvz8zPbyrSUEHe4l9/yS4+BeRPxpwI5xgzp8g1wEYfNeXt08buYwCsy/AkBXWg5mSuSjJ+pZWGnQTtPwiGCrfJy8NteXmGYev11Z5wYmhTwGML1zrRZZp4oTG9u97LA+X6sSMB2RlKbjiKBhAkEAgl/hoSshK+YugwCpHE9ytmgRyeOlhYscNj+NGofeOHezRwmLUSUwlgAfdo4bKU1n69t1TrsCNspXYdCMxcPhjQJAMNxkJ8t2tFMpucCQfWJ09wvFKZSHX1/iD9GKWL0Qk2FcMCg3NXiqei5NL3NYqCWpdC/IfjsAEGCJrTFwp/OoUw==" }, "Logs": { - "Filename": "/root/.go-ipfs/logs/events.log", + "Filename": "/root/.ipfs/logs/events.log", "MaxAgeDays": 0, "MaxBackups": 0, "MaxSizeMB": 0 diff --git a/test/bench/bench_cli_ipfs_add/main.go b/test/bench/bench_cli_ipfs_add/main.go index 06969275a..15ca4a165 100644 --- a/test/bench/bench_cli_ipfs_add/main.go +++ b/test/bench/bench_cli_ipfs_add/main.go @@ -53,7 +53,7 @@ func benchmarkAdd(amount int64) (*testing.BenchmarkResult, error) { defer os.RemoveAll(tmpDir) env := append( - []string{fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, ".go-ipfs"))}, // first in order to override + []string{fmt.Sprintf("%s=%s", config.EnvDir, 
path.Join(tmpDir, config.DefaultPathName))}, // first in order to override os.Environ()..., ) setupCmd := func(cmd *exec.Cmd) { diff --git a/test/bench/offline_add/main.go b/test/bench/offline_add/main.go index d2f0ac905..0df24f5c4 100644 --- a/test/bench/offline_add/main.go +++ b/test/bench/offline_add/main.go @@ -43,7 +43,7 @@ func benchmarkAdd(amount int64) (*testing.BenchmarkResult, error) { } defer os.RemoveAll(tmpDir) - env := append(os.Environ(), fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, ".go-ipfs"))) + env := append(os.Environ(), fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, config.DefaultPathName))) setupCmd := func(cmd *exec.Cmd) { cmd.Env = env } diff --git a/test/jenkins/network-test.sh b/test/jenkins/network-test.sh index fcc43c91c..8905284bd 100755 --- a/test/jenkins/network-test.sh +++ b/test/jenkins/network-test.sh @@ -8,6 +8,6 @@ make clean make test make save_logs -docker cp 3nodetest_server_1:/root/.go-ipfs/logs/events.log $(PWD)/build/server-events.log -docker cp 3nodetest_bootstrap_1:/root/.go-ipfs/logs/events.log $(PWD)/build/bootstrap-events.log -docker cp 3nodetest_client_1:/root/.go-ipfs/logs/events.log $(PWD)/build/client-events.log +docker cp 3nodetest_server_1:/root/.ipfs/logs/events.log $(PWD)/build/server-events.log +docker cp 3nodetest_bootstrap_1:/root/.ipfs/logs/events.log $(PWD)/build/bootstrap-events.log +docker cp 3nodetest_client_1:/root/.ipfs/logs/events.log $(PWD)/build/client-events.log diff --git a/test/sharness/README.md b/test/sharness/README.md index a9bd9cda8..e54f9e932 100644 --- a/test/sharness/README.md +++ b/test/sharness/README.md @@ -68,11 +68,11 @@ This means cating certain files, or running diagnostic commands. 
For example: ``` -test_expect_success ".go-ipfs/ has been created" ' - test -d ".go-ipfs" && - test -f ".go-ipfs/config" && - test -d ".go-ipfs/datastore" || - test_fsh ls -al .go-ipfs +test_expect_success ".ipfs/ has been created" ' + test -d ".ipfs" && + test -f ".ipfs/config" && + test -d ".ipfs/datastore" || + test_fsh ls -al .ipfs ' ``` diff --git a/test/sharness/lib/test-lib.sh b/test/sharness/lib/test-lib.sh index c40bcd082..4be9aa434 100644 --- a/test/sharness/lib/test-lib.sh +++ b/test/sharness/lib/test-lib.sh @@ -148,7 +148,7 @@ test_init_ipfs() { # todo: in the future, use env? test_expect_success "ipfs init succeeds" ' - export IPFS_PATH="$(pwd)/.go-ipfs" && + export IPFS_PATH="$(pwd)/.ipfs" && ipfs init -b=1024 > /dev/null ' diff --git a/test/sharness/t0020-init.sh b/test/sharness/t0020-init.sh index 64e3a0d9d..002b9865e 100755 --- a/test/sharness/t0020-init.sh +++ b/test/sharness/t0020-init.sh @@ -9,16 +9,16 @@ test_description="Test init command" . lib/test-lib.sh test_expect_success "ipfs init succeeds" ' - export IPFS_PATH="$(pwd)/.go-ipfs" && + export IPFS_PATH="$(pwd)/.ipfs" && BITS="2048" && ipfs init --bits="$BITS" >actual_init ' -test_expect_success ".go-ipfs/ has been created" ' - test -d ".go-ipfs" && - test -f ".go-ipfs/config" && - test -d ".go-ipfs/datastore" || - test_fsh ls -al .go-ipfs +test_expect_success ".ipfs/ has been created" ' + test -d ".ipfs" && + test -f ".ipfs/config" && + test -d ".ipfs/datastore" || + test_fsh ls -al .ipfs ' test_expect_success "ipfs config succeeds" ' diff --git a/test/sharness/t0060-daemon.sh b/test/sharness/t0060-daemon.sh index 2db6018c5..9b92df48a 100755 --- a/test/sharness/t0060-daemon.sh +++ b/test/sharness/t0060-daemon.sh @@ -10,7 +10,7 @@ test_description="Test daemon command" # this needs to be in a different test than "ipfs daemon --init" below test_expect_success "setup IPFS_PATH" ' - IPFS_PATH="$(pwd)/.go-ipfs" + IPFS_PATH="$(pwd)/.ipfs" ' # NOTE: this should remove bootstrap peers (needs a 
flag) @@ -54,11 +54,11 @@ test_expect_failure "ipfs daemon output looks good" ' test_cmp_repeat_10_sec expected actual_daemon ' -test_expect_success ".go-ipfs/ has been created" ' - test -d ".go-ipfs" && - test -f ".go-ipfs/config" && - test -d ".go-ipfs/datastore" || - test_fsh ls .go-ipfs +test_expect_success ".ipfs/ has been created" ' + test -d ".ipfs" && + test -f ".ipfs/config" && + test -d ".ipfs/datastore" || + test_fsh ls .ipfs ' # begin same as in t0010 diff --git a/test/supernode_client/.gitignore b/test/supernode_client/.gitignore index 7d3e2ed04..c8d27e8f5 100644 --- a/test/supernode_client/.gitignore +++ b/test/supernode_client/.gitignore @@ -1 +1 @@ -.go-ipfs/ +.ipfs/ diff --git a/test/supernode_client/main.go b/test/supernode_client/main.go index 5f5efb61e..11ee6ed73 100644 --- a/test/supernode_client/main.go +++ b/test/supernode_client/main.go @@ -63,7 +63,7 @@ func run() error { if err != nil { return err } - repoPath := gopath.Join(cwd, ".go-ipfs") + repoPath := gopath.Join(cwd, config.DefaultPathName) if err := ensureRepoInitialized(repoPath); err != nil { } repo, err := fsrepo.Open(repoPath) From c419a489e1ab684e7ff1aab0d2cbe6b99b540768 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 9 Apr 2015 17:13:04 -0700 Subject: [PATCH 11/15] make ipfs understand the new migration --- cmd/ipfs/daemon.go | 15 +++----- repo/fsrepo/fsrepo.go | 54 +++++++++++++++++---------- repo/fsrepo/migrations/1-to-2/main.go | 2 +- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index 94e20a70e..b2104ce6f 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -103,21 +103,16 @@ func daemonFunc(req cmds.Request, res cmds.Response) { } } - // To ensure that IPFS has been initialized, fetch the config. Do this - // _before_ acquiring the daemon lock so the user gets an appropriate error - // message. - // NB: It's safe to read the config without the daemon lock, but not safe - // to write. 
- ctx := req.Context() - cfg, err := ctx.GetConfig() + // acquire the repo lock _before_ constructing a node. we need to make + // sure we are permitted to access the resources (datastore, etc.) + repo, err := fsrepo.Open(req.Context().ConfigRoot) if err != nil { res.SetError(err, cmds.ErrNormal) return } - // acquire the repo lock _before_ constructing a node. we need to make - // sure we are permitted to access the resources (datastore, etc.) - repo, err := fsrepo.Open(req.Context().ConfigRoot) + ctx := req.Context() + cfg, err := ctx.GetConfig() if err != nil { res.SetError(err, cmds.ErrNormal) return diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 96e9210b8..6885731ff 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path" "strconv" + "strings" "sync" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" @@ -28,8 +28,13 @@ import ( ds2 "github.com/ipfs/go-ipfs/util/datastore2" ) +// version number that we are currently expecting to see var RepoVersion = "2" +var incorrectRepoFormat = "Repo has incorrect version: '%s'\nProgram version is: '%s'\nPlease run the appropriate migration tool before continuing" + +var ErrNoVersion = errors.New("version check failed, no version file found, please run 0-to-1 migration tool.") + const ( leveldbDirectory = "datastore" flatfsDirectory = "blocks" @@ -87,13 +92,14 @@ func open(repoPath string) (repo.Repo, error) { packageLock.Lock() defer packageLock.Unlock() - expPath, err := u.TildeExpansion(path.Clean(repoPath)) + r, err := newFSRepo(repoPath) if err != nil { return nil, err } - r := &FSRepo{ - path: expPath, + // Check if its initialized + if err := checkInitialized(r.path); err != nil { + return nil, err } r.lockfile, err = lockfile.Lock(r.path) @@ -108,30 +114,20 @@ func open(repoPath string) (repo.Repo, error) { } }() - if !isInitializedUnsynced(r.path) { - return nil, errors.New("ipfs not 
initialized, please run 'ipfs init'") - } - // Check version, and error out if not matching - ver, err := ioutil.ReadFile(path.Join(expPath, "version")) + ver, err := mfsr.RepoPath(r.path).Version() if err != nil { if os.IsNotExist(err) { - return nil, errors.New("version check failed, no version file found, please run 0-to-1 migration tool.") + return nil, ErrNoVersion } return nil, err } - vers := string(ver)[:1] - - if vers != RepoVersion { - return nil, fmt.Errorf("Repo has incorrect version: '%s'\nProgram version is: '%s'\nPlease run the appropriate migration tool before continuing", - vers, RepoVersion) - + if ver != RepoVersion { + return nil, fmt.Errorf(incorrectRepoFormat, ver, RepoVersion) } // check repo path, then check all constituent parts. - // TODO acquire repo lock - // TODO if err := initCheckDir(logpath); err != nil { // } if err := dir.Writable(r.path); err != nil { return nil, err } @@ -144,13 +140,33 @@ func open(repoPath string) (repo.Repo, error) { return nil, err } - // log.Debugf("writing eventlogs to ...", c.path) + // setup eventlogger configureEventLoggerAtRepoPath(r.config, r.path) keepLocked = true return r, nil } +func newFSRepo(rpath string) (*FSRepo, error) { + expPath, err := u.TildeExpansion(path.Clean(rpath)) + if err != nil { + return nil, err + } + + return &FSRepo{path: expPath}, nil +} + +func checkInitialized(path string) error { + if !isInitializedUnsynced(path) { + alt := strings.Replace(path, ".ipfs", ".go-ipfs", 1) + if isInitializedUnsynced(alt) { + return debugerror.New("ipfs repo found in old '.go-ipfs' location, please run migration tool") + } + return debugerror.New("ipfs not initialized, please run 'ipfs init'") + } + return nil +} + // ConfigAt returns an error if the FSRepo at the given path is not // initialized. This function allows callers to read the config file even when // another process is running and holding the lock. 
diff --git a/repo/fsrepo/migrations/1-to-2/main.go b/repo/fsrepo/migrations/1-to-2/main.go index 28b6c0abd..0b8702f29 100644 --- a/repo/fsrepo/migrations/1-to-2/main.go +++ b/repo/fsrepo/migrations/1-to-2/main.go @@ -124,7 +124,7 @@ func transferBlocksToFlatDB(repopath string) error { blockspath := path.Join(repopath, "blocks") err = os.Mkdir(blockspath, 0777) - if err != nil { + if err != nil && !os.IsExist(err) { return err } From 591cca9507f05f913b3874ed0c649001635b53ff Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 15 Apr 2015 14:28:28 -0700 Subject: [PATCH 12/15] remove migrations code from go-ipfs, in favor of fs-repo-migrations repo --- repo/fsrepo/migrations/0-to-1/main.go | 69 ------- repo/fsrepo/migrations/1-to-2/main.go | 248 -------------------------- repo/fsrepo/migrations/mfsr.go | 56 ------ 3 files changed, 373 deletions(-) delete mode 100644 repo/fsrepo/migrations/0-to-1/main.go delete mode 100644 repo/fsrepo/migrations/1-to-2/main.go delete mode 100644 repo/fsrepo/migrations/mfsr.go diff --git a/repo/fsrepo/migrations/0-to-1/main.go b/repo/fsrepo/migrations/0-to-1/main.go deleted file mode 100644 index cc0c5ef1c..000000000 --- a/repo/fsrepo/migrations/0-to-1/main.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - migrate "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-migrate" - mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" -) - -type migration struct { -} - -// Version is the int version number. This could be a string -// in future versions -func (m migration) Versions() string { - return "0-to-1" -} - -// Reversible returns whether this migration can be reverted. -// Endeavor to make them all reversible. This is here only to warn users -// in case this is not the case. -func (m migration) Reversible() bool { - return true -} - -// Apply applies the migration in question. -// This migration merely adds a version file. 
-func (m migration) Apply(opts migrate.Options) error { - repo := mfsr.RepoPath(opts.Path) - - // first, check if there is a version file. - // if there is, bail out. - if v, err := repo.Version(); err == nil { - return fmt.Errorf("repo at %s is version %s (not 0)", opts.Path, v) - } else if !strings.Contains(err.Error(), "no version file in repo") { - return err - } - - // add the version file - if err := repo.WriteVersion("1"); err != nil { - return err - } - - return nil -} - -// Revert un-applies the migration in question. This should be best-effort. -// Some migrations are definitively one-way. If so, return an error. -func (m migration) Revert(opts migrate.Options) error { - repo := mfsr.RepoPath(opts.Path) - - if err := repo.CheckVersion("1"); err != nil { - return err - } - - // remove the version file - if err := os.Remove(repo.VersionFile()); err != nil { - return err - } - - return nil -} - -func main() { - m := migration{} - migrate.Main(&m) -} diff --git a/repo/fsrepo/migrations/1-to-2/main.go b/repo/fsrepo/migrations/1-to-2/main.go deleted file mode 100644 index 0b8702f29..000000000 --- a/repo/fsrepo/migrations/1-to-2/main.go +++ /dev/null @@ -1,248 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "path" - "strings" - - dstore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - flatfs "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" - leveldb "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" - migrate "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-migrate" - mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" - - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" -) - -var _ = context.Background - -const peerKeyName = "peer.key" - -type migration struct{} - -func (m migration) Versions() 
string { - return "1-to-2" -} - -func (m migration) Reversible() bool { - return true -} - -func (m migration) Apply(opts migrate.Options) error { - repo := mfsr.RepoPath(opts.Path) - - if err := repo.CheckVersion("1"); err != nil { - return err - } - - // 1) run some sanity checks to make sure we should even bother - err := sanityChecks(opts) - if err != nil { - return err - } - - // 2) Transfer blocks out of leveldb into flatDB - err = transferBlocksToFlatDB(opts.Path) - if err != nil { - return err - } - - // 3) move ipfs path from .go-ipfs to .ipfs - newpath, err := moveIpfsDir(opts.Path) - if err != nil { - return err - } - - // 4) Update version number - repo = mfsr.RepoPath(newpath) - err = repo.WriteVersion("2") - if err != nil { - return err - } - - return nil -} - -func (m migration) Revert(opts migrate.Options) error { - repo := mfsr.RepoPath(opts.Path) - if err := repo.CheckVersion("2"); err != nil { - return err - } - - // 1) Move directory back to .go-ipfs - npath, err := reverseIpfsDir(opts.Path) - if err != nil { - return err - } - - // 2) move blocks back from flatfs to leveldb - err = transferBlocksFromFlatDB(npath) - if err != nil { - return err - } - - // 3) change version number back down - repo = mfsr.RepoPath(npath) - err = repo.WriteVersion("1") - if err != nil { - return err - } - - return nil -} - -// sanityChecks performs a set of tests to make sure the migration will go -// smoothly -func sanityChecks(opts migrate.Options) error { - npath := strings.Replace(opts.Path, ".go-ipfs", ".ipfs", 1) - - // make sure we can move the repo from .go-ipfs to .ipfs - err := os.Mkdir(npath, 0777) - if err != nil { - return err - } - - // we can? good, remove it now - err = os.Remove(npath) - if err != nil { - // this is weird... 
not worth continuing - return err - } - - return nil -} - -func transferBlocksToFlatDB(repopath string) error { - ldbpath := path.Join(repopath, "datastore") - ldb, err := leveldb.NewDatastore(ldbpath, nil) - if err != nil { - return err - } - - blockspath := path.Join(repopath, "blocks") - err = os.Mkdir(blockspath, 0777) - if err != nil && !os.IsExist(err) { - return err - } - - fds, err := flatfs.New(blockspath, 4) - if err != nil { - return err - } - - return transferBlocks(ldb, fds, "/b/", "") -} - -func transferBlocksFromFlatDB(repopath string) error { - - ldbpath := path.Join(repopath, "datastore") - blockspath := path.Join(repopath, "blocks") - fds, err := flatfs.New(blockspath, 4) - if err != nil { - return err - } - - ldb, err := leveldb.NewDatastore(ldbpath, nil) - if err != nil { - return err - } - - err = transferBlocks(fds, ldb, "", "/b/") - if err != nil { - return err - } - - // Now remove the blocks directory - err = os.RemoveAll(blockspath) - if err != nil { - return err - } - - return nil -} - -func transferBlocks(from, to dstore.Datastore, fpref, tpref string) error { - q := dsq.Query{Prefix: fpref, KeysOnly: true} - res, err := from.Query(q) - if err != nil { - return err - } - - fmt.Println("Starting query") - for result := range res.Next() { - nkey := fmt.Sprintf("%s%s", tpref, result.Key[len(fpref):]) - - fkey := dstore.NewKey(result.Key) - val, err := from.Get(fkey) - if err != nil { - return err - } - - err = to.Put(dstore.NewKey(nkey), val) - if err != nil { - return err - } - - err = from.Delete(fkey) - if err != nil { - return err - } - } - fmt.Println("Query done") - - return nil -} - -func moveIpfsDir(curpath string) (string, error) { - newpath := strings.Replace(curpath, ".go-ipfs", ".ipfs", 1) - return newpath, os.Rename(curpath, newpath) -} - -func reverseIpfsDir(curpath string) (string, error) { - newpath := strings.Replace(curpath, ".ipfs", ".go-ipfs", 1) - return newpath, os.Rename(curpath, newpath) -} - -func 
loadConfigJSON(repoPath string) (map[string]interface{}, error) { - cfgPath := path.Join(repoPath, "config") - fi, err := os.Open(cfgPath) - if err != nil { - return nil, err - } - - var out map[string]interface{} - err = json.NewDecoder(fi).Decode(&out) - if err != nil { - return nil, err - } - - return out, nil -} - -func saveConfigJSON(repoPath string, cfg map[string]interface{}) error { - cfgPath := path.Join(repoPath, "config") - fi, err := os.Create(cfgPath) - if err != nil { - return err - } - - out, err := json.MarshalIndent(cfg, "", "\t") - if err != nil { - return err - } - - _, err = fi.Write(out) - if err != nil { - return err - } - - return nil -} - -func main() { - m := migration{} - migrate.Main(&m) -} diff --git a/repo/fsrepo/migrations/mfsr.go b/repo/fsrepo/migrations/mfsr.go deleted file mode 100644 index 21b81f1a3..000000000 --- a/repo/fsrepo/migrations/mfsr.go +++ /dev/null @@ -1,56 +0,0 @@ -package mfsr - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" -) - -const VersionFile = "version" - -type RepoPath string - -func (rp RepoPath) VersionFile() string { - return path.Join(string(rp), VersionFile) -} - -func (rp RepoPath) Version() (string, error) { - if rp == "" { - return "", fmt.Errorf("invalid repo path \"%s\"", rp) - } - - fn := rp.VersionFile() - if _, err := os.Stat(fn); os.IsNotExist(err) { - return "", errors.New("no version file in repo at " + string(rp)) - } - - c, err := ioutil.ReadFile(fn) - if err != nil { - return "", err - } - - s := string(c) - s = strings.TrimSpace(s) - return s, nil -} - -func (rp RepoPath) CheckVersion(version string) error { - v, err := rp.Version() - if err != nil { - return err - } - - if v != version { - return fmt.Errorf("versions differ (expected: %s, actual:%s)", version, v) - } - - return nil -} - -func (rp RepoPath) WriteVersion(version string) error { - fn := rp.VersionFile() - return ioutil.WriteFile(fn, []byte(version+"\n"), 0644) -} From 
bb7bf81873a3e9ceeb369d23807594530bd5b009 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 17 Apr 2015 02:26:53 -0700 Subject: [PATCH 13/15] put mfsr package back --- repo/fsrepo/migrations/mfsr.go | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 repo/fsrepo/migrations/mfsr.go diff --git a/repo/fsrepo/migrations/mfsr.go b/repo/fsrepo/migrations/mfsr.go new file mode 100644 index 000000000..c591f67ee --- /dev/null +++ b/repo/fsrepo/migrations/mfsr.go @@ -0,0 +1,61 @@ +package mfsr + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" +) + +const VersionFile = "version" + +type RepoPath string + +func (rp RepoPath) VersionFile() string { + return path.Join(string(rp), VersionFile) +} + +func (rp RepoPath) Version() (string, error) { + if rp == "" { + return "", fmt.Errorf("invalid repo path \"%s\"", rp) + } + + fn := rp.VersionFile() + if _, err := os.Stat(fn); os.IsNotExist(err) { + return "", VersionFileNotFound(rp) + } + + c, err := ioutil.ReadFile(fn) + if err != nil { + return "", err + } + + s := string(c) + s = strings.TrimSpace(s) + return s, nil +} + +func (rp RepoPath) CheckVersion(version string) error { + v, err := rp.Version() + if err != nil { + return err + } + + if v != version { + return fmt.Errorf("versions differ (expected: %s, actual:%s)", version, v) + } + + return nil +} + +func (rp RepoPath) WriteVersion(version string) error { + fn := rp.VersionFile() + return ioutil.WriteFile(fn, []byte(version+"\n"), 0644) +} + +type VersionFileNotFound string + +func (v VersionFileNotFound) Error() string { + return "no version file in repo at " + string(v) +} From c1b8d292fb8dacf9a8685af675a1baa9e0b5c2d4 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 19 Apr 2015 14:43:06 -0700 Subject: [PATCH 14/15] repo: clean up migration errors Improved the repo migration errors to provide instructions to the user. 
--- repo/fsrepo/fsrepo.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 6885731ff..a59e5cd95 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -31,9 +31,19 @@ import ( // version number that we are currently expecting to see var RepoVersion = "2" -var incorrectRepoFormat = "Repo has incorrect version: '%s'\nProgram version is: '%s'\nPlease run the appropriate migration tool before continuing" +var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md +Sorry for the inconvenience. In the future, these will run automatically.` -var ErrNoVersion = errors.New("version check failed, no version file found, please run 0-to-1 migration tool.") +var errIncorrectRepoFmt = `Repo has incorrect version: %s +Program version is: %s +Please run the ipfs migration tool before continuing. +` + migrationInstructions + +var ( + ErrNoRepo = errors.New("no ipfs repo found. please run: ipfs init") + ErrNoVersion = errors.New("no version file found, please run 0-to-1 migration tool.\n" + migrationInstructions) + ErrOldRepo = errors.New("ipfs repo found in old '~/.go-ipfs' location, please run migration tool.\n" + migrationInstructions) +) const ( leveldbDirectory = "datastore" @@ -124,7 +134,7 @@ func open(repoPath string) (repo.Repo, error) { } if ver != RepoVersion { - return nil, fmt.Errorf(incorrectRepoFormat, ver, RepoVersion) + return nil, fmt.Errorf(errIncorrectRepoFmt, ver, RepoVersion) } // check repo path, then check all constituent parts. 
@@ -160,9 +170,9 @@ func checkInitialized(path string) error { if !isInitializedUnsynced(path) { alt := strings.Replace(path, ".ipfs", ".go-ipfs", 1) if isInitializedUnsynced(alt) { - return debugerror.New("ipfs repo found in old '.go-ipfs' location, please run migration tool") + return ErrOldRepo } - return debugerror.New("ipfs not initialized, please run 'ipfs init'") + return ErrNoRepo } return nil } From dd25a75225edba813c70ad958dc069f1a74e87f9 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 19 Apr 2015 23:33:36 -0700 Subject: [PATCH 15/15] repo: move daemon.lock -> repo.lock The "daemon.lock" was really a repo.lock, as the cli also took it and the purpose was any process mutex. This is part of the 1-to-2 migration, and has already been handled in https://github.com/ipfs/fs-repo-migrations/tree/master/ipfs-1-to-2 --- repo/fsrepo/lock/lock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/repo/fsrepo/lock/lock.go b/repo/fsrepo/lock/lock.go index 5681a5d38..f4fcd80b7 100644 --- a/repo/fsrepo/lock/lock.go +++ b/repo/fsrepo/lock/lock.go @@ -8,9 +8,9 @@ import ( "github.com/ipfs/go-ipfs/util" ) -// LockFile is the filename of the daemon lock, relative to config dir +// LockFile is the filename of the repo lock, relative to config dir // TODO rename repo lock and hide name -const LockFile = "daemon.lock" +const LockFile = "repo.lock" func Lock(confdir string) (io.Closer, error) { c, err := lock.Lock(path.Join(confdir, LockFile))