diff --git a/.artifact_files b/.artifact_files deleted file mode 100644 index 6b1d0bfab..000000000 --- a/.artifact_files +++ /dev/null @@ -1 +0,0 @@ -LICENSE diff --git a/.dockerignore b/.dockerignore index ee0f144b4..bf4ffe706 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,7 @@ .git/ !.git/HEAD !.git/refs/ +!.git/packed-refs cmd/ipfs/ipfs vendor/gx/ test/ diff --git a/.gobuilder.yml b/.gobuilder.yml deleted file mode 100644 index 4cf648012..000000000 --- a/.gobuilder.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -triggers: - - github.com/ipfs/go-ipfs/cmd/ipfs -no_go_fmt: true diff --git a/.travis.yml b/.travis.yml index 3de3336bb..602ba01d4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,8 @@ # dist: trusty # KVM Setup +notifications: + email: false + os: - linux - osx @@ -7,7 +10,7 @@ os: language: go go: - - 1.5.2 + - 1.7 env: - TEST_NO_FUSE=1 TEST_VERBOSE=1 TEST_SUITE=test_go_expensive diff --git a/CHANGELOG.md b/CHANGELOG.md index dd185723c..138f1cde6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,171 @@ # go-ipfs changelog +### 0.4.3-rc4 - 2016-09-09 + +This release candidate fixes issues in Bitswap and the `ipfs add` command, and improves testing. +We plan for this to be the last release candidate before the release of go-ipfs v0.4.3. + +With this release candidate, we're also moving go-ipfs to Go 1.7, which we expect will yield improvements in runtime performance, memory usage, build time and size of the release binaries. + +- Require Go 1.7. (@whyrusleeping, @Kubuxu, @lgierth, [ipfs/go-ipfs#3163](https://github.com/ipfs/go-ipfs/pull/3163)) + - For this purpose, switch Docker image from Alpine 3.4 to Alpine Edge. +- Fix cancellation of Bitswap `wantlist` entries. (@whyrusleeping, [ipfs/go-ipfs#3182](https://github.com/ipfs/go-ipfs/pull/3182)) +- Fix clearing of `active` state of Bitswap provider queries. (@whyrusleeping, [ipfs/go-ipfs#3169](https://github.com/ipfs/go-ipfs/pull/3169)) +- Fix a panic in the DHT code. (@Kubuxu, [ipfs/go-ipfs#3200](https://github.com/ipfs/go-ipfs/pull/3200)) +- Improve handling of `Identity` field in `ipfs config` command. (@Kubuxu, @whyrusleeping, [ipfs/go-ipfs#3141](https://github.com/ipfs/go-ipfs/pull/3141)) +- Fix explicit adding of symlinked files and directories. (@kevina, [ipfs/go-ipfs#3135](https://github.com/ipfs/go-ipfs/pull/3135)) +- Fix bash auto-completion of `ipfs daemon --unrestricted-api` option. (@lgierth, [ipfs/go-ipfs#3159](https://github.com/ipfs/go-ipfs/pull/3159)) +- Introduce a new timeout tool for tests to avoid licensing issues. (@Kubuxu, [ipfs/go-ipfs#3152](https://github.com/ipfs/go-ipfs/pull/3152)) +- Improve output for migrations of fs-repo. (@lgierth, [ipfs/go-ipfs#3158](https://github.com/ipfs/go-ipfs/pull/3158)) +- Fix info notice of commands taking input from stdin. (@Kubuxu, [ipfs/go-ipfs#3134](https://github.com/ipfs/go-ipfs/pull/3134)) +- Bring back a few tests for stdin handling of `ipfs cat` and `ipfs add`. (@Kubuxu, [ipfs/go-ipfs#3144](https://github.com/ipfs/go-ipfs/pull/3144)) +- Improve sharness tests for `ipfs repo verify` command. (@whyrusleeping, [ipfs/go-ipfs#3148](https://github.com/ipfs/go-ipfs/pull/3148)) +- Improve sharness tests for CORS headers on the gateway. (@Kubuxu, [ipfs/go-ipfs#3142](https://github.com/ipfs/go-ipfs/pull/3142)) +- Improve tests for pinning within `ipfs files`. (@kevina, [ipfs/go-ipfs#3151](https://github.com/ipfs/go-ipfs/pull/3151)) +- Improve tests for the automatic raising of file descriptor limits. 
(@whyrusleeping, [ipfs/go-ipfs#3149](https://github.com/ipfs/go-ipfs/pull/3149)) + +### 0.4.3-rc3 - 2016-08-09 + +This release candidate fixes a panic that occurred when input from stdin was +expected, but none was given: [ipfs/go-ipfs#3050](https://github.com/ipfs/go-ipfs/pull/3050) + +### 0.4.3-rc2 - 2016-07-23 + +This release includes bugfixes and fixes for regressions that were introduced +between 0.4.2 and 0.4.3-rc1. + +- Regressions + - Fix daemon panic when there is no multipart input provided over the HTTP API. + (@whyrusleeping, [ipfs/go-ipfs#2989](https://github.com/ipfs/go-ipfs/pull/2989)) + - Fix `ipfs refs --edges` not printing edges. + (@Kubuxu, [ipfs/go-ipfs#3007](https://github.com/ipfs/go-ipfs/pull/3007)) + - Fix progress option for `ipfs add` defaulting to true on the HTTP API. + (@whyrusleeping, [ipfs/go-ipfs#3025](https://github.com/ipfs/go-ipfs/pull/3025)) + - Fix erroneous printing of stdin reading message. + (@whyrusleeping, [ipfs/go-ipfs#3033](https://github.com/ipfs/go-ipfs/pull/3033)) + - Fix panic caused by passing `--mount` and `--offline` flags to `ipfs daemon`. + (@Kubuxu, [ipfs/go-ipfs#3022](https://github.com/ipfs/go-ipfs/pull/3022)) + - Fix symlink path resolution on Windows. + (@Kubuxu, [ipfs/go-ipfs#3023](https://github.com/ipfs/go-ipfs/pull/3023)) + - Add in code to prevent issue 3032 from crashing the daemon. + (@whyrusleeping, [ipfs/go-ipfs#3037](https://github.com/ipfs/go-ipfs/pull/3037)) + + +### 0.4.3-rc1 - 2016-07-23 + +This is a maintenance release which comes with a couple of nice enhancements, and improves the performance of Storage and Bitswap, as well as Content and Peer Routing. It also introduces a handful of new commands and options, and fixes a good bunch of bugs. + +This is the first Release Candidate. Unless there are vulnerabilities or regressions discovered, the final 0.4.3 release will happen about one week from now. + +- Security Vulnerability + + - The `master` branch of go-ipfs suffered from a vulnerability for about 3 weeks. It allowed an attacker to use an iframe to request malicious HTML and JS from the API of a local go-ipfs node. The attacker could then gain unrestricted access to the node's API, and e.g. extract the private key. We fixed this issue by reintroducing restrictions on which particular objects can be loaded through the API (@lgierth, [ipfs/go-ipfs#2949](https://github.com/ipfs/go-ipfs/pull/2949)), and by completely excluding the private key from the API (@Kubuxu, [ipfs/go-ipfs#2957](https://github.com/ipfs/go-ipfs/pull/2957)). We will also work on more hardening of the API in the next release. + - **The previous release 0.4.2 is not vulnerable. That means if you're using official binaries from [dist.ipfs.io](https://dist.ipfs.io) you're not affected.** If you're running go-ipfs built from the `master` branch between June 17th ([ipfs/go-ipfs@1afebc21](https://github.com/ipfs/go-ipfs/commit/1afebc21f324982141ca8a29710da0d6f83ca804)) and July 7th ([ipfs/go-ipfs@39bef0d5](https://github.com/ipfs/go-ipfs/commit/39bef0d5b01f70abf679fca2c4d078a2d55620e2)), please update to v0.4.3-rc1 immediately. + - We are grateful to the group of independent researchers who made us aware of this vulnerability. We want to use this opportunity to reiterate that we're very happy about any additional review of pull requests and releases. You can contact us any time at security@ipfs.io (GPG [4B9665FB 92636D17 7C7A86D3 50AAE8A9 59B13AF3](https://pgp.mit.edu/pks/lookup?op=get&search=0x50AAE8A959B13AF3)).
+ +- Notable changes + + - Improve Bitswap performance. (@whyrusleeping, [ipfs/go-ipfs#2727](https://github.com/ipfs/go-ipfs/pull/2727), [ipfs/go-ipfs#2798](https://github.com/ipfs/go-ipfs/pull/2798)) + - Improve Content Routing and Peer Routing performance. (@whyrusleeping, [ipfs/go-ipfs#2817](https://github.com/ipfs/go-ipfs/pull/2817), [ipfs/go-ipfs#2841](https://github.com/ipfs/go-ipfs/pull/2841)) + - Improve datastore, blockstore, and dagstore performance. (@kevina, @Kubuxu, @whyrusleeping, [ipfs/go-datastore#43](https://github.com/ipfs/go-datastore/pull/43), [ipfs/go-ipfs#2885](https://github.com/ipfs/go-ipfs/pull/2885), [ipfs/go-ipfs#2961](https://github.com/ipfs/go-ipfs/pull/2961), [ipfs/go-ipfs#2953](https://github.com/ipfs/go-ipfs/pull/2953), [ipfs/go-ipfs#2960](https://github.com/ipfs/go-ipfs/pull/2960)) + - Content Providers are now stored on disk to reduce process memory usage. (@whyrusleeping, [ipfs/go-ipfs#2804](https://github.com/ipfs/go-ipfs/pull/2804), [ipfs/go-ipfs#2860](https://github.com/ipfs/go-ipfs/pull/2860)) + - Migrations of the fs-repo (usually stored at `~/.ipfs`) now run automatically. If there's a TTY available, you'll get prompted when running `ipfs daemon`, and in addition you can use the `--migrate=true` or `--migrate=false` options to avoid the prompt. (@whyrusleeping, @lgierth, [ipfs/go-ipfs#2939](https://github.com/ipfs/go-ipfs/pull/2939)) + - The internal naming of blocks in the blockstore has changed, which requires a migration of the fs-repo from version 3 to 4. (@whyrusleeping, [ipfs/go-ipfs#2903](https://github.com/ipfs/go-ipfs/pull/2903)) + - We now automatically raise the file descriptor limit to 1024 if necessary. (@whyrusleeping, [ipfs/go-ipfs#2884](https://github.com/ipfs/go-ipfs/pull/2884), [ipfs/go-ipfs#2891](https://github.com/ipfs/go-ipfs/pull/2891)) + - After a long struggle with deadlocks and hanging connections, we've decided to disable the uTP transport by default for now. (@whyrusleeping, [ipfs/go-ipfs#2840](https://github.com/ipfs/go-ipfs/pull/2840), [ipfs/go-libp2p-transport@88244000](https://github.com/ipfs/go-libp2p-transport/commit/88244000f0ce8851ffcfbac746ebc0794b71d2a4)) + - There is now documentation for the configuration options in `docs/config.md`. (@whyrusleeping, [ipfs/go-ipfs#2974](https://github.com/ipfs/go-ipfs/pull/2974)) + - All commands now sanely handle the combination of stdin and optional flags in certain edge cases. (@lgierth, [ipfs/go-ipfs#2952](https://github.com/ipfs/go-ipfs/pull/2952)) + +- New Features + + - Add `--offline` option to `ipfs daemon` command, which disables all swarm networking. (@Kubuxu, [ipfs/go-ipfs#2696](https://github.com/ipfs/go-ipfs/pull/2696), [ipfs/go-ipfs#2867](https://github.com/ipfs/go-ipfs/pull/2867)) + - Add `Datastore.HashOnRead` option for verifying block hashes on read access. (@Kubuxu, [ipfs/go-ipfs#2904](https://github.com/ipfs/go-ipfs/pull/2904)) + - Add `Datastore.BloomFilterSize` option for tuning the blockstore's new lookup bloom filter. (@Kubuxu, [ipfs/go-ipfs#2973](https://github.com/ipfs/go-ipfs/pull/2973)) + +- Bugfixes + + - Fix publishing of local IPNS entries, and more. (@whyrusleeping, [ipfs/go-ipfs#2943](https://github.com/ipfs/go-ipfs/pull/2943)) + - Fix progress bars in `ipfs add` and `ipfs get`. (@whyrusleeping, [ipfs/go-ipfs#2893](https://github.com/ipfs/go-ipfs/pull/2893), [ipfs/go-ipfs#2948](https://github.com/ipfs/go-ipfs/pull/2948)) + - Make sure files added through `ipfs files` are pinned and don't get GC'd.
(@kevina, [ipfs/go-ipfs#2872](https://github.com/ipfs/go-ipfs/pull/2872)) + - Fix copying into directory using `ipfs files cp`. (@whyrusleeping, [ipfs/go-ipfs#2977](https://github.com/ipfs/go-ipfs/pull/2977)) + - Fix `ipfs version --commit` with Docker containers. (@lgierth, [ipfs/go-ipfs#2734](https://github.com/ipfs/go-ipfs/pull/2734)) + - Run `ipfs diag` commands in the daemon instead of the CLI. (@Kubuxu, [ipfs/go-ipfs#2761](https://github.com/ipfs/go-ipfs/pull/2761)) + - Fix protobuf encoding on the API and in commands. (@stebalien, [ipfs/go-ipfs#2516](https://github.com/ipfs/go-ipfs/pull/2516)) + - Fix goroutine leak in `/ipfs/ping` protocol handler. (@whyrusleeping, [ipfs/go-libp2p#58](https://github.com/ipfs/go-libp2p/pull/58)) + - Fix `--flags` option on `ipfs commands`. (@Kubuxu, [ipfs/go-ipfs#2773](https://github.com/ipfs/go-ipfs/pull/2773)) + - Fix the error channels in `namesys`. (@whyrusleeping, [ipfs/go-ipfs#2788](https://github.com/ipfs/go-ipfs/pull/2788)) + - Fix consumption of observed swarm addresses. (@whyrusleeping, [ipfs/go-libp2p#63](https://github.com/ipfs/go-libp2p/pull/63), [ipfs/go-ipfs#2771](https://github.com/ipfs/go-ipfs/issues/2771)) + - Fix a rare DHT panic. (@whyrusleeping, [ipfs/go-ipfs#2856](https://github.com/ipfs/go-ipfs/pull/2856)) + - Fix go-ipfs/js-ipfs interoperability issues in SPDY. (@whyrusleeping, [whyrusleeping/go-smux-spdystream@fae17783](https://github.com/whyrusleeping/go-smux-spdystream/commit/fae1778302a9e029bb308cf71cf33f857f2d89e8)) + - Fix a logging race condition during shutdown. (@Kubuxu, [ipfs/go-log#3](https://github.com/ipfs/go-log/pull/3)) + - Prevent DHT connection hangs. (@whyrusleeping, [ipfs/go-ipfs#2826](https://github.com/ipfs/go-ipfs/pull/2826), [ipfs/go-ipfs#2863](https://github.com/ipfs/go-ipfs/pull/2863)) + - Fix NDJSON output of `ipfs refs local`. (@Kubuxu, [ipfs/go-ipfs#2812](https://github.com/ipfs/go-ipfs/pull/2812)) + - Fix race condition in NAT detection. (@whyrusleeping, [ipfs/go-libp2p#69](https://github.com/ipfs/go-libp2p/pull/69)) + - Fix error messages. (@whyrusleeping, @Kubuxu, [ipfs/go-ipfs#2905](https://github.com/ipfs/go-ipfs/pull/2905), [ipfs/go-ipfs#2928](https://github.com/ipfs/go-ipfs/pull/2928)) + +- Enhancements + + - Increase maximum object size on `ipfs put` from 1 MiB to 2 MiB. The maximum object size on the wire including all framing is 4 MiB. (@kpcyrd, [ipfs/go-ipfs#2980](https://github.com/ipfs/go-ipfs/pull/2980)) + - Add CORS headers to the Gateway's default config. (@Kubuxu, [ipfs/go-ipfs#2778](https://github.com/ipfs/go-ipfs/pull/2778)) + - Clear the dial backoff for a peer when using `ipfs swarm connect`. (@whyrusleeping, [ipfs/go-ipfs#2941](https://github.com/ipfs/go-ipfs/pull/2941)) + - Allow passing options to the daemon in Docker containers. (@lgierth, [ipfs/go-ipfs#2955](https://github.com/ipfs/go-ipfs/pull/2955)) + - Add `-v/--verbose` to `ipfs swarm peers` command. (@csasarak, [ipfs/go-ipfs#2713](https://github.com/ipfs/go-ipfs/pull/2713)) + - Add `--format`, `--hash`, and `--size` options to `ipfs files stat` command. (@Kubuxu, [ipfs/go-ipfs#2706](https://github.com/ipfs/go-ipfs/pull/2706)) + - Add `--all` option to `ipfs version` command. (@Kubuxu, [ipfs/go-ipfs#2790](https://github.com/ipfs/go-ipfs/pull/2790)) + - Add `ipfs repo version` command. (@pfista, [ipfs/go-ipfs#2598](https://github.com/ipfs/go-ipfs/pull/2598)) + - Add `ipfs repo verify` command.
(@whyrusleeping, [ipfs/go-ipfs#2924](https://github.com/ipfs/go-ipfs/pull/2924), [ipfs/go-ipfs#2951](https://github.com/ipfs/go-ipfs/pull/2951)) + - Add `ipfs stats repo` and `ipfs stats bitswap` command aliases. (@pfista, [ipfs/go-ipfs#2810](https://github.com/ipfs/go-ipfs/pull/2810)) + - Add success indication to responses of `ipfs ping` command. (@Kubuxu, [ipfs/go-ipfs#2813](https://github.com/ipfs/go-ipfs/pull/2813)) + - Save changes made via `ipfs swarm filter` to the config file. (@yuvallanger, [ipfs/go-ipfs#2880](https://github.com/ipfs/go-ipfs/pull/2880)) + - Expand `ipfs_p2p_peers` metric to include libp2p transport. (@lgierth, [ipfs/go-ipfs#2728](https://github.com/ipfs/go-ipfs/pull/2728)) + - Rework `ipfs files add` internals to avoid caching and prevent memory leaks. (@whyrusleeping, [ipfs/go-ipfs#2795](https://github.com/ipfs/go-ipfs/pull/2795)) + - Support `GOPATH` with multiple path components. (@karalabe, @lgierth, @djdv, [ipfs/go-ipfs#2808](https://github.com/ipfs/go-ipfs/pull/2808), [ipfs/go-ipfs#2862](https://github.com/ipfs/go-ipfs/pull/2862), [ipfs/go-ipfs#2975](https://github.com/ipfs/go-ipfs/pull/2975)) + +- General Codebase + + - Take steps towards the `filestore` datastore. (@kevina, [ipfs/go-ipfs#2792](https://github.com/ipfs/go-ipfs/pull/2792), [ipfs/go-ipfs#2634](https://github.com/ipfs/go-ipfs/pull/2634)) + - Update recommended Golang version to 1.6.2. (@Kubuxu, [ipfs/go-ipfs#2724](https://github.com/ipfs/go-ipfs/pull/2724)) + - Update to Gx 0.8.0 and Gx-Go 1.2.1, which are faster and less noisy. (@whyrusleeping, [ipfs/go-ipfs#2979](https://github.com/ipfs/go-ipfs/pull/2979)) + - Use `go4.org/lock` instead of `camlistore/lock` for locking. (@whyrusleeping, [ipfs/go-ipfs#2887](https://github.com/ipfs/go-ipfs/pull/2887)) + - Manage `go.uuid`, `hamming`, `backoff`, `proquint`, `pb`, `go-context`, `cors`, `go-datastore` packages with Gx. (@Kubuxu, [ipfs/go-ipfs#2733](https://github.com/ipfs/go-ipfs/pull/2733), [ipfs/go-ipfs#2736](https://github.com/ipfs/go-ipfs/pull/2736), [ipfs/go-ipfs#2757](https://github.com/ipfs/go-ipfs/pull/2757), [ipfs/go-ipfs#2825](https://github.com/ipfs/go-ipfs/pull/2825), [ipfs/go-ipfs#2838](https://github.com/ipfs/go-ipfs/pull/2838)) + - Clean up the gateway's surface. (@lgierth, [ipfs/go-ipfs#2874](https://github.com/ipfs/go-ipfs/pull/2874)) + - Simplify the API gateway's access restrictions. (@lgierth, [ipfs/go-ipfs#2949](https://github.com/ipfs/go-ipfs/pull/2949), [ipfs/go-ipfs#2956](https://github.com/ipfs/go-ipfs/pull/2956)) + - Update Docker image to Alpine Linux 3.4 and remove Go version constraint. (@lgierth, [ipfs/go-ipfs#2901](https://github.com/ipfs/go-ipfs/pull/2901), [ipfs/go-ipfs#2929](https://github.com/ipfs/go-ipfs/pull/2929)) + - Clarify `Dockerfile` and `Dockerfile.fast`. (@lgierth, [ipfs/go-ipfs#2796](https://github.com/ipfs/go-ipfs/pull/2796)) + - Simplify resolution of Git commit refs in Dockerfiles. (@lgierth, [ipfs/go-ipfs#2754](https://github.com/ipfs/go-ipfs/pull/2754)) + - Consolidate `--verbose` description across commands. (@Kubuxu, [ipfs/go-ipfs#2746](https://github.com/ipfs/go-ipfs/pull/2746)) + - Allow setting position of default values in command option descriptions. (@Kubuxu, [ipfs/go-ipfs#2744](https://github.com/ipfs/go-ipfs/pull/2744)) + - Set explicit default values for boolean command options. (@RichardLitt, [ipfs/go-ipfs#2657](https://github.com/ipfs/go-ipfs/pull/2657)) + - Autogenerate command synopses.
(@Kubuxu, [ipfs/go-ipfs#2785](https://github.com/ipfs/go-ipfs/pull/2785)) + - Fix and improve lots of documentation. (@RichardLitt, [ipfs/go-ipfs#2741](https://github.com/ipfs/go-ipfs/pull/2741), [ipfs/go-ipfs#2781](https://github.com/ipfs/go-ipfs/pull/2781)) + - Improve command descriptions to fit a width of 78 characters. (@RichardLitt, [ipfs/go-ipfs#2779](https://github.com/ipfs/go-ipfs/pull/2779), [ipfs/go-ipfs#2780](https://github.com/ipfs/go-ipfs/pull/2780), [ipfs/go-ipfs#2782](https://github.com/ipfs/go-ipfs/pull/2782)) + - Fix filename conflict in the debugging guide. (@Kubuxu, [ipfs/go-ipfs#2752](https://github.com/ipfs/go-ipfs/pull/2752)) + - Decapitalize log messages according to Golang style guides. (@RichardLitt, [ipfs/go-ipfs#2853](https://github.com/ipfs/go-ipfs/pull/2853)) + - Add Github Issues HowTo guide. (@RichardLitt, @chriscool, [ipfs/go-ipfs#2889](https://github.com/ipfs/go-ipfs/pull/2889), [ipfs/go-ipfs#2895](https://github.com/ipfs/go-ipfs/pull/2895)) + - Add Github Issue template. (@chriscool, [ipfs/go-ipfs#2786](https://github.com/ipfs/go-ipfs/pull/2786)) + - Apply standard-readme to the README file. (@RichardLitt, [ipfs/go-ipfs#2883](https://github.com/ipfs/go-ipfs/pull/2883)) + - Fix issues pointed out by `govet`. (@Kubuxu, [ipfs/go-ipfs#2854](https://github.com/ipfs/go-ipfs/pull/2854)) + - Clarify `ipfs get` error message. (@whyrusleeping, [ipfs/go-ipfs#2886](https://github.com/ipfs/go-ipfs/pull/2886)) + - Remove dead code. (@whyrusleeping, [ipfs/go-ipfs#2819](https://github.com/ipfs/go-ipfs/pull/2819)) + - Add changelog for v0.4.3. (@lgierth, [ipfs/go-ipfs#2984](https://github.com/ipfs/go-ipfs/pull/2984)) + +- Tests & CI + + - Fix flaky `ipfs mount` sharness test by using the `iptb` tool. (@noffle, [ipfs/go-ipfs#2707](https://github.com/ipfs/go-ipfs/pull/2707)) + - Fix flaky IP port selection in tests. (@Kubuxu, [ipfs/go-ipfs#2855](https://github.com/ipfs/go-ipfs/pull/2855)) + - Fix CLI tests on OSX by resolving /tmp symlink. (@Kubuxu, [ipfs/go-ipfs#2926](https://github.com/ipfs/go-ipfs/pull/2926)) + - Fix flaky GC test by running the daemon in offline mode. (@Kubuxu, [ipfs/go-ipfs#2908](https://github.com/ipfs/go-ipfs/pull/2908)) + - Add tests for `ipfs add` with hidden files. (@Kubuxu, [ipfs/go-ipfs#2756](https://github.com/ipfs/go-ipfs/pull/2756)) + - Add test to make sure the body of HEAD responses is empty. (@Kubuxu, [ipfs/go-ipfs#2775](https://github.com/ipfs/go-ipfs/pull/2775)) + - Add test to catch misdials. (@Kubuxu, [ipfs/go-ipfs#2831](https://github.com/ipfs/go-ipfs/pull/2831)) + - Mark flaky tests for `ipfs dht query` as known failure. (@noffle, [ipfs/go-ipfs#2720](https://github.com/ipfs/go-ipfs/pull/2720)) + - Remove failing blockstore-without-context test. (@Kubuxu, [ipfs/go-ipfs#2857](https://github.com/ipfs/go-ipfs/pull/2857)) + - Fix `--version` tests for versions with a suffix like `-dev` or `-rc1`. (@lgierth, [ipfs/go-ipfs#2937](https://github.com/ipfs/go-ipfs/pull/2937)) + - Make sharness tests work in cases where go-ipfs is symlinked into GOPATH. (@lgierth, [ipfs/go-ipfs#2937](https://github.com/ipfs/go-ipfs/pull/2937)) + - Add variable delays to blockstore mocks. (@rikonor, [ipfs/go-ipfs#2871](https://github.com/ipfs/go-ipfs/pull/2871)) + - Disable Travis CI email notifications. (@Kubuxu, [ipfs/go-ipfs#2896](https://github.com/ipfs/go-ipfs/pull/2896)) + + ### 0.4.2 - 2016-05-17 This is a patch release which fixes performance and networking bugs in go-libp2p, @@ -27,14 +193,14 @@ There are also a few other nice improvements.
* Add a debug-guidelines document. (@richardlitt) * Update the contribute document. (@richardlitt) * Fix documentation of many `ipfs` commands. (@richardlitt) - * Fall back to ShortDesc if LongDesc is missing. (@kubuxu) + * Fall back to ShortDesc if LongDesc is missing. (@Kubuxu) * Removals * Remove -f option from `ipfs init` command. (@whyrusleeping) * Bugfixes * Fix `ipfs object patch` argument handling and validation. (@jbenet) - * Fix `ipfs config edit` command by running it client-side. (@kubuxu) + * Fix `ipfs config edit` command by running it client-side. (@Kubuxu) * Set default value for `ipfs refs` arguments. (@richardlitt) * Fix parsing of incorrect command and argument permutations. (@thomas-gardner) * Update Dockerfile to latest go1.5.4-r0. (@chriscool) @@ -58,7 +224,7 @@ There are also a few other nice improvements. * CI * Fix t0170-dht sharness test. (@chriscool) - * Increase timeout in t0060-daemon sharness test. (@kubuxu) + * Increase timeout in t0060-daemon sharness test. (@Kubuxu) * Have CircleCI use `make deps` instead of `gx` directly. (@whyrusleeping) @@ -89,7 +255,7 @@ hang bugfix that was shipped in the 0.4.0 release. * Bugfixes * fixed ipfs name resolve --local multihash error (@pfista) * ipfs patch commands won't return null links field anymore (@whyrusleeping) - * Make non recursive resolve print the result (@kubuxu) + * Make non recursive resolve print the result (@Kubuxu) * Output dirs on ipfs add -rn (@noffle) * update libp2p dep to fix hanging listeners problem (@whyrusleeping) * Fix Swarm.AddrFilters config setting with regard to `/ip6` addresses (@lgierth) diff --git a/Dockerfile b/Dockerfile index c72e274d6..7efa81ea7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.3 +FROM alpine:edge MAINTAINER Lars Gierth -# There is a copy of this Dockerfile in test/sharness, +# There is a copy of this Dockerfile called Dockerfile.fast, # which is optimized for build time, instead of image size. # # Please keep these two Dockerfiles in sync. @@ -29,7 +29,6 @@ ENV IPFS_PATH /data/ipfs # The default logging level ENV IPFS_LOGGING "" # Golang stuff -ENV GO_VERSION 1.5.4-r0 ENV GOPATH /go ENV PATH /go/bin:$PATH ENV SRC_PATH /go/src/github.com/ipfs/go-ipfs @@ -37,7 +36,7 @@ ENV SRC_PATH /go/src/github.com/ipfs/go-ipfs # Get the go-ipfs sourcecode COPY . $SRC_PATH -RUN apk add --update musl go=$GO_VERSION git bash wget ca-certificates \ +RUN apk add --update musl-dev gcc go git bash wget ca-certificates \ # Setup user and fs-repo directory && mkdir -p $IPFS_PATH \ && adduser -D -h $IPFS_PATH -u 1000 ipfs \ @@ -50,11 +49,7 @@ RUN apk add --update musl go=$GO_VERSION git bash wget ca-certificates \ # Invoke gx && cd $SRC_PATH \ && gx --verbose install --global \ - # We get the current commit using this hack, - # so that we don't have to copy all of .git/ into the build context. - # This saves us quite a bit of image size. 
- && ref="$(cat .git/HEAD | cut -d' ' -f2)" \ - && commit="$(cat .git/$ref | head -c 7)" \ + && mkdir .git/objects && commit=$(git rev-parse --short HEAD) \ && echo "ldflags=-X github.com/ipfs/go-ipfs/repo/config.CurrentCommit=$commit" \ # Build and install IPFS and entrypoint script && cd $SRC_PATH/cmd/ipfs \ @@ -63,7 +58,7 @@ RUN apk add --update musl go=$GO_VERSION git bash wget ca-certificates \ && cp $SRC_PATH/bin/container_daemon /usr/local/bin/start_ipfs \ && chmod 755 /usr/local/bin/start_ipfs \ # Remove all build-time dependencies - && apk del --purge musl go git && rm -rf $GOPATH && rm -vf $IPFS_PATH/api + && apk del --purge musl-dev gcc go git && rm -rf $GOPATH && rm -vf $IPFS_PATH/api # Call uid 1000 "ipfs" USER ipfs diff --git a/test/Dockerfile b/Dockerfile.fast similarity index 79% rename from test/Dockerfile rename to Dockerfile.fast index af8a59661..0993920b3 100644 --- a/test/Dockerfile +++ b/Dockerfile.fast @@ -1,12 +1,10 @@ -FROM alpine:3.3 +FROM alpine:edge MAINTAINER Lars Gierth -# This is a copy of the root Dockerfile, +# This is a copy of /Dockerfile, # except that we optimize for build time, instead of image size. # # Please keep these two Dockerfiles in sync. -# -# Only sections different from the root Dockerfile are commented. EXPOSE 4001 @@ -17,7 +15,6 @@ EXPOSE 8080 ENV GX_IPFS "" ENV IPFS_PATH /data/ipfs ENV IPFS_LOGGING "" -ENV GO_VERSION 1.5.4-r0 ENV GOPATH /go ENV PATH /go/bin:$PATH ENV SRC_PATH /go/src/github.com/ipfs/go-ipfs @@ -31,7 +28,7 @@ ENV SRC_PATH /go/src/github.com/ipfs/go-ipfs # and trigger a re-run of all following commands. COPY ./package.json $SRC_PATH/package.json -RUN apk add --update musl go=$GO_VERSION git bash wget ca-certificates \ +RUN apk add --update musl-dev gcc go git bash wget ca-certificates \ && mkdir -p $IPFS_PATH \ && adduser -D -h $IPFS_PATH -u 1000 ipfs \ && chown ipfs:ipfs $IPFS_PATH && chmod 755 $IPFS_PATH \ @@ -44,15 +41,14 @@ RUN apk add --update musl go=$GO_VERSION git bash wget ca-certificates \ COPY . 
$SRC_PATH RUN cd $SRC_PATH \ - && ref="$(cat .git/HEAD | cut -d' ' -f2)" \ - && commit="$(cat .git/$ref | head -c 7)" \ + && mkdir .git/objects && commit=$(git rev-parse --short HEAD) \ && echo "ldflags=-X github.com/ipfs/go-ipfs/repo/config.CurrentCommit=$commit" \ && cd $SRC_PATH/cmd/ipfs \ && go build -ldflags "-X github.com/ipfs/go-ipfs/repo/config.CurrentCommit=$commit" \ && cp ipfs /usr/local/bin/ipfs \ && cp $SRC_PATH/bin/container_daemon /usr/local/bin/start_ipfs \ && chmod 755 /usr/local/bin/start_ipfs \ - && apk del --purge musl go git && rm -rf $GOPATH && rm -vf $IPFS_PATH/api + && apk del --purge musl-dev gcc go git && rm -rf $GOPATH && rm -vf $IPFS_PATH/api USER ipfs VOLUME $IPFS_PATH diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 9e0e216d0..2f28be476 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -9,15 +9,6 @@ "ImportPath": "bazil.org/fuse", "Rev": "e4fcc9a2c7567d1c42861deebeb483315d222262" }, - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" - }, - { - "ImportPath": "github.com/bren2010/proquint", - "Rev": "5958552242606512f714d2e93513b380f43f9991" - }, { "ImportPath": "github.com/briantigerchow/pubsub", "Rev": "39ce5f556423a4c7223b370fa17a3bbd75b2d197" @@ -26,14 +17,6 @@ "ImportPath": "github.com/camlistore/lock", "Rev": "ae27720f340952636b826119b58130b9c1a847a0" }, - { - "ImportPath": "github.com/cenkalti/backoff", - "Rev": "9831e1e25c874e0a0601b6dc43641071414eec7a" - }, - { - "ImportPath": "github.com/cheggaaa/pb", - "Rev": "d7729fd7ec1372c15b83db39834bf842bf2d69fb" - }, { "ImportPath": "github.com/codahale/hdrhistogram", "Rev": "5fd85ec0b4e2dd5d4158d257d943f2e586d86b62" @@ -42,92 +25,22 @@ "ImportPath": "github.com/codahale/metrics", "Rev": "7c37910bc765e705301b159683480bdd44555c91" }, - { - "ImportPath": "github.com/cryptix/mdns", - "Rev": "04ff72a32679d57d009c0ac0fc5c4cda10350bad" - }, - { - "ImportPath": "github.com/docker/spdystream", - "Rev": "e372247595b2edd26f6d022288e97eed793d70a2" - }, - { - "ImportPath": "github.com/dustin/go-humanize", - "Rev": "00897f070f09f194c26d65afae734ba4c32404e8" - }, - { - "ImportPath": "github.com/dustin/randbo", - "Rev": "7f1b564ca7242d22bcc6e2128beb90d9fa38b9f0" - }, - { - "ImportPath": "github.com/facebookgo/atomicfile", - "Rev": "6f117f2e7f224fb03eb5e5fba370eade6e2b90c8" - }, - { - "ImportPath": "github.com/gogo/protobuf/io", - "Rev": "0ac967c269268f1af7d9bcc7927ccc9a589b2b36" - }, - { - "ImportPath": "github.com/gogo/protobuf/proto", - "Rev": "0ac967c269268f1af7d9bcc7927ccc9a589b2b36" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116" }, - { - "ImportPath": "github.com/ipfs/go-datastore", - "Rev": "e63957b6da369d986ef3e7a3f249779ba3f56c7e" - }, - { - "ImportPath": "github.com/jbenet/go-base58", - "Rev": "6237cf65f3a6f7111cd8a42be3590df99a66bc7d" - }, - { - "ImportPath": "github.com/jbenet/go-context/frac", - "Rev": "d14ea06fba99483203c19d92cfcd13ebe73135f4" - }, - { - "ImportPath": "github.com/jbenet/go-context/io", - "Rev": "d14ea06fba99483203c19d92cfcd13ebe73135f4" - }, { "ImportPath": "github.com/jbenet/go-detect-race", "Rev": "3463798d9574bd0b7eca275dccc530804ff5216f" }, - { - "ImportPath": "github.com/jbenet/go-fuse-version", - "Rev": "b733dfc0597e1f6780510ee7afad8b6e3c7af3eb" - }, { "ImportPath": "github.com/jbenet/go-is-domain", "Rev": "93b717f2ae17838a265e30277275ee99ee7198d6" }, - { - "ImportPath": "github.com/jbenet/go-msgio", - "Rev": 
"9399b44f6bf265b30bedaf2af8c0604bbc8d5275" - }, - { - "ImportPath": "github.com/jbenet/go-multiaddr", - "Comment": "0.1.2-38-gc13f11b", - "Rev": "c13f11bbfe6439771f4df7bfb330f686826144e8" - }, - { - "ImportPath": "github.com/jbenet/go-multiaddr-net", - "Rev": "4a8bd8f8baf45afcf2bb385bbc17e5208d5d4c71" - }, - { - "ImportPath": "github.com/jbenet/go-multihash", - "Comment": "0.1.0-39-ge8d2374", - "Rev": "e8d2374934f16a971d1e94a864514a21ac74bf7f" - }, { "ImportPath": "github.com/jbenet/go-os-rename", "Rev": "3ac97f61ef67a6b87b95c1282f6c317ed0e693c2" }, - { - "ImportPath": "github.com/jbenet/go-peerstream", - "Rev": "f3ab20739a88aa79306dc039c1b5a39e7afa45d6" - }, { "ImportPath": "github.com/jbenet/go-random", "Rev": "cd535bd25356746b9b1e824871dda7da932460e2" @@ -136,10 +49,6 @@ "ImportPath": "github.com/jbenet/go-random-files", "Rev": "737479700b40b4b50e914e963ce8d9d44603e3c8" }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, { "ImportPath": "github.com/mitchellh/go-homedir", "Rev": "1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4" @@ -148,27 +57,6 @@ "ImportPath": "github.com/mtchavez/jenkins", "Rev": "5a816af6ef21ef401bff5e4b7dd255d63400f497" }, - { - "ImportPath": "github.com/olekukonko/ts", - "Rev": "ecf753e7c962639ab5a1fb46f7da627d4c0a04b8" - }, - { - "ImportPath": "github.com/rs/cors", - "Rev": "5e4ce6bc0ecd3472f6f943666d84876691be2ced" - }, - { - "ImportPath": "github.com/satori/go.uuid", - "Rev": "7c7f2020c4c9491594b85767967f4619c2fa75f9" - }, - { - "ImportPath": "github.com/steakknife/hamming", - "Comment": "0.0.10", - "Rev": "8bad99011016569c05320e51be39c648679c5b73" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "4875955338b0a434238a31165cb87255ab6e9e4a" - }, { "ImportPath": "github.com/syndtr/gosnappy/snappy", "Rev": "156a073208e131d7d2e212cb749feae7c339e846" @@ -180,51 +68,6 @@ { "ImportPath": "github.com/whyrusleeping/chunker", "Rev": "537e901819164627ca4bb5ce4e3faa8ce7956564" - }, - { - "ImportPath": "github.com/whyrusleeping/go-logging", - "Rev": "128b9855511a4ea3ccbcf712695baf2bab72e134" - }, - { - "ImportPath": "github.com/whyrusleeping/go-metrics", - "Rev": "1cd8009604ec2238b5a71305a0ecd974066e0e16" - }, - { - "ImportPath": "github.com/whyrusleeping/go-sysinfo", - "Rev": "769b7c0b50e8030895abc74ba8107ac715e3162a" - }, - { - "ImportPath": "github.com/whyrusleeping/multiaddr-filter", - "Rev": "9e26222151125ecd3fc1fd190179b6bdd55f5608" - }, - { - "ImportPath": "golang.org/x/crypto/blowfish", - "Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3" - }, - { - "ImportPath": "golang.org/x/crypto/sha3", - "Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad" - }, - { - "ImportPath": "golang.org/x/net/internal/iana", - "Rev": "ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad" - }, - { - "ImportPath": "golang.org/x/net/ipv4", - "Rev": "ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad" - }, - { - "ImportPath": "golang.org/x/net/ipv6", - "Rev": "ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad" - }, - { - "ImportPath": "gopkg.in/fsnotify.v1", - "Comment": "v1.2.0", - "Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0" } ] } diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go index c0187a6b8..d12713cd0 100644 --- a/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go +++ 
b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go @@ -6,7 +6,7 @@ package syscallx // the right stuff in golang.org/x/sys/unix. import ( - "golang.org/x/sys/unix" + "gx/ipfs/QmXPKMT5cT8ajqamSD1YaeEpfeaHvs9AU4MQzte4Bkr6V4/sys/unix" ) func Getxattr(path string, attr string, dest []byte) (sz int, err error) { diff --git a/Godeps/_workspace/src/github.com/bren2010/proquint/README.md b/Godeps/_workspace/src/github.com/bren2010/proquint/README.md deleted file mode 100644 index 13e7b0b5e..000000000 --- a/Godeps/_workspace/src/github.com/bren2010/proquint/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Proquint -------- - -Golang implementation of [Proquint Pronounceable Identifiers](https://github.com/deoxxa/proquint). - - diff --git a/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go b/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go deleted file mode 100644 index 60e1cf981..000000000 --- a/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright (c) 2014 Brendan McMillion - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -package proquint - -import ( - "bytes" - "strings" - "regexp" -) - -var ( - conse = [...]byte{'b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', - 'p', 'r', 's', 't', 'v', 'z'} - vowse = [...]byte{'a', 'i', 'o', 'u'} - - consd = map[byte] uint16 { - 'b' : 0, 'd' : 1, 'f' : 2, 'g' : 3, - 'h' : 4, 'j' : 5, 'k' : 6, 'l' : 7, - 'm' : 8, 'n' : 9, 'p' : 10, 'r' : 11, - 's' : 12, 't' : 13, 'v' : 14, 'z' : 15, - } - - vowsd = map[byte] uint16 { - 'a' : 0, 'i' : 1, 'o' : 2, 'u' : 3, - } -) - -/** -* Tests if a given string is a Proquint identifier -* -* @param {string} str The candidate string. -* -* @return {bool} Whether or not it qualifies. -* @return {error} Error -*/ -func IsProquint(str string) (bool, error) { - exp := "^([abdfghijklmnoprstuvz]{5}-)*[abdfghijklmnoprstuvz]{5}$" - ok, err := regexp.MatchString(exp, str) - - return ok, err -} - -/** -* Encodes an arbitrary byte slice into an identifier. -* -* @param {[]byte} buf Slice of bytes to encode. -* -* @return {string} The given byte slice as an identifier. 
-*/ -func Encode(buf []byte) string { - var out bytes.Buffer - - for i := 0; i < len(buf); i = i + 2 { - var n uint16 = (uint16(buf[i]) * 256) + uint16(buf[i + 1]) - - var ( - c1 = n & 0x0f - v1 = (n >> 4) & 0x03 - c2 = (n >> 6) & 0x0f - v2 = (n >> 10) & 0x03 - c3 = (n >> 12) & 0x0f - ) - - out.WriteByte(conse[c1]) - out.WriteByte(vowse[v1]) - out.WriteByte(conse[c2]) - out.WriteByte(vowse[v2]) - out.WriteByte(conse[c3]) - - if (i + 2) < len(buf) { - out.WriteByte('-') - } - } - - return out.String() -} - -/** -* Decodes an identifier into its corresponding byte slice. -* -* @param {string} str Identifier to convert. -* -* @return {[]byte} The identifier as a byte slice. -*/ -func Decode(str string) []byte { - var ( - out bytes.Buffer - bits []string = strings.Split(str, "-") - ) - - for i := 0; i < len(bits); i++ { - var x uint16 = consd[bits[i][0]] + - (vowsd[bits[i][1]] << 4) + - (consd[bits[i][2]] << 6) + - (vowsd[bits[i][3]] << 10) + - (consd[bits[i][4]] << 12) - - out.WriteByte(byte(x >> 8)) - out.WriteByte(byte(x)) - } - - return out.Bytes() -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/.gitignore b/Godeps/_workspace/src/github.com/camlistore/lock/.gitignore deleted file mode 100644 index b25c15b81..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*~ diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/COPYING b/Godeps/_workspace/src/github.com/camlistore/lock/COPYING deleted file mode 100644 index d64569567..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/README.txt b/Godeps/_workspace/src/github.com/camlistore/lock/README.txt deleted file mode 100644 index a9eeb33de..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/README.txt +++ /dev/null @@ -1,3 +0,0 @@ -File locking library. - -See http://godoc.org/github.com/camlistore/lock diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock.go deleted file mode 100644 index 6268527b0..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sync" -) - -// Lock locks the given file, creating the file if necessary. If the -// file already exists, it must have zero size or an error is returned. -// The lock is an exclusive lock (a write lock), but locked files -// should neither be read from nor written to. Such files should have -// zero size and only exist to co-ordinate ownership across processes. -// -// A nil Closer is returned if an error occurred. Otherwise, close that -// Closer to release the lock. -// -// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s -// advisory locks. In particular, closing any other file descriptor for the -// same file will release the lock prematurely. -// -// Attempting to lock a file that is already locked by the current process -// has undefined behavior. -// -// On other operating systems, lock will fallback to using the presence and -// content of a file named name + '.lock' to implement locking behavior. -func Lock(name string) (io.Closer, error) { - return lockFn(name) -} - -var lockFn = lockPortable - -// Portable version not using fcntl. Doesn't handle crashes as gracefully, -// since it can leave stale lock files. -// TODO: write pid of owner to lock file and on race see if pid is -// still alive? -func lockPortable(name string) (io.Closer, error) { - absName, err := filepath.Abs(name) - if err != nil { - return nil, fmt.Errorf("can't Lock file %q: can't find abs path: %v", name, err) - } - fi, err := os.Stat(absName) - if err == nil && fi.Size() > 0 { - if isStaleLock(absName) { - os.Remove(absName) - } else { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - } - f, err := os.OpenFile(absName, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666) - if err != nil { - return nil, fmt.Errorf("failed to create lock file %s %v", absName, err) - } - if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil { - return nil, err - } - return &lockCloser{f: f, abs: absName}, nil -} - -type pidLockMeta struct { - OwnerPID int -} - -func isStaleLock(path string) bool { - f, err := os.Open(path) - if err != nil { - return false - } - defer f.Close() - var meta pidLockMeta - if json.NewDecoder(f).Decode(&meta) != nil { - return false - } - if meta.OwnerPID == 0 { - return false - } - p, err := os.FindProcess(meta.OwnerPID) - if err != nil { - // e.g. on Windows - return true - } - // On unix, os.FindProcess always is true, so we have to send - // it a signal to see if it's alive. 
- if signalZero != nil { - if p.Signal(signalZero) != nil { - return true - } - } - return false -} - -var signalZero os.Signal // nil or set by lock_sigzero.go - -type lockCloser struct { - f *os.File - abs string - once sync.Once - err error -} - -func (lc *lockCloser) Close() error { - lc.once.Do(lc.close) - return lc.err -} - -func (lc *lockCloser) close() { - if err := lc.f.Close(); err != nil { - lc.err = err - } - if err := os.Remove(lc.abs); err != nil { - lc.err = err - } -} - -var ( - lockmu sync.Mutex - locked = map[string]bool{} // abs path -> true -) - -// unlocker is used by the darwin and linux implementations with fcntl -// advisory locks. -type unlocker struct { - f *os.File - abs string -} - -func (u *unlocker) Close() error { - lockmu.Lock() - // Remove is not necessary but it's nice for us to clean up. - // If we do do this, though, it needs to be before the - // u.f.Close below. - os.Remove(u.abs) - if err := u.f.Close(); err != nil { - return err - } - delete(locked, u.abs) - lockmu.Unlock() - return nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_appengine.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_appengine.go deleted file mode 100644 index ab4cad6ab..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_appengine.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build appengine - -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "errors" - "io" -) - -func init() { - lockFn = lockAppEngine -} - -func lockAppEngine(name string) (io.Closer, error) { - return nil, errors.New("Lock not available on App Engine") -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_darwin_amd64.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_darwin_amd64.go deleted file mode 100644 index 9fea51fe8..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_darwin_amd64.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build darwin,amd64 -// +build !appengine - -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package lock - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - "unsafe" -) - -func init() { - lockFn = lockFcntl -} - -func lockFcntl(name string) (io.Closer, error) { - abs, err := filepath.Abs(name) - if err != nil { - return nil, err - } - lockmu.Lock() - if locked[abs] { - lockmu.Unlock() - return nil, fmt.Errorf("file %q already locked", abs) - } - locked[abs] = true - lockmu.Unlock() - - fi, err := os.Stat(name) - if err == nil && fi.Size() > 0 { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - - f, err := os.Create(name) - if err != nil { - return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err) - } - - // This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h. - // TODO: move this into the standard syscall package. - k := struct { - Start uint64 // sizeof(off_t): 8 - Len uint64 // sizeof(off_t): 8 - Pid uint32 // sizeof(pid_t): 4 - Type uint16 // sizeof(short): 2 - Whence uint16 // sizeof(short): 2 - }{ - Type: syscall.F_WRLCK, - Whence: uint16(os.SEEK_SET), - Start: 0, - Len: 0, // 0 means to lock the entire file. - Pid: uint32(os.Getpid()), - } - - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) - if errno != 0 { - f.Close() - return nil, errno - } - return &unlocker{f, abs}, nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_freebsd.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_freebsd.go deleted file mode 100644 index d3835d624..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_freebsd.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - "unsafe" -) - -func init() { - lockFn = lockFcntl -} - -func lockFcntl(name string) (io.Closer, error) { - abs, err := filepath.Abs(name) - if err != nil { - return nil, err - } - lockmu.Lock() - if locked[abs] { - lockmu.Unlock() - return nil, fmt.Errorf("file %q already locked", abs) - } - locked[abs] = true - lockmu.Unlock() - - fi, err := os.Stat(name) - if err == nil && fi.Size() > 0 { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - - f, err := os.Create(name) - if err != nil { - return nil, err - } - - // This type matches C's "struct flock" defined in /usr/include/fcntl.h. - // TODO: move this into the standard syscall package. - k := struct { - Start int64 /* off_t starting offset */ - Len int64 /* off_t len = 0 means until end of file */ - Pid int32 /* pid_t lock owner */ - Type int16 /* short lock type: read/write, etc. */ - Whence int16 /* short type of l_start */ - Sysid int32 /* int remote system id or zero for local */ - }{ - Start: 0, - Len: 0, // 0 means to lock the entire file. 
- Pid: int32(os.Getpid()), - Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), - Sysid: 0, - } - - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) - if errno != 0 { - f.Close() - return nil, errno - } - return &unlocker{f, abs}, nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_amd64.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_amd64.go deleted file mode 100644 index 3a7eb00a6..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_amd64.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build linux,amd64 -// +build !appengine - -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - "unsafe" -) - -func init() { - lockFn = lockFcntl -} - -func lockFcntl(name string) (io.Closer, error) { - abs, err := filepath.Abs(name) - if err != nil { - return nil, err - } - lockmu.Lock() - if locked[abs] { - lockmu.Unlock() - return nil, fmt.Errorf("file %q already locked", abs) - } - locked[abs] = true - lockmu.Unlock() - - fi, err := os.Stat(name) - if err == nil && fi.Size() > 0 { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - - f, err := os.Create(name) - if err != nil { - return nil, err - } - - // This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h. - // TODO: move this into the standard syscall package. - k := struct { - Type uint32 - Whence uint32 - Start uint64 - Len uint64 - Pid uint32 - }{ - Type: syscall.F_WRLCK, - Whence: uint32(os.SEEK_SET), - Start: 0, - Len: 0, // 0 means to lock the entire file. - Pid: uint32(os.Getpid()), - } - - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) - if errno != 0 { - f.Close() - return nil, errno - } - return &unlocker{f, abs}, nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_arm.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_arm.go deleted file mode 100644 index c2a0a102e..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_arm.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build linux,arm -// +build !appengine - -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package lock - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - "unsafe" -) - -func init() { - lockFn = lockFcntl -} - -func lockFcntl(name string) (io.Closer, error) { - abs, err := filepath.Abs(name) - if err != nil { - return nil, err - } - lockmu.Lock() - if locked[abs] { - lockmu.Unlock() - return nil, fmt.Errorf("file %q already locked", abs) - } - locked[abs] = true - lockmu.Unlock() - - fi, err := os.Stat(name) - if err == nil && fi.Size() > 0 { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - - f, err := os.Create(name) - if err != nil { - return nil, err - } - - // This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h. - // TODO: move this into the standard syscall package. - k := struct { - Type uint16 - Whence uint16 - Start uint32 - Len uint32 - Pid uint32 - }{ - Type: syscall.F_WRLCK, - Whence: uint16(os.SEEK_SET), - Start: 0, - Len: 0, // 0 means to lock the entire file. - Pid: uint32(os.Getpid()), - } - - const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059 - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k))) - if errno != 0 { - f.Close() - return nil, errno - } - return &unlocker{f, abs}, nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_plan9.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_plan9.go deleted file mode 100644 index bdf4e2292..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_plan9.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "fmt" - "io" - "os" - "path/filepath" -) - -func init() { - lockFn = lockPlan9 -} - -func lockPlan9(name string) (io.Closer, error) { - var f *os.File - abs, err := filepath.Abs(name) - if err != nil { - return nil, err - } - lockmu.Lock() - if locked[abs] { - lockmu.Unlock() - return nil, fmt.Errorf("file %q already locked", abs) - } - locked[abs] = true - lockmu.Unlock() - - fi, err := os.Stat(name) - if err == nil && fi.Size() > 0 { - return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) - } - - f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err) - } - - return &unlocker{f, abs}, nil -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_sigzero.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_sigzero.go deleted file mode 100644 index fd3ba2db1..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_sigzero.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !appengine -// +build linux darwin freebsd openbsd netbsd dragonfly - -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import "syscall" - -func init() { - signalZero = syscall.Signal(0) -} diff --git a/Godeps/_workspace/src/github.com/camlistore/lock/lock_test.go b/Godeps/_workspace/src/github.com/camlistore/lock/lock_test.go deleted file mode 100644 index 518d2f025..000000000 --- a/Godeps/_workspace/src/github.com/camlistore/lock/lock_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2013 The Go Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lock - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "strconv" - "testing" -) - -func TestLock(t *testing.T) { - testLock(t, false) -} - -func TestLockPortable(t *testing.T) { - testLock(t, true) -} - -func TestLockInChild(t *testing.T) { - f := os.Getenv("TEST_LOCK_FILE") - if f == "" { - // not child - return - } - lock := Lock - if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_PORTABLE")); v { - lock = lockPortable - } - - lk, err := lock(f) - if err != nil { - log.Fatalf("Lock failed: %v", err) - } - - if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_CRASH")); v { - // Simulate a crash, or at least not unlocking the - // lock. We still exit 0 just to simplify the parent - // process exec code. 
- os.Exit(0) - } - lk.Close() -} - -func testLock(t *testing.T, portable bool) { - lock := Lock - if portable { - lock = lockPortable - } - - td, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - - path := filepath.Join(td, "foo.lock") - - childLock := func(crash bool) error { - cmd := exec.Command(os.Args[0], "-test.run=LockInChild$") - cmd.Env = []string{"TEST_LOCK_FILE=" + path} - if portable { - cmd.Env = append(cmd.Env, "TEST_LOCK_PORTABLE=1") - } - if crash { - cmd.Env = append(cmd.Env, "TEST_LOCK_CRASH=1") - } - out, err := cmd.CombinedOutput() - t.Logf("Child output: %q (err %v)", out, err) - if err != nil { - return fmt.Errorf("Child Process lock of %s failed: %v %s", path, err, out) - } - return nil - } - - t.Logf("Locking in crashing child...") - if err := childLock(true); err != nil { - t.Fatalf("first lock in child process: %v", err) - } - - t.Logf("Locking+unlocking in child...") - if err := childLock(false); err != nil { - t.Fatalf("lock in child process after crashing child: %v", err) - } - - t.Logf("Locking in parent...") - lk1, err := lock(path) - if err != nil { - t.Fatal(err) - } - - t.Logf("Again in parent...") - _, err = lock(path) - if err == nil { - t.Fatal("expected second lock to fail") - } - - t.Logf("Locking in child...") - if childLock(false) == nil { - t.Fatalf("expected lock in child process to fail") - } - - t.Logf("Unlocking lock in parent") - if err := lk1.Close(); err != nil { - t.Fatal(err) - } - - lk3, err := lock(path) - if err != nil { - t.Fatal(err) - } - lk3.Close() -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore b/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml b/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index ce9cb6233..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: 1.3.3 diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE b/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
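The camlistore/lock package removed above exposes a single exported entry point, `Lock(name string) (io.Closer, error)` (the symbol exercised by lock_test.go), with fcntl, Plan 9, and portable implementations selected per platform; the portable fallback uses a signal-zero probe to test whether the previous lock holder is still alive. A minimal usage sketch, assuming the upstream import path `github.com/camlistore/lock` (the vendored path differs):

```go
// A sketch, not part of this diff: acquiring the advisory lock and
// performing the signal-zero liveness probe that lock_sigzero.go enables.
package main

import (
	"log"
	"os"
	"syscall"

	lock "github.com/camlistore/lock" // assumed upstream import path
)

func main() {
	// Lock returns an io.Closer; closing it releases the lock.
	closer, err := lock.Lock("/tmp/example.lock")
	if err != nil {
		log.Fatalf("lock: %v", err)
	}
	defer closer.Close()

	// Signal 0 delivers nothing, but the returned error reports whether
	// the process exists and can be signalled (Unix only).
	p, _ := os.FindProcess(os.Getpid())
	alive := p.Signal(syscall.Signal(0)) == nil
	log.Printf("process alive: %v", alive)
}
```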
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md b/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md
deleted file mode 100644
index 8e2612e63..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# backoff
-
-[![GoDoc](https://godoc.org/github.com/cenkalti/backoff?status.png)](https://godoc.org/github.com/cenkalti/backoff)
-[![Build Status](https://travis-ci.org/cenkalti/backoff.png)](https://travis-ci.org/cenkalti/backoff)
-
-This is a Go port of the exponential backoff algorithm from
-[google-http-java-client](https://code.google.com/p/google-http-java-client/wiki/ExponentialBackoff).
-
-[Exponential backoff](http://en.wikipedia.org/wiki/Exponential_backoff)
-is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
-in order to gradually find an acceptable rate.
-The retry intervals increase exponentially and stop increasing once a certain threshold is reached.
-
-
-
-
-## Install
-
-```bash
-go get github.com/cenkalti/backoff
-```
-
-## Example
-
-A simple retry helper that uses the exponential back-off algorithm:
-
-```go
-operation := func() error {
-	// An operation that might fail
-}
-
-err := backoff.Retry(operation, backoff.NewExponentialBackOff())
-if err != nil {
-	// handle error
-}
-
-// operation is successful
-```
-
-Ticker example:
-
-```go
-operation := func() error {
-	// An operation that may fail
-}
-
-b := backoff.NewExponentialBackOff()
-ticker := backoff.NewTicker(b)
-
-var err error
-
-// Ticks will continue to arrive when the previous operation is still running,
-// so operations that take a while to fail could run in quick succession.
-for _ = range ticker.C {
-	if err = operation(); err != nil {
-		log.Println(err, "will retry...")
-		continue
-	}
-
-	ticker.Stop()
-	break
-}
-
-if err != nil {
-	// Operation has failed.
-}
-
-// Operation is successful.
-```
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go
deleted file mode 100644
index 25870d2fc..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Package backoff implements backoff algorithms for retrying operations.
-//
-// It also has a Retry() helper for retrying operations that may fail.
-package backoff
-
-import "time"
-
-// BackOff is a back-off policy for retrying an operation.
-type BackOff interface {
-	// NextBackOff returns the duration to wait before retrying the operation,
-	// or backoff.Stop to indicate that no more retries should be made.
-	//
-	// Example usage:
-	//
-	// 	duration := backoff.NextBackOff()
-	// 	if duration == backoff.Stop {
-	// 		// do not retry operation
-	// 	} else {
-	// 		// sleep for duration and retry operation
-	// 	}
-	//
-	NextBackOff() time.Duration
-
-	// Reset to initial state.
-	Reset()
-}
-
-// Stop is returned by NextBackOff() to indicate that no more retries should be made.
-const Stop time.Duration = -1 - -// ZeroBackOff is a fixed back-off policy whose back-off time is always zero, -// meaning that the operation is retried immediately without waiting. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed back-off policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should not be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go deleted file mode 100644 index 24c49947b..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package backoff - -import ( - "time" - - "testing" -) - -func TestNextBackOffMillis(t *testing.T) { - subtestNextBackOff(t, 0, new(ZeroBackOff)) - subtestNextBackOff(t, Stop, new(StopBackOff)) -} - -func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) { - for i := 0; i < 10; i++ { - next := backOffPolicy.NextBackOff() - if next != expectedValue { - t.Errorf("got: %d expected: %d", next, expectedValue) - } - } -} - -func TestConstantBackOff(t *testing.T) { - backoff := NewConstantBackOff(time.Second) - if backoff.NextBackOff() != time.Second { - t.Error("invalid interval") - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index de75e8584..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,141 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is an implementation of BackOff that increases the back off -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized_interval = - retry_interval * (random value in range [1 - randomization_factor, 1 + randomization_factor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. For example, using 2 seconds as the base retry -interval and 0.5 as the randomization factor, the actual back off period used in the next retry -attempt will be between 1 and 3 seconds. - -NOTE: max_interval caps the retry_interval and not the randomized_interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -max_elapsed_time then the method NextBackOff() starts returning backoff.Stop. -The elapsed time can be reset by calling Reset(). - -EXAMPLE: The default retry_interval is .5 seconds, default randomization_factor is 0.5, default -multiplier is 1.5 and the default max_interval is 1 minute. 
For 10 tries the sequence will be
-(values in seconds) and assuming we go over the max_elapsed_time on the 10th try:
-
-	request#     retry_interval     randomized_interval
-
-	 1             0.5                [0.25,   0.75]
-	 2             0.75               [0.375,  1.125]
-	 3             1.125              [0.562,  1.687]
-	 4             1.687              [0.8435, 2.53]
-	 5             2.53               [1.265,  3.795]
-	 6             3.795              [1.897,  5.692]
-	 7             5.692              [2.846,  8.538]
-	 8             8.538              [4.269, 12.807]
-	 9            12.807              [6.403, 19.210]
-	10            19.210              backoff.Stop
-
-Implementation is not thread-safe.
-*/
-type ExponentialBackOff struct {
-	InitialInterval     time.Duration
-	RandomizationFactor float64
-	Multiplier          float64
-	MaxInterval         time.Duration
-	// After MaxElapsedTime the ExponentialBackOff stops.
-	// It never stops if MaxElapsedTime == 0.
-	MaxElapsedTime time.Duration
-	Clock          Clock
-
-	currentInterval time.Duration
-	startTime       time.Time
-}
-
-// Clock is an interface that returns current time for BackOff.
-type Clock interface {
-	Now() time.Time
-}
-
-// Default values for ExponentialBackOff.
-const (
-	DefaultInitialInterval     = 500 * time.Millisecond
-	DefaultRandomizationFactor = 0.5
-	DefaultMultiplier          = 1.5
-	DefaultMaxInterval         = 60 * time.Second
-	DefaultMaxElapsedTime      = 15 * time.Minute
-)
-
-// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
-	return &ExponentialBackOff{
-		InitialInterval:     DefaultInitialInterval,
-		RandomizationFactor: DefaultRandomizationFactor,
-		Multiplier:          DefaultMultiplier,
-		MaxInterval:         DefaultMaxInterval,
-		MaxElapsedTime:      DefaultMaxElapsedTime,
-		Clock:               SystemClock,
-	}
-}
-
-type systemClock struct{}
-
-func (t systemClock) Now() time.Time {
-	return time.Now()
-}
-
-// SystemClock implements the Clock interface using time.Now().
-var SystemClock = systemClock{}
-
-// Reset sets the interval back to the initial retry interval and restarts the timer.
-func (b *ExponentialBackOff) Reset() {
-	b.currentInterval = b.InitialInterval
-	b.startTime = b.Clock.Now()
-}
-
-// NextBackOff calculates the next back off interval using the formula:
-// 	randomized_interval = retry_interval +/- (randomization_factor * retry_interval)
-func (b *ExponentialBackOff) NextBackOff() time.Duration {
-	// Make sure we have not gone over the maximum elapsed time.
-	if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
-		return Stop
-	}
-	defer b.incrementCurrentInterval()
-	return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
-}
-
-// GetElapsedTime returns the elapsed time since the ExponentialBackOff instance
-// was created; it is reset when Reset() is called.
-//
-// The elapsed time is computed using the configured Clock, which defaults to time.Now().
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
-	return b.Clock.Now().Sub(b.startTime)
-}
-
-// Increments the current interval by multiplying it with the multiplier.
-func (b *ExponentialBackOff) incrementCurrentInterval() {
-	// Check for overflow; if overflow is detected, set the current interval to the max interval.
-	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
-		b.currentInterval = b.MaxInterval
-	} else {
-		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
-	}
-}
-
-// Returns a random value from the interval:
-// 	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go deleted file mode 100644 index 2af22b8bd..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package backoff - -import ( - "math" - "testing" - "time" -) - -func TestBackOff(t *testing.T) { - var ( - testInitialInterval = 500 * time.Millisecond - testRandomizationFactor = 0.1 - testMultiplier = 2.0 - testMaxInterval = 5 * time.Second - testMaxElapsedTime = 15 * time.Minute - ) - - exp := NewExponentialBackOff() - exp.InitialInterval = testInitialInterval - exp.RandomizationFactor = testRandomizationFactor - exp.Multiplier = testMultiplier - exp.MaxInterval = testMaxInterval - exp.MaxElapsedTime = testMaxElapsedTime - exp.Reset() - - var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000} - for i, d := range expectedResults { - expectedResults[i] = d * time.Millisecond - } - - for _, expected := range expectedResults { - assertEquals(t, expected, exp.currentInterval) - // Assert that the next back off falls in the expected range. - var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected)) - var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected)) - var actualInterval = exp.NextBackOff() - if !(minInterval <= actualInterval && actualInterval <= maxInterval) { - t.Error("error") - } - } -} - -func TestGetRandomizedInterval(t *testing.T) { - // 33% chance of being 1. - assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2)) - assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2)) - // 33% chance of being 2. - assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2)) - assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2)) - // 33% chance of being 3. - assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2)) - assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2)) -} - -type TestClock struct { - i time.Duration - start time.Time -} - -func (c *TestClock) Now() time.Time { - t := c.start.Add(c.i) - c.i += time.Second - return t -} - -func TestGetElapsedTime(t *testing.T) { - var exp = NewExponentialBackOff() - exp.Clock = &TestClock{} - exp.Reset() - - var elapsedTime = exp.GetElapsedTime() - if elapsedTime != time.Second { - t.Errorf("elapsedTime=%d", elapsedTime) - } -} - -func TestMaxElapsedTime(t *testing.T) { - var exp = NewExponentialBackOff() - exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)} - if exp.NextBackOff() != Stop { - t.Error("error2") - } - // Change the currentElapsedTime to be 0 ensuring that the elapsed time will be greater - // than the max elapsed time. 
-	exp.startTime = time.Time{}
-	assertEquals(t, Stop, exp.NextBackOff())
-}
-
-func TestBackOffOverflow(t *testing.T) {
-	var (
-		testInitialInterval time.Duration = math.MaxInt64 / 2
-		testMaxInterval     time.Duration = math.MaxInt64
-		testMultiplier      float64       = 2.1
-	)
-
-	exp := NewExponentialBackOff()
-	exp.InitialInterval = testInitialInterval
-	exp.Multiplier = testMultiplier
-	exp.MaxInterval = testMaxInterval
-	exp.Reset()
-
-	exp.NextBackOff()
-	// Assert that when an overflow is possible, the current interval is set to the max interval.
-	assertEquals(t, testMaxInterval, exp.currentInterval)
-}
-
-func assertEquals(t *testing.T, expected, value time.Duration) {
-	if expected != value {
-		t.Errorf("got: %d, expected: %d", value, expected)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go
deleted file mode 100644
index 80c547767..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package backoff
-
-import "time"
-
-// Retry runs the function f until it does not return an error or the BackOff stops.
-// f is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-//
-// Usage:
-// 	operation := func() error {
-// 		// An operation that may fail
-// 	}
-//
-// 	err := backoff.Retry(operation, backoff.NewExponentialBackOff())
-// 	if err != nil {
-// 		// Operation has failed.
-// 	}
-//
-// 	// Operation is successful.
-//
-func Retry(f func() error, b BackOff) error { return RetryNotify(f, b, nil) }
-
-// RetryNotify calls the notify function with the error and wait duration for each failed attempt before sleeping.
-func RetryNotify(f func() error, b BackOff, notify func(err error, wait time.Duration)) error {
-	var err error
-	var next time.Duration
-
-	b.Reset()
-	for {
-		if err = f(); err == nil {
-			return nil
-		}
-
-		if next = b.NextBackOff(); next == Stop {
-			return err
-		}
-
-		if notify != nil {
-			notify(err, next)
-		}
-
-		time.Sleep(next)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go
deleted file mode 100644
index c0d25ab76..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package backoff
-
-import (
-	"errors"
-	"log"
-	"testing"
-)
-
-func TestRetry(t *testing.T) {
-	const successOn = 3
-	var i = 0
-
-	// This function succeeds on the "successOn"th call.
-	f := func() error {
-		i++
-		log.Printf("function is called %d. time\n", i)
-
-		if i == successOn {
-			log.Println("OK")
-			return nil
-		}
-
-		log.Println("error")
-		return errors.New("error")
-	}
-
-	err := Retry(f, NewExponentialBackOff())
-	if err != nil {
-		t.Errorf("unexpected error: %s", err.Error())
-	}
-	if i != successOn {
-		t.Errorf("invalid number of retries: %d", i)
-	}
-}
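The retry.go just removed delegates Retry to RetryNotify, whose callback observes every failed attempt together with the wait the BackOff chose. A minimal sketch of that API, assuming the upstream import path `github.com/cenkalti/backoff`; `fetch` is a hypothetical fallible operation:

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff" // assumed upstream import path
)

// fetch stands in for any operation that can fail transiently.
func fetch() error { return errors.New("transient failure") }

func main() {
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = 5 * time.Second // give up after roughly 5s of retries

	// The notify callback runs after each failure, before the sleep.
	err := backoff.RetryNotify(fetch, b, func(err error, wait time.Duration) {
		log.Printf("attempt failed (%v); sleeping %s before retrying", err, wait)
	})
	if err != nil {
		log.Printf("gave up: %v", err)
	}
}
```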
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go
deleted file mode 100644
index 17ace5660..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package backoff
-
-import (
-	"runtime"
-	"sync"
-	"time"
-)
-
-// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
-//
-// Ticks will continue to arrive when the previous operation is still running,
-// so operations that take a while to fail could run in quick succession.
-//
-// Usage:
-// 	operation := func() error {
-// 		// An operation that may fail
-// 	}
-//
-// 	b := backoff.NewExponentialBackOff()
-// 	ticker := backoff.NewTicker(b)
-//
-// 	var err error
-// 	for _ = range ticker.C {
-// 		if err = operation(); err != nil {
-// 			log.Println(err, "will retry...")
-// 			continue
-// 		}
-//
-// 		ticker.Stop()
-// 		break
-// 	}
-//
-// 	if err != nil {
-// 		// Operation has failed.
-// 	}
-//
-// 	// Operation is successful.
-//
-type Ticker struct {
-	C        <-chan time.Time
-	c        chan time.Time
-	b        BackOff
-	stop     chan struct{}
-	stopOnce sync.Once
-}
-
-// NewTicker returns a new Ticker containing a channel that will send the time at times
-// specified by the BackOff argument. Ticker is guaranteed to tick at least once.
-// The channel is closed when the Stop method is called or the BackOff stops.
-func NewTicker(b BackOff) *Ticker {
-	c := make(chan time.Time)
-	t := &Ticker{
-		C:    c,
-		c:    c,
-		b:    b,
-		stop: make(chan struct{}),
-	}
-	go t.run()
-	runtime.SetFinalizer(t, (*Ticker).Stop)
-	return t
-}
-
-// Stop turns off a ticker. After Stop, no more ticks will be sent.
-func (t *Ticker) Stop() {
-	t.stopOnce.Do(func() { close(t.stop) })
-}
-
-func (t *Ticker) run() {
-	c := t.c
-	defer close(c)
-	t.b.Reset()
-
-	// Ticker is guaranteed to tick at least once.
-	afterC := t.send(time.Now())
-
-	for {
-		if afterC == nil {
-			return
-		}
-
-		select {
-		case tick := <-afterC:
-			afterC = t.send(tick)
-		case <-t.stop:
-			t.c = nil // Prevent future ticks from being sent to the channel.
-			return
-		}
-	}
-}
-
-func (t *Ticker) send(tick time.Time) <-chan time.Time {
-	select {
-	case t.c <- tick:
-	case <-t.stop:
-		return nil
-	}
-
-	next := t.b.NextBackOff()
-	if next == Stop {
-		t.Stop()
-		return nil
-	}
-
-	return time.After(next)
-}
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go
deleted file mode 100644
index 7c392df46..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package backoff
-
-import (
-	"errors"
-	"log"
-	"testing"
-)
-
-func TestTicker(t *testing.T) {
-	const successOn = 3
-	var i = 0
-
-	// This function succeeds on the "successOn"th call.
-	f := func() error {
-		i++
-		log.Printf("function is called %d. time\n", i)
-
-		if i == successOn {
-			log.Println("OK")
-			return nil
-		}
-
-		log.Println("error")
-		return errors.New("error")
-	}
-
-	b := NewExponentialBackOff()
-	ticker := NewTicker(b)
-
-	var err error
-	for _ = range ticker.C {
-		if err = f(); err != nil {
-			t.Log(err)
-			continue
-		}
-
-		break
-	}
-	if err != nil {
-		t.Errorf("unexpected error: %s", err.Error())
-	}
-	if i != successOn {
-		t.Errorf("invalid number of retries: %d", i)
-	}
-}
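To make the randomization formula documented in exponential.go concrete: with currentInterval = 2s and RandomizationFactor = 0.5, delta = 1s, so the next back-off is drawn from [1s, 3s]. A self-contained sketch that mirrors the deleted getRandomValueFromInterval helper (names here are illustrative, not the package's exported API):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomizedInterval picks a duration from
// [current - delta, current + delta], where delta = factor * current,
// matching the formula in the ExponentialBackOff documentation above.
func randomizedInterval(factor, random float64, current time.Duration) time.Duration {
	delta := factor * float64(current)
	min := float64(current) - delta
	max := float64(current) + delta
	return time.Duration(min + random*(max-min+1))
}

func main() {
	// With a 2s interval and factor 0.5 the result always lands in [1s, 3s].
	for i := 0; i < 3; i++ {
		fmt.Println(randomizedInterval(0.5, rand.Float64(), 2*time.Second))
	}
}
```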
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/LICENSE b/Godeps/_workspace/src/github.com/cheggaaa/pb/LICENSE
deleted file mode 100644
index 13ef3fe53..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
-Copyright (c) 2012, Sergey Cherepanov
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/README.md b/Godeps/_workspace/src/github.com/cheggaaa/pb/README.md
deleted file mode 100644
index af5c4bd50..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-## Terminal progress bar for Go
-
-A simple progress bar for console programs.
-
-
-### Installation
-```
-go get github.com/cheggaaa/pb
-```
-
-### Usage
-```Go
-package main
-
-import (
-	"github.com/cheggaaa/pb"
-	"time"
-)
-
-func main() {
-	count := 100000
-	bar := pb.StartNew(count)
-	for i := 0; i < count; i++ {
-		bar.Increment()
-		time.Sleep(time.Millisecond)
-	}
-	bar.FinishPrint("The End!")
-}
-```
-The result will look like this:
-```
-> go run test.go
-37158 / 100000 [================>_______________________________] 37.16% 1m11s
-```
-
-
-More functions?
-```Go
-// create bar
-bar := pb.New(count)
-
-// refresh info every second (default 200ms)
-bar.SetRefreshRate(time.Second)
-
-// show percents (by default already true)
-bar.ShowPercent = true
-
-// show bar (by default already true)
-bar.ShowBar = true
-
-// no counters needed
-bar.ShowCounters = false
-
-// show "time left"
-bar.ShowTimeLeft = true
-
-// show average speed
-bar.ShowSpeed = true
-
-// sets the width of the progress bar
-bar.SetWidth(80)
-
-// sets the width of the progress bar; ignored if the terminal is smaller
-bar.SetMaxWidth(80)
-
-// convert output to a readable format (like KB, MB)
-bar.SetUnits(pb.U_BYTES)
-
-// and start
-bar.Start()
-```
-
-Want to handle the progress of io operations?
-```Go
-// create and start bar
-bar := pb.New(myDataLen).SetUnits(pb.U_BYTES)
-bar.Start()
-
-// my io.Reader
-r := myReader
-
-// my io.Writer
-w := myWriter
-
-// create multi writer
-writer := io.MultiWriter(w, bar)
-
-// and copy
-io.Copy(writer, r)
-
-// see example/copy/copy.go for an advanced example
-
-```
-
-Don't like the looks?
-```Go
-bar.Format("<.- >")
-```
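The README's io example wraps the writer with io.MultiWriter; pb.go (later in this hunk) also exposes a NewProxyReader helper that counts on the read side instead. A minimal sketch of that variant, assuming the upstream import path `github.com/cheggaaa/pb`; file names are illustrative and error handling is elided:

```go
package main

import (
	"io"
	"os"

	"github.com/cheggaaa/pb" // assumed upstream import path
)

func main() {
	src, _ := os.Open("input.bin") // hypothetical input file
	defer src.Close()
	dst, _ := os.Create("output.bin")
	defer dst.Close()

	fi, _ := src.Stat()
	bar := pb.New64(fi.Size()).SetUnits(pb.U_BYTES)
	bar.Start()

	// Every Read through the proxy advances the bar.
	io.Copy(dst, bar.NewProxyReader(src))
	bar.Finish()
}
```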
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/example/copy/copy.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/example/copy/copy.go
deleted file mode 100644
index 2576b1d16..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/example/copy/copy.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb"
-	"io"
-	"net/http"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-)
-
-func main() {
-	// check args
-	if len(os.Args) < 3 {
-		printUsage()
-		return
-	}
-	sourceName, destName := os.Args[1], os.Args[2]
-
-	// check source
-	var source io.Reader
-	var sourceSize int64
-	if strings.HasPrefix(sourceName, "http://") {
-		// open as url
-		resp, err := http.Get(sourceName)
-		if err != nil {
-			fmt.Printf("Can't get %s: %v\n", sourceName, err)
-			return
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != http.StatusOK {
-			fmt.Printf("Server returned non-200 status: %v\n", resp.Status)
-			return
-		}
-		i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
-		sourceSize = int64(i)
-		source = resp.Body
-	} else {
-		// open as file
-		s, err := os.Open(sourceName)
-		if err != nil {
-			fmt.Printf("Can't open %s: %v\n", sourceName, err)
-			return
-		}
-		defer s.Close()
-		// get source size
-		sourceStat, err := s.Stat()
-		if err != nil {
-			fmt.Printf("Can't stat %s: %v\n", sourceName, err)
-			return
-		}
-		sourceSize = sourceStat.Size()
-		source = s
-	}
-
-	// create dest
-	dest, err := os.Create(destName)
-	if err != nil {
-		fmt.Printf("Can't create %s: %v\n", destName, err)
-		return
-	}
-	defer dest.Close()
-
-	// create bar
-	bar := pb.New(int(sourceSize)).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10)
-	bar.ShowSpeed = true
-	bar.Start()
-
-	// create multi writer
-	writer := io.MultiWriter(dest, bar)
-
-	// and copy
-	io.Copy(writer, source)
-	bar.Finish()
-}
-
-func printUsage() {
-	fmt.Println("copy [source file or url] [dest file]")
-}
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/example/pb.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/example/pb.go
deleted file mode 100644
index 659277d61..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/example/pb.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package main
-
-import (
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb"
-	"time"
-)
-
-func main() {
-	count := 5000
-	bar := pb.New(count)
-
-	// show percents (by default already true)
-	bar.ShowPercent = true
-
-	// show bar (by default already true)
-	bar.ShowBar = true
-
-	// show counters
-	bar.ShowCounters = true
-
-	bar.ShowTimeLeft = true
-
-	// and start
-	bar.Start()
-	for i := 0; i < count; i++ {
-		bar.Increment()
-		time.Sleep(time.Millisecond)
-	}
-	bar.FinishPrint("The End!")
-}
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go
deleted file mode 100644
index 1dd210be4..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package pb
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-type Units int
-
-const (
-	// By default, without type handling
-	U_NO Units = iota
-	// Handle as b, Kb, Mb, etc
-	U_BYTES
-)
-
-// Format an integer according to the given units
-func Format(i int64, units Units) string {
-	switch units {
-	case U_BYTES:
-		return FormatBytes(i)
-	default:
-		// by default just convert to string
-		return strconv.FormatInt(i, 10)
-	}
-}
-
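A quick illustration of the Format dispatch just shown, with the byte formatting delegated to FormatBytes (defined next); the import path is the assumed upstream one:

```go
package main

import (
	"fmt"

	"github.com/cheggaaa/pb" // assumed upstream import path
)

func main() {
	n := int64(52428800) // 50 * 1024 * 1024
	fmt.Println(pb.Format(n, pb.U_NO))    // plain integer: "52428800"
	fmt.Println(pb.Format(n, pb.U_BYTES)) // human-readable: "50.00 MB"
}
```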
-// Convert bytes to a human-readable string, like 2 MB, 64.2 KB, 52 B
-func FormatBytes(i int64) (result string) {
-	switch {
-	case i > (1024 * 1024 * 1024 * 1024):
-		result = fmt.Sprintf("%#.02f TB", float64(i)/1024/1024/1024/1024)
-	case i > (1024 * 1024 * 1024):
-		result = fmt.Sprintf("%#.02f GB", float64(i)/1024/1024/1024)
-	case i > (1024 * 1024):
-		result = fmt.Sprintf("%#.02f MB", float64(i)/1024/1024)
-	case i > 1024:
-		result = fmt.Sprintf("%#.02f KB", float64(i)/1024)
-	default:
-		result = fmt.Sprintf("%d B", i)
-	}
-	result = strings.Trim(result, " ")
-	return
-}
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go
deleted file mode 100644
index b76275e29..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package pb
-
-import (
-	"fmt"
-	"strconv"
-	"testing"
-)
-
-func Test_DefaultsToInteger(t *testing.T) {
-	value := int64(1000)
-	expected := strconv.Itoa(int(value))
-	actual := Format(value, -1)
-
-	if actual != expected {
-		t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
-	}
-}
-
-func Test_CanFormatAsInteger(t *testing.T) {
-	value := int64(1000)
-	expected := strconv.Itoa(int(value))
-	actual := Format(value, U_NO)
-
-	if actual != expected {
-		t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
-	}
-}
-
-func Test_CanFormatAsBytes(t *testing.T) {
-	value := int64(1000)
-	expected := "1000 B"
-	actual := Format(value, U_BYTES)
-
-	if actual != expected {
-		t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go
deleted file mode 100644
index 02303ee82..000000000
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go
+++ /dev/null
@@ -1,352 +0,0 @@
-package pb
-
-import (
-	"fmt"
-	"io"
-	"math"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-const (
-	// Default refresh rate - 200ms
-	DEFAULT_REFRESH_RATE = time.Millisecond * 200
-	FORMAT               = "[=>-]"
-)
-
-// DEPRECATED
-// variables for backward compatibility; they no longer have any effect.
-// Use pb.Format and pb.SetRefreshRate instead.
-var (
-	DefaultRefreshRate                         = DEFAULT_REFRESH_RATE
-	BarStart, BarEnd, Empty, Current, CurrentN string
-)
-
-// Create a new progress bar object
-func New(total int) *ProgressBar {
-	return New64(int64(total))
-}
-
-// Create a new progress bar object using an int64 total
-func New64(total int64) *ProgressBar {
-	pb := &ProgressBar{
-		Total:         total,
-		RefreshRate:   DEFAULT_REFRESH_RATE,
-		ShowPercent:   true,
-		ShowCounters:  true,
-		ShowBar:       true,
-		ShowTimeLeft:  true,
-		ShowFinalTime: true,
-		Units:         U_NO,
-		ManualUpdate:  false,
-		isFinish:      make(chan struct{}),
-		currentValue:  -1,
-	}
-	return pb.Format(FORMAT)
-}
-
-// Create a new object and start it
-func StartNew(total int) *ProgressBar {
-	return New(total).Start()
-}
-
-// Callback for custom output
-// For example:
-// 	bar.Callback = func(s string) {
-// 		mySuperPrint(s)
-// 	}
-//
-type Callback func(out string)
-
-type ProgressBar struct {
-	current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
-
-	Total                            int64
-	RefreshRate                      time.Duration
-	ShowPercent, ShowCounters        bool
-	ShowSpeed, ShowTimeLeft, ShowBar bool
-	ShowFinalTime                    bool
-	Output                           io.Writer
-	Callback                         Callback
-	NotPrint                         bool
-	Units                            Units
-	Width                            int
-	ForceWidth                       bool
-	ManualUpdate                     bool
-
-	finishOnce sync.Once // Guards isFinish
-	isFinish   chan struct{}
-
-	startTime time.Time
-	
startValue int64 - currentValue int64 - - prefix, postfix string - - BarStart string - BarEnd string - Empty string - Current string - CurrentN string -} - -// Start print -func (pb *ProgressBar) Start() *ProgressBar { - pb.startTime = time.Now() - pb.startValue = pb.current - if pb.Total == 0 { - pb.ShowBar = false - pb.ShowTimeLeft = false - pb.ShowPercent = false - } - if !pb.ManualUpdate { - go pb.writer() - } - return pb -} - -// Increment current value -func (pb *ProgressBar) Increment() int { - return pb.Add(1) -} - -// Set current value -func (pb *ProgressBar) Set(current int) *ProgressBar { - return pb.Set64(int64(current)) -} - -// Set64 sets the current value as int64 -func (pb *ProgressBar) Set64(current int64) *ProgressBar { - atomic.StoreInt64(&pb.current, current) - return pb -} - -// Add to current value -func (pb *ProgressBar) Add(add int) int { - return int(pb.Add64(int64(add))) -} - -func (pb *ProgressBar) Add64(add int64) int64 { - return atomic.AddInt64(&pb.current, add) -} - -// Set prefix string -func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { - pb.prefix = prefix - return pb -} - -// Set postfix string -func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { - pb.postfix = postfix - return pb -} - -// Set custom format for bar -// EXAMPLE: bar.Format("[=>_]") -func (pb *ProgressBar) Format(format string) *ProgressBar { - formatEntries := strings.Split(format, "") - if len(formatEntries) == 5 { - pb.BarStart = formatEntries[0] - pb.BarEnd = formatEntries[4] - pb.Empty = formatEntries[3] - pb.Current = formatEntries[1] - pb.CurrentN = formatEntries[2] - } - return pb -} - -// Set bar refresh rate -func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { - pb.RefreshRate = rate - return pb -} - -// Set units -// bar.SetUnits(U_NO) - by default -// bar.SetUnits(U_BYTES) - for Mb, Kb, etc -func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { - pb.Units = units - return pb -} - -// Set max width, if width is bigger than terminal width, will be ignored -func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = false - return pb -} - -// Set bar width -func (pb *ProgressBar) SetWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = true - return pb -} - -// End print -func (pb *ProgressBar) Finish() { - //Protect multiple calls - pb.finishOnce.Do(func() { - close(pb.isFinish) - pb.write(atomic.LoadInt64(&pb.current)) - if !pb.NotPrint { - fmt.Println() - } - }) -} - -// End print and write string 'str' -func (pb *ProgressBar) FinishPrint(str string) { - pb.Finish() - fmt.Println(str) -} - -// implement io.Writer -func (pb *ProgressBar) Write(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// implement io.Reader -func (pb *ProgressBar) Read(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// Create new proxy reader over bar -func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { - return &Reader{r, pb} -} - -func (pb *ProgressBar) write(current int64) { - width := pb.getWidth() - - var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string - - // percents - if pb.ShowPercent { - percent := float64(current) / (float64(pb.Total) / float64(100)) - percentBox = fmt.Sprintf(" %#.02f %% ", percent) - } - - // counters - if pb.ShowCounters { - if pb.Total > 0 { - countersBox = fmt.Sprintf("%s / %s ", Format(current, pb.Units), Format(pb.Total, pb.Units)) - } else { - countersBox = Format(current, pb.Units) + " " - } - } - 
- // time left - fromStart := time.Now().Sub(pb.startTime) - currentFromStart := current - pb.startValue - select { - case <-pb.isFinish: - if pb.ShowFinalTime { - left := (fromStart / time.Second) * time.Second - timeLeftBox = left.String() - } - default: - if pb.ShowTimeLeft && currentFromStart > 0 { - perEntry := fromStart / time.Duration(currentFromStart) - left := time.Duration(pb.Total-currentFromStart) * perEntry - left = (left / time.Second) * time.Second - timeLeftBox = left.String() - } - } - - // speed - if pb.ShowSpeed && currentFromStart > 0 { - fromStart := time.Now().Sub(pb.startTime) - speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second)) - speedBox = Format(int64(speed), pb.Units) + "/s " - } - - // bar - if pb.ShowBar { - size := width - len(countersBox+pb.BarStart+pb.BarEnd+percentBox+timeLeftBox+speedBox+pb.prefix+pb.postfix) - if size > 0 && pb.Total > 0 { - curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) - emptCount := size - curCount - barBox = pb.BarStart - if emptCount < 0 { - emptCount = 0 - } - if curCount > size { - curCount = size - } - if emptCount <= 0 { - barBox += strings.Repeat(pb.Current, curCount) - } else if curCount > 0 { - barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN - } - - barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd - } - } - - // check len - out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix - if len(out) < width { - end = strings.Repeat(" ", width-len(out)) - } - - // and print! - switch { - case pb.Output != nil: - fmt.Fprint(pb.Output, "\r"+out+end) - case pb.Callback != nil: - pb.Callback(out + end) - case !pb.NotPrint: - fmt.Print("\r" + out + end) - } -} - -func (pb *ProgressBar) getWidth() int { - if pb.ForceWidth { - return pb.Width - } - - width := pb.Width - termWidth, _ := terminalWidth() - if width == 0 || termWidth <= width { - width = termWidth - } - - return width -} - -// Write the current state of the progressbar -func (pb *ProgressBar) Update() { - c := atomic.LoadInt64(&pb.current) - if c != pb.currentValue { - pb.write(c) - pb.currentValue = c - } -} - -// Internal loop for writing progressbar -func (pb *ProgressBar) writer() { - pb.Update() - for { - select { - case <-pb.isFinish: - return - case <-time.After(pb.RefreshRate): - pb.Update() - } - } -} - -type window struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_nix.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_nix.go deleted file mode 100644 index 5db4e523f..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_nix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd - -package pb - -import "syscall" - -const sys_ioctl = syscall.SYS_IOCTL diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_solaris.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_solaris.go deleted file mode 100644 index 00d705e35..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_solaris.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build solaris - -package pb - -const sys_ioctl = 54 diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go deleted file mode 100644 index dfe394fd4..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package pb - -import ( - "testing" -) - -func Test_IncrementAddsOne(t *testing.T) { - 
count := 5000 - bar := New(count) - expected := 1 - actual := bar.Increment() - - if actual != expected { - t.Errorf("Expected {%d} was {%d}", expected, actual) - } -} - -func Test_Width(t *testing.T) { - count := 5000 - bar := New(count) - width := 100 - bar.SetWidth(100).Callback = func(out string) { - if len(out) != width { - t.Errorf("Bar width expected {%d} was {%d}", len(out), width) - } - } - bar.Start() - bar.Increment() - bar.Finish() -} - -func Test_MultipleFinish(t *testing.T) { - bar := New(5000) - bar.Add(2000) - bar.Finish() - bar.Finish() -} diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_win.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_win.go deleted file mode 100644 index 719d39bf2..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_win.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build windows - -package pb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/olekukonko/ts" -) - -func bold(str string) string { - return str -} - -func terminalWidth() (int, error) { - size, err := ts.GetSize() - return size.Col(), err -} diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_x.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_x.go deleted file mode 100644 index dd5f906e1..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_x.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris - -package pb - -import ( - "os" - "runtime" - "syscall" - "unsafe" -) - -const ( - TIOCGWINSZ = 0x5413 - TIOCGWINSZ_OSX = 1074295912 -) - -var tty *os.File - -func init() { - var err error - tty, err = os.Open("/dev/tty") - if err != nil { - tty = os.Stdin - } -} - -func bold(str string) string { - return "\033[1m" + str + "\033[0m" -} - -func terminalWidth() (int, error) { - w := new(window) - tio := syscall.TIOCGWINSZ - if runtime.GOOS == "darwin" { - tio = TIOCGWINSZ_OSX - } - res, _, err := syscall.Syscall(sys_ioctl, - tty.Fd(), - uintptr(tio), - uintptr(unsafe.Pointer(w)), - ) - if int(res) == -1 { - return 0, err - } - return int(w.Col), nil -} diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/reader.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/reader.go deleted file mode 100644 index 2d01125ca..000000000 --- a/Godeps/_workspace/src/github.com/cheggaaa/pb/reader.go +++ /dev/null @@ -1,17 +0,0 @@ -package pb - -import ( - "io" -) - -// It's proxy reader, implement io.Reader -type Reader struct { - io.Reader - bar *ProgressBar -} - -func (r *Reader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - r.bar.Add(n) - return -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml deleted file mode 100644 index 46cc6e78c..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3.3 -notifications: - # See http://about.travis-ci.org/docs/user/build-configuration/ to learn more - # about configuring notification recipients and more. 
- email: - recipients: - - coda.hale@gmail.com diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md deleted file mode 100644 index 614b197c3..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md +++ /dev/null @@ -1,15 +0,0 @@ -hdrhistogram -============ - -[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram) - -A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). - -> A Histogram that supports recording and analyzing sampled data value counts -> across a configurable integer value range with configurable value precision -> within the range. Value precision is expressed as the number of significant -> digits in the value recording, and provides control over value quantization -> behavior across the value range and the subsequent value resolution at any -> given level. - -For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go deleted file mode 100644 index 340c904a9..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go +++ /dev/null @@ -1,513 +0,0 @@ -// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram -// data structure. The HDR Histogram allows for fast and accurate analysis of -// the extreme ranges of data with non-normal distributions, like latency. -package hdrhistogram - -import ( - "fmt" - "math" -) - -// A Bracket is a part of a cumulative distribution. -type Bracket struct { - Quantile float64 - Count, ValueAt int64 -} - -// A Snapshot is an exported view of a Histogram, useful for serializing them. -// A Histogram can be constructed from it by passing it to Import. -type Snapshot struct { - LowestTrackableValue int64 - HighestTrackableValue int64 - SignificantFigures int64 - Counts []int64 -} - -// A Histogram is a lossy data structure used to record the distribution of -// non-normally distributed data (like latency) with a high degree of accuracy -// and a bounded degree of precision. -type Histogram struct { - lowestTrackableValue int64 - highestTrackableValue int64 - unitMagnitude int64 - significantFigures int64 - subBucketHalfCountMagnitude int32 - subBucketHalfCount int32 - subBucketMask int64 - subBucketCount int32 - bucketCount int32 - countsLen int32 - totalCount int64 - counts []int64 -} - -// New returns a new Histogram instance capable of tracking values in the given -// range and with the given amount of precision. 
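Before the constructor's implementation, a minimal usage sketch of the histogram API being removed (New, RecordValue, ValueAtQuantile, and Max all appear in this hunk), assuming the upstream import path `github.com/codahale/hdrhistogram`; the value range and sample data are illustrative:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram" // assumed upstream import path
)

func main() {
	// Track values from 1 to 60,000,000 (e.g. latencies in microseconds)
	// with 3 significant figures of precision.
	h := hdrhistogram.New(1, 60000000, 3)

	for _, v := range []int64{100, 250, 900, 1500, 40000} {
		if err := h.RecordValue(v); err != nil {
			fmt.Println("out of range:", err)
		}
	}

	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("max:", h.Max())
}
```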
-func New(minValue, maxValue int64, sigfigs int) *Histogram { - if sigfigs < 1 || 5 < sigfigs { - panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) - } - - largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) - subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) - - subBucketHalfCountMagnitude := subBucketCountMagnitude - if subBucketHalfCountMagnitude < 1 { - subBucketHalfCountMagnitude = 1 - } - subBucketHalfCountMagnitude-- - - unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) - if unitMagnitude < 0 { - unitMagnitude = 0 - } - - subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) - - subBucketHalfCount := subBucketCount / 2 - subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) - - // determine exponent range needed to support the trackable value with no - // overflow: - smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) - bucketsNeeded := int32(1) - for smallestUntrackableValue < maxValue { - smallestUntrackableValue <<= 1 - bucketsNeeded++ - } - - bucketCount := bucketsNeeded - countsLen := (bucketCount + 1) * (subBucketCount / 2) - - return &Histogram{ - lowestTrackableValue: minValue, - highestTrackableValue: maxValue, - unitMagnitude: int64(unitMagnitude), - significantFigures: int64(sigfigs), - subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, - subBucketHalfCount: subBucketHalfCount, - subBucketMask: subBucketMask, - subBucketCount: subBucketCount, - bucketCount: bucketCount, - countsLen: countsLen, - totalCount: 0, - counts: make([]int64, countsLen), - } -} - -// ByteSize returns an estimate of the amount of memory allocated to the -// histogram in bytes. -// -// N.B.: This does not take into account the overhead for slices, which are -// small, constant, and specific to the compiler version. -func (h *Histogram) ByteSize() int { - return 6*8 + 5*4 + len(h.counts)*8 -} - -// Merge merges the data stored in the given histogram with the receiver, -// returning the number of recorded values which had to be dropped. -func (h *Histogram) Merge(from *Histogram) (dropped int64) { - i := from.rIterator() - for i.next() { - v := i.valueFromIdx - c := i.countAtIdx - - if h.RecordValues(v, c) != nil { - dropped += c - } - } - - return -} - -// TotalCount returns total number of values recorded. -func (h *Histogram) TotalCount() int64 { - return h.totalCount -} - -// Max returns the approximate maximum recorded value. -func (h *Histogram) Max() int64 { - var max int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - max = i.highestEquivalentValue - } - } - return h.lowestEquivalentValue(max) -} - -// Min returns the approximate minimum recorded value. -func (h *Histogram) Min() int64 { - var min int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 && min == 0 { - min = i.highestEquivalentValue - break - } - } - return h.lowestEquivalentValue(min) -} - -// Mean returns the approximate arithmetic mean of the recorded values. -func (h *Histogram) Mean() float64 { - var total int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) - } - } - return float64(total) / float64(h.totalCount) -} - -// StdDev returns the approximate standard deviation of the recorded values. 
-func (h *Histogram) StdDev() float64 { - mean := h.Mean() - geometricDevTotal := 0.0 - - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean - geometricDevTotal += (dev * dev) * float64(i.countAtIdx) - } - } - - return math.Sqrt(geometricDevTotal / float64(h.totalCount)) -} - -// Reset deletes all recorded values and restores the histogram to its original -// state. -func (h *Histogram) Reset() { - h.totalCount = 0 - for i := range h.counts { - h.counts[i] = 0 - } -} - -// RecordValue records the given value, returning an error if the value is out -// of range. -func (h *Histogram) RecordValue(v int64) error { - return h.RecordValues(v, 1) -} - -// RecordCorrectedValue records the given value, correcting for stalls in the -// recording process. This only works for processes which are recording values -// at an expected interval (e.g., doing jitter analysis). Processes which are -// recording ad-hoc values (e.g., latency for incoming requests) can't take -// advantage of this. -func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { - if err := h.RecordValue(v); err != nil { - return err - } - - if expectedInterval <= 0 || v <= expectedInterval { - return nil - } - - missingValue := v - expectedInterval - for missingValue >= expectedInterval { - if err := h.RecordValue(missingValue); err != nil { - return err - } - missingValue -= expectedInterval - } - - return nil -} - -// RecordValues records n occurrences of the given value, returning an error if -// the value is out of range. -func (h *Histogram) RecordValues(v, n int64) error { - idx := h.countsIndexFor(v) - if idx < 0 || int(h.countsLen) <= idx { - return fmt.Errorf("value %d is too large to be recorded", v) - } - h.counts[idx] += n - h.totalCount += n - - return nil -} - -// ValueAtQuantile returns the recorded value at the given quantile (0..100). -func (h *Histogram) ValueAtQuantile(q float64) int64 { - if q > 100 { - q = 100 - } - - total := int64(0) - countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) - - i := h.iterator() - for i.next() { - total += i.countAtIdx - if total >= countAtPercentile { - return h.highestEquivalentValue(i.valueFromIdx) - } - } - - return 0 -} - -// CumulativeDistribution returns an ordered list of brackets of the -// distribution of recorded values. -func (h *Histogram) CumulativeDistribution() []Bracket { - var result []Bracket - - i := h.pIterator(1) - for i.next() { - result = append(result, Bracket{ - Quantile: i.percentile, - Count: i.countToIdx, - ValueAt: i.highestEquivalentValue, - }) - } - - return result -} - -// Equals returns true if the two Histograms are equivalent, false if not. -func (h *Histogram) Equals(other *Histogram) bool { - switch { - case - h.lowestTrackableValue != other.lowestTrackableValue, - h.highestTrackableValue != other.highestTrackableValue, - h.unitMagnitude != other.unitMagnitude, - h.significantFigures != other.significantFigures, - h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, - h.subBucketHalfCount != other.subBucketHalfCount, - h.subBucketMask != other.subBucketMask, - h.subBucketCount != other.subBucketCount, - h.bucketCount != other.bucketCount, - h.countsLen != other.countsLen, - h.totalCount != other.totalCount: - return false - default: - for i, c := range h.counts { - if c != other.counts[i] { - return false - } - } - } - return true -} - -// Export returns a snapshot view of the Histogram. 
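`Export` and `Import` (here and just below), together with `Equals`, let a histogram cross a process boundary: `Snapshot` is plain exported fields, so any encoding works. JSON in this sketch is purely an illustration, not something the package prescribes:

    s := h.Export()             // *hdrhistogram.Snapshot
    buf, err := json.Marshal(s) // needs "encoding/json"
    if err != nil {
        // handle error
    }

    var snap hdrhistogram.Snapshot
    if err := json.Unmarshal(buf, &snap); err != nil {
        // handle error
    }
    fmt.Println(hdrhistogram.Import(&snap).Equals(h)) // true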
This can be later passed to -// Import to construct a new Histogram with the same state. -func (h *Histogram) Export() *Snapshot { - return &Snapshot{ - LowestTrackableValue: h.lowestTrackableValue, - HighestTrackableValue: h.highestTrackableValue, - SignificantFigures: h.significantFigures, - Counts: h.counts, - } -} - -// Import returns a new Histogram populated from the Snapshot data. -func Import(s *Snapshot) *Histogram { - h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) - h.counts = s.Counts - totalCount := int64(0) - for i := int32(0); i < h.countsLen; i++ { - countAtIndex := h.counts[i] - if countAtIndex > 0 { - totalCount += countAtIndex - } - } - h.totalCount = totalCount - return h -} - -func (h *Histogram) iterator() *iterator { - return &iterator{ - h: h, - subBucketIdx: -1, - } -} - -func (h *Histogram) rIterator() *rIterator { - return &rIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - } -} - -func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { - return &pIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - ticksPerHalfDistance: ticksPerHalfDistance, - } -} - -func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - adjustedBucket := bucketIdx - if subBucketIdx >= h.subBucketCount { - adjustedBucket++ - } - return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) -} - -func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { - return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) -} - -func (h *Histogram) lowestEquivalentValue(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return h.valueFromIndex(bucketIdx, subBucketIdx) -} - -func (h *Histogram) nextNonEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) -} - -func (h *Histogram) highestEquivalentValue(v int64) int64 { - return h.nextNonEquivalentValue(v) - 1 -} - -func (h *Histogram) medianEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) -} - -func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { - return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] -} - -func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { - bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) - offsetInBucket := subBucketIdx - h.subBucketHalfCount - return bucketBaseIdx + offsetInBucket -} - -func (h *Histogram) getBucketIndex(v int64) int32 { - pow2Ceiling := bitLen(v | h.subBucketMask) - return int32(pow2Ceiling - int64(h.unitMagnitude) - - int64(h.subBucketHalfCountMagnitude+1)) -} - -func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { - return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) -} - -func (h *Histogram) countsIndexFor(v int64) int { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return int(h.countsIndex(bucketIdx, subBucketIdx)) -} - -type iterator struct { - h *Histogram - bucketIdx, subBucketIdx int32 - countAtIdx, countToIdx, valueFromIdx int64 - highestEquivalentValue int64 -} - -func (i *iterator) next() bool { - if i.countToIdx >= i.h.totalCount { - return false - } - - // increment bucket - i.subBucketIdx++ - if i.subBucketIdx >= i.h.subBucketCount { - i.subBucketIdx = i.h.subBucketHalfCount - i.bucketIdx++ - } - - if i.bucketIdx >= 
i.h.bucketCount { - return false - } - - i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) - i.countToIdx += i.countAtIdx - i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) - i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) - - return true -} - -type rIterator struct { - iterator - countAddedThisStep int64 -} - -func (r *rIterator) next() bool { - for r.iterator.next() { - if r.countAtIdx != 0 { - r.countAddedThisStep = r.countAtIdx - return true - } - } - return false -} - -type pIterator struct { - iterator - seenLastValue bool - ticksPerHalfDistance int32 - percentileToIteratorTo float64 - percentile float64 -} - -func (p *pIterator) next() bool { - if !(p.countToIdx < p.h.totalCount) { - if p.seenLastValue { - return false - } - - p.seenLastValue = true - p.percentile = 100 - - return true - } - - if p.subBucketIdx == -1 && !p.iterator.next() { - return false - } - - var done = false - for !done { - currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) - if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { - p.percentile = p.percentileToIteratorTo - halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) - percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance - p.percentileToIteratorTo += 100.0 / percentileReportingTicks - return true - } - done = !p.iterator.next() - } - - return true -} - -func bitLen(x int64) (n int64) { - for ; x >= 0x8000; x >>= 16 { - n += 16 - } - if x >= 0x80 { - x >>= 8 - n += 8 - } - if x >= 0x8 { - x >>= 4 - n += 4 - } - if x >= 0x2 { - x >>= 2 - n += 2 - } - if x >= 0x1 { - n++ - } - return -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go deleted file mode 100644 index ada770bee..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package hdrhistogram_test - -import ( - "reflect" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram" -) - -func TestHighSigFig(t *testing.T) { - input := []int64{ - 459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317, - 3964974, 12718782, - } - - hist := hdrhistogram.New(459876, 12718782, 5) - for _, sample := range input { - hist.RecordValue(sample) - } - - if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestValueAtQuantile(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - data := []struct { - q float64 - v int64 - }{ - {q: 50, v: 500223}, - {q: 75, v: 750079}, - {q: 90, v: 900095}, - {q: 95, v: 950271}, - {q: 99, v: 990207}, - {q: 99.9, v: 999423}, - {q: 99.99, v: 999935}, - } - - for _, d := range data { - if v := h.ValueAtQuantile(d.q); v != d.v { - t.Errorf("P%v was %v, but expected %v", d.q, v, d.v) - } - } -} - -func TestMean(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Mean(), 500000.013312; v != want { - t.Errorf("Mean was %v, but expected %v", v, want) - } -} - -func TestStdDev(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := 
h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.StdDev(), 288675.1403682715; v != want { - t.Errorf("StdDev was %v, but expected %v", v, want) - } -} - -func TestTotalCount(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - if v, want := h.TotalCount(), int64(i+1); v != want { - t.Errorf("TotalCount was %v, but expected %v", v, want) - } - } -} - -func TestMax(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Max(), int64(999936); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestReset(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h.Reset() - - if v, want := h.Max(), int64(0); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestMerge(t *testing.T) { - h1 := hdrhistogram.New(1, 1000, 3) - h2 := hdrhistogram.New(1, 1000, 3) - - for i := 0; i < 100; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - for i := 100; i < 200; i++ { - if err := h2.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h1.Merge(h2) - - if v, want := h1.ValueAtQuantile(50), int64(99); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestMin(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Min(), int64(0); v != want { - t.Errorf("Min was %v, but expected %v", v, want) - } -} - -func TestByteSize(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if v, want := h.ByteSize(), 65604; v != want { - t.Errorf("ByteSize was %v, but expected %d", v, want) - } -} - -func TestRecordCorrectedValue(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(10, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(10); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestRecordCorrectedValueStall(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(1000, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(800); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestCumulativeDistribution(t *testing.T) { - h := hdrhistogram.New(1, 100000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - actual := h.CumulativeDistribution() - expected := []hdrhistogram.Bracket{ - hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0}, - hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223}, - hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079}, - hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007}, - hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983}, - hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215}, - hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575}, - hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255}, - hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 
996351}, - hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399}, - hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423}, - hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447}, - hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447}, - } - - if !reflect.DeepEqual(actual, expected) { - t.Errorf("CF was %#v, but expected %#v", actual, expected) - } -} - -func BenchmarkHistogramRecordValue(b *testing.B) { - h := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - h.RecordValue(100) - } -} - -func BenchmarkNew(b *testing.B) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min - } -} - -func TestUnitMagnitudeOverflow(t *testing.T) { - h := hdrhistogram.New(0, 200, 4) - if err := h.RecordValue(11); err != nil { - t.Fatal(err) - } -} - -func TestSubBucketMaskOverflow(t *testing.T) { - hist := hdrhistogram.New(2e7, 1e8, 5) - for _, sample := range [...]int64{1e8, 2e7, 3e7} { - hist.RecordValue(sample) - } - - for q, want := range map[float64]int64{ - 50: 33554431, - 83.33: 33554431, - 83.34: 100663295, - 99: 100663295, - } { - if got := hist.ValueAtQuantile(q); got != want { - t.Errorf("got %d for %fth percentile. want: %d", got, q, want) - } - } -} - -func TestExportImport(t *testing.T) { - min := int64(1) - max := int64(10000000) - sigfigs := 3 - h := hdrhistogram.New(min, max, sigfigs) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - s := h.Export() - - if v := s.LowestTrackableValue; v != min { - t.Errorf("LowestTrackableValue was %v, but expected %v", v, min) - } - - if v := s.HighestTrackableValue; v != max { - t.Errorf("HighestTrackableValue was %v, but expected %v", v, max) - } - - if v := int(s.SignificantFigures); v != sigfigs { - t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs) - } - - if imported := hdrhistogram.Import(s); !imported.Equals(h) { - t.Error("Expected Histograms to be equivalent") - } - -} - -func TestEquals(t *testing.T) { - h1 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h2 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 10000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if h1.Equals(h2) { - t.Error("Expected Histograms to not be equivalent") - } - - h1.Reset() - h2.Reset() - - if !h1.Equals(h2) { - t.Error("Expected Histograms to be equivalent") - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go deleted file mode 100644 index dc43612a4..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go +++ /dev/null @@ -1,45 +0,0 @@ -package hdrhistogram - -// A WindowedHistogram combines histograms to provide windowed statistics. 
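A usage sketch for the windowed variant deleted below: `NewWindowed` pre-allocates N histograms, `Rotate` retires the oldest and makes it `Current`, and `Merge` combines all windows into one rolling view, so memory stays bounded:

    w := hdrhistogram.NewWindowed(5, 1, 10000, 3) // 5 windows over values in [1, 10000]
    w.Current.RecordValue(42)
    w.Rotate()          // oldest window is reset and becomes Current
    w.Current.RecordValue(99)

    merged := w.Merge() // recordings from every live window
    fmt.Println(merged.TotalCount()) // 2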
-type WindowedHistogram struct { - idx int - h []Histogram - m *Histogram - - Current *Histogram -} - -// NewWindowed creates a new WindowedHistogram with N underlying histograms with -// the given parameters. -func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { - w := WindowedHistogram{ - idx: -1, - h: make([]Histogram, n), - m: New(minValue, maxValue, sigfigs), - } - - for i := range w.h { - w.h[i] = *New(minValue, maxValue, sigfigs) - } - w.Rotate() - - return &w -} - -// Merge returns a histogram which includes the recorded values from all the -// sections of the window. -func (w *WindowedHistogram) Merge() *Histogram { - w.m.Reset() - for _, h := range w.h { - w.m.Merge(&h) - } - return w.m -} - -// Rotate resets the oldest histogram and rotates it to be used as the current -// histogram. -func (w *WindowedHistogram) Rotate() { - w.idx++ - w.Current = &w.h[w.idx%len(w.h)] - w.Current.Reset() -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go deleted file mode 100644 index f8bbabf65..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package hdrhistogram_test - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram" -) - -func TestWindowedHistogram(t *testing.T) { - w := hdrhistogram.NewWindowed(2, 1, 1000, 3) - - for i := 0; i < 100; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 100; i < 200; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 200; i < 300; i++ { - w.Current.RecordValue(int64(i)) - } - - if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } -} - -func BenchmarkWindowedHistogramMerge(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - for i := 0; i < 10000000; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - w.Merge() - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/.travis.yml b/Godeps/_workspace/src/github.com/codahale/metrics/.travis.yml deleted file mode 100644 index 46cc6e78c..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3.3 -notifications: - # See http://about.travis-ci.org/docs/user/build-configuration/ to learn more - # about configuring notification recipients and more. 
- email: - recipients: - - coda.hale@gmail.com diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/LICENSE b/Godeps/_workspace/src/github.com/codahale/metrics/LICENSE deleted file mode 100644 index f9835c241..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Coda Hale - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/README.md b/Godeps/_workspace/src/github.com/codahale/metrics/README.md deleted file mode 100644 index 7482a50f3..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/README.md +++ /dev/null @@ -1,8 +0,0 @@ -metrics -======= - -[![Build Status](https://travis-ci.org/codahale/metrics.png?branch=master)](https://travis-ci.org/codahale/metrics) - -A Go library which provides light-weight instrumentation for your application. - -For documentation, check [godoc](http://godoc.org/github.com/codahale/metrics). diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/metrics.go b/Godeps/_workspace/src/github.com/codahale/metrics/metrics.go deleted file mode 100644 index c707092ce..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/metrics.go +++ /dev/null @@ -1,329 +0,0 @@ -// Package metrics provides minimalist instrumentation for your applications in -// the form of counters and gauges. -// -// Counters -// -// A counter is a monotonically-increasing, unsigned, 64-bit integer used to -// represent the number of times an event has occurred. By tracking the deltas -// between measurements of a counter over intervals of time, an aggregation -// layer can derive rates, acceleration, etc. -// -// Gauges -// -// A gauge returns instantaneous measurements of something using signed, 64-bit -// integers. This value does not need to be monotonic. -// -// Histograms -// -// A histogram tracks the distribution of a stream of values (e.g. the number of -// milliseconds it takes to handle requests), adding gauges for the values at -// meaningful quantiles: 50th, 75th, 90th, 95th, 99th, 99.9th. -// -// Reporting -// -// Measurements from counters and gauges are available as expvars. Your service -// should return its expvars from an HTTP endpoint (i.e., /debug/vars) as a JSON -// object. -package metrics - -import ( - "expvar" - "sync" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram" -) - -// A Counter is a monotonically increasing unsigned integer. 
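For context on the metrics package whose removal starts here: counters and gauges are just string-typed names registered in package-level maps, and `Snapshot` materializes all of them at once. A minimal sketch with illustrative metric names:

    package main

    import (
        "fmt"

        "github.com/codahale/metrics" // upstream path of the vendored package
    )

    func main() {
        metrics.Counter("requests").Add()   // increment by 1
        metrics.Counter("requests").AddN(9) // increment by 9
        metrics.Gauge("queue.depth").Set(3) // instantaneous value

        counters, gauges := metrics.Snapshot()
        fmt.Println(counters["requests"], gauges["queue.depth"]) // 10 3
    }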
-// -// Use a counter to derive rates (e.g., record total number of requests, derive -// requests per second). -type Counter string - -// Add increments the counter by one. -func (c Counter) Add() { - c.AddN(1) -} - -// AddN increments the counter by N. -func (c Counter) AddN(delta uint64) { - cm.Lock() - counters[string(c)] += delta - cm.Unlock() -} - -// SetFunc sets the counter's value to the lazily-called return value of the -// given function. -func (c Counter) SetFunc(f func() uint64) { - cm.Lock() - defer cm.Unlock() - - counterFuncs[string(c)] = f -} - -// SetBatchFunc sets the counter's value to the lazily-called return value of -// the given function, with an additional initializer function for a related -// batch of counters, all of which are keyed by an arbitrary value. -func (c Counter) SetBatchFunc(key interface{}, init func(), f func() uint64) { - cm.Lock() - defer cm.Unlock() - - gm.Lock() - defer gm.Unlock() - - counterFuncs[string(c)] = f - if _, ok := inits[key]; !ok { - inits[key] = init - } -} - -// Remove removes the given counter. -func (c Counter) Remove() { - cm.Lock() - defer cm.Unlock() - - gm.Lock() - defer gm.Unlock() - - delete(counters, string(c)) - delete(counterFuncs, string(c)) - delete(inits, string(c)) -} - -// A Gauge is an instantaneous measurement of a value. -// -// Use a gauge to track metrics which increase and decrease (e.g., amount of -// free memory). -type Gauge string - -// Set the gauge's value to the given value. -func (g Gauge) Set(value int64) { - gm.Lock() - defer gm.Unlock() - - gauges[string(g)] = func() int64 { - return value - } -} - -// SetFunc sets the gauge's value to the lazily-called return value of the given -// function. -func (g Gauge) SetFunc(f func() int64) { - gm.Lock() - defer gm.Unlock() - - gauges[string(g)] = f -} - -// SetBatchFunc sets the gauge's value to the lazily-called return value of the -// given function, with an additional initializer function for a related batch -// of gauges, all of which are keyed by an arbitrary value. -func (g Gauge) SetBatchFunc(key interface{}, init func(), f func() int64) { - gm.Lock() - defer gm.Unlock() - - gauges[string(g)] = f - if _, ok := inits[key]; !ok { - inits[key] = init - } -} - -// Remove removes the given gauge. -func (g Gauge) Remove() { - gm.Lock() - defer gm.Unlock() - - delete(gauges, string(g)) - delete(inits, string(g)) -} - -// Reset removes all existing counters and gauges. -func Reset() { - cm.Lock() - defer cm.Unlock() - - gm.Lock() - defer gm.Unlock() - - hm.Lock() - defer hm.Unlock() - - counters = make(map[string]uint64) - counterFuncs = make(map[string]func() uint64) - gauges = make(map[string]func() int64) - histograms = make(map[string]*Histogram) - inits = make(map[interface{}]func()) -} - -// Snapshot returns a copy of the values of all registered counters and gauges. -func Snapshot() (c map[string]uint64, g map[string]int64) { - cm.Lock() - defer cm.Unlock() - - gm.Lock() - defer gm.Unlock() - - hm.Lock() - defer hm.Unlock() - - for _, init := range inits { - init() - } - - c = make(map[string]uint64, len(counters)+len(counterFuncs)) - for n, v := range counters { - c[n] = v - } - - for n, f := range counterFuncs { - c[n] = f() - } - - g = make(map[string]int64, len(gauges)) - for n, f := range gauges { - g[n] = f() - } - - return -} - -// NewHistogram returns a windowed HDR histogram which drops data older than -// five minutes. The returned histogram is safe to use from multiple goroutines. 
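`NewHistogram`, implemented just below, ties the hdrhistogram package into this gauge namespace: each registered histogram exports its quantiles as gauges named `<name>.P50` through `<name>.P999`. A sketch with an illustrative name:

    h := metrics.NewHistogram("api.latency_ms", 1, 60000, 3)
    if err := h.RecordValue(87); err != nil {
        // value fell outside [1, 60000]
    }

    _, gauges := metrics.Snapshot() // quantile merge happens lazily, inside Snapshot
    fmt.Println(gauges["api.latency_ms.P50"], gauges["api.latency_ms.P99"])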
-// -// Use a histogram to track the distribution of a stream of values (e.g., the -// latency associated with HTTP requests). -func NewHistogram(name string, minValue, maxValue int64, sigfigs int) *Histogram { - hm.Lock() - defer hm.Unlock() - - if _, ok := histograms[name]; ok { - panic(name + " already exists") - } - - hist := &Histogram{ - name: name, - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - } - histograms[name] = hist - - Gauge(name+".P50").SetBatchFunc(hname(name), hist.merge, hist.valueAt(50)) - Gauge(name+".P75").SetBatchFunc(hname(name), hist.merge, hist.valueAt(75)) - Gauge(name+".P90").SetBatchFunc(hname(name), hist.merge, hist.valueAt(90)) - Gauge(name+".P95").SetBatchFunc(hname(name), hist.merge, hist.valueAt(95)) - Gauge(name+".P99").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99)) - Gauge(name+".P999").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99.9)) - - return hist -} - -// Remove removes the given histogram. -func (h *Histogram) Remove() { - - hm.Lock() - defer hm.Unlock() - - Gauge(h.name + ".P50").Remove() - Gauge(h.name + ".P75").Remove() - Gauge(h.name + ".P90").Remove() - Gauge(h.name + ".P95").Remove() - Gauge(h.name + ".P99").Remove() - Gauge(h.name + ".P999").Remove() - - delete(histograms, h.name) -} - -type hname string // unexported to prevent collisions - -// A Histogram measures the distribution of a stream of values. -type Histogram struct { - name string - hist *hdrhistogram.WindowedHistogram - m *hdrhistogram.Histogram - rw sync.RWMutex -} - -// Name returns the name of the histogram -func (h *Histogram) Name() string { - return h.name -} - -// RecordValue records the given value, or returns an error if the value is out -// of range. -// Returned error values are of type Error. -func (h *Histogram) RecordValue(v int64) error { - h.rw.Lock() - defer h.rw.Unlock() - - err := h.hist.Current.RecordValue(v) - if err != nil { - return Error{h.name, err} - } - return nil -} - -func (h *Histogram) rotate() { - h.rw.Lock() - defer h.rw.Unlock() - - h.hist.Rotate() -} - -func (h *Histogram) merge() { - h.rw.Lock() - defer h.rw.Unlock() - - h.m = h.hist.Merge() -} - -func (h *Histogram) valueAt(q float64) func() int64 { - return func() int64 { - h.rw.RLock() - defer h.rw.RUnlock() - - if h.m == nil { - return 0 - } - - return h.m.ValueAtQuantile(q) - } -} - -// Error describes an error and the name of the metric where it occurred. 
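`SetBatchFunc`, which `NewHistogram` above uses to share one `merge` across six quantile gauges, exists for metrics that are cheap to read together but expensive to read separately: all functions registered under the same key share a single initializer, and `Snapshot` invokes each initializer once per snapshot. A sketch where `readBothStats` is a hypothetical stand-in for one expensive collection step:

    var a, b uint64
    poll := func() { a, b = readBothStats() } // hypothetical: one read fills both values

    metrics.Counter("stats.a").SetBatchFunc("stats", poll, func() uint64 { return a })
    metrics.Counter("stats.b").SetBatchFunc("stats", poll, func() uint64 { return b })
    // Snapshot() calls poll once, then reads both counters from the cached values.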
-type Error struct { - Metric string - Err error -} - -func (e Error) Error() string { - return e.Metric + ": " + e.Err.Error() -} - -var ( - counters = make(map[string]uint64) - counterFuncs = make(map[string]func() uint64) - gauges = make(map[string]func() int64) - inits = make(map[interface{}]func()) - histograms = make(map[string]*Histogram) - - cm, gm, hm sync.Mutex -) - -func init() { - expvar.Publish("metrics", expvar.Func(func() interface{} { - counters, gauges := Snapshot() - return map[string]interface{}{ - "Counters": counters, - "Gauges": gauges, - } - })) - - go func() { - for _ = range time.NewTicker(1 * time.Minute).C { - hm.Lock() - for _, h := range histograms { - h.rotate() - } - hm.Unlock() - } - }() -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/metrics_test.go b/Godeps/_workspace/src/github.com/codahale/metrics/metrics_test.go deleted file mode 100644 index f056c6d0b..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/metrics_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package metrics_test - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func TestCounter(t *testing.T) { - metrics.Reset() - - metrics.Counter("whee").Add() - metrics.Counter("whee").AddN(10) - - counters, _ := metrics.Snapshot() - if v, want := counters["whee"], uint64(11); v != want { - t.Errorf("Counter was %v, but expected %v", v, want) - } -} - -func TestCounterFunc(t *testing.T) { - metrics.Reset() - - metrics.Counter("whee").SetFunc(func() uint64 { - return 100 - }) - - counters, _ := metrics.Snapshot() - if v, want := counters["whee"], uint64(100); v != want { - t.Errorf("Counter was %v, but expected %v", v, want) - } -} - -func TestCounterBatchFunc(t *testing.T) { - metrics.Reset() - - var a, b uint64 - - metrics.Counter("whee").SetBatchFunc( - "yay", - func() { - a, b = 1, 2 - }, - func() uint64 { - return a - }, - ) - - metrics.Counter("woo").SetBatchFunc( - "yay", - func() { - a, b = 1, 2 - }, - func() uint64 { - return b - }, - ) - - counters, _ := metrics.Snapshot() - if v, want := counters["whee"], uint64(1); v != want { - t.Errorf("Counter was %v, but expected %v", v, want) - } - - if v, want := counters["woo"], uint64(2); v != want { - t.Errorf("Counter was %v, but expected %v", v, want) - } -} - -func TestCounterRemove(t *testing.T) { - metrics.Reset() - - metrics.Counter("whee").Add() - metrics.Counter("whee").Remove() - - counters, _ := metrics.Snapshot() - if v, ok := counters["whee"]; ok { - t.Errorf("Counter was %v, but expected nothing", v) - } -} - -func TestGaugeValue(t *testing.T) { - metrics.Reset() - - metrics.Gauge("whee").Set(-100) - - _, gauges := metrics.Snapshot() - if v, want := gauges["whee"], int64(-100); v != want { - t.Errorf("Gauge was %v, but expected %v", v, want) - } -} - -func TestGaugeFunc(t *testing.T) { - metrics.Reset() - - metrics.Gauge("whee").SetFunc(func() int64 { - return -100 - }) - - _, gauges := metrics.Snapshot() - if v, want := gauges["whee"], int64(-100); v != want { - t.Errorf("Gauge was %v, but expected %v", v, want) - } -} - -func TestGaugeRemove(t *testing.T) { - metrics.Reset() - - metrics.Gauge("whee").Set(1) - metrics.Gauge("whee").Remove() - - _, gauges := metrics.Snapshot() - if v, ok := gauges["whee"]; ok { - t.Errorf("Gauge was %v, but expected nothing", v) - } -} - -func TestHistogram(t *testing.T) { - metrics.Reset() - - h := metrics.NewHistogram("heyo", 1, 1000, 3) - for i := 100; i > 0; i-- { - for j := 0; j < i; j++ { - 
h.RecordValue(int64(i)) - } - } - - _, gauges := metrics.Snapshot() - - if v, want := gauges["heyo.P50"], int64(71); v != want { - t.Errorf("P50 was %v, but expected %v", v, want) - } - - if v, want := gauges["heyo.P75"], int64(87); v != want { - t.Errorf("P75 was %v, but expected %v", v, want) - } - - if v, want := gauges["heyo.P90"], int64(95); v != want { - t.Errorf("P90 was %v, but expected %v", v, want) - } - - if v, want := gauges["heyo.P95"], int64(98); v != want { - t.Errorf("P95 was %v, but expected %v", v, want) - } - - if v, want := gauges["heyo.P99"], int64(100); v != want { - t.Errorf("P99 was %v, but expected %v", v, want) - } - - if v, want := gauges["heyo.P999"], int64(100); v != want { - t.Errorf("P999 was %v, but expected %v", v, want) - } -} - -func TestHistogramRemove(t *testing.T) { - metrics.Reset() - - h := metrics.NewHistogram("heyo", 1, 1000, 3) - h.Remove() - - _, gauges := metrics.Snapshot() - if v, ok := gauges["heyo.P50"]; ok { - t.Errorf("Gauge was %v, but expected nothing", v) - } -} - -func BenchmarkCounterAdd(b *testing.B) { - metrics.Reset() - - b.ReportAllocs() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - metrics.Counter("test1").Add() - } - }) -} - -func BenchmarkCounterAddN(b *testing.B) { - metrics.Reset() - - b.ReportAllocs() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - metrics.Counter("test2").AddN(100) - } - }) -} - -func BenchmarkGaugeSet(b *testing.B) { - metrics.Reset() - - b.ReportAllocs() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - metrics.Gauge("test2").Set(100) - } - }) -} - -func BenchmarkHistogramRecordValue(b *testing.B) { - metrics.Reset() - h := metrics.NewHistogram("hist", 1, 1000, 3) - - b.ReportAllocs() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - h.RecordValue(100) - } - }) -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/doc.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/doc.go deleted file mode 100644 index c811552d7..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package runtime registers gauges and counters for various operationally -// important aspects of the Go runtime. -// -// To use, import this package: -// -// import _ "github.com/codahale/metrics/runtime" -// -// This registers the following gauges: -// -// FileDescriptors.Max -// FileDescriptors.Used -// Mem.NumGC -// Mem.PauseTotalNs -// Mem.LastGC -// Mem.Alloc -// Mem.HeapObjects -// Goroutines.Num -package runtime diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds.go deleted file mode 100644 index 5a6469114..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !windows - -package runtime - -import ( - "io/ioutil" - "syscall" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func getFDLimit() (uint64, error) { - var rlimit syscall.Rlimit - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil { - return 0, err - } - // rlimit.Cur's type is platform-dependent, so here we widen it as far as Go - // will allow by converting it to a uint64. 
- return uint64(rlimit.Cur), nil -} - -func getFDUsage() (uint64, error) { - fds, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return 0, err - } - return uint64(len(fds)), nil -} - -func init() { - metrics.Gauge("FileDescriptors.Max").SetFunc(func() int64 { - v, err := getFDLimit() - if err != nil { - return 0 - } - return int64(v) - }) - - metrics.Gauge("FileDescriptors.Used").SetFunc(func() int64 { - v, err := getFDUsage() - if err != nil { - return 0 - } - return int64(v) - }) -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_test.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_test.go deleted file mode 100644 index 76f947bd9..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -package runtime - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func TestFdStats(t *testing.T) { - _, gauges := metrics.Snapshot() - - expected := []string{ - "FileDescriptors.Max", - "FileDescriptors.Used", - } - - for _, name := range expected { - if _, ok := gauges[name]; !ok { - t.Errorf("Missing gauge %q", name) - } - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_windows.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_windows.go deleted file mode 100644 index 40f7b697e..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/fds_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package runtime - -func init() { -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines.go deleted file mode 100644 index e8a04f551..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines.go +++ /dev/null @@ -1,13 +0,0 @@ -package runtime - -import ( - "runtime" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func init() { - metrics.Gauge("Goroutines.Num").SetFunc(func() int64 { - return int64(runtime.NumGoroutine()) - }) -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines_test.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines_test.go deleted file mode 100644 index b45dd7a39..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/goroutines_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package runtime - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func TestGoroutinesStats(t *testing.T) { - _, gauges := metrics.Snapshot() - - expected := []string{ - "Goroutines.Num", - } - - for _, name := range expected { - if _, ok := gauges[name]; !ok { - t.Errorf("Missing gauge %q", name) - } - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats.go deleted file mode 100644 index cd31b185f..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats.go +++ /dev/null @@ -1,48 +0,0 @@ -package runtime - -import ( - "runtime" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func init() { - msg := &memStatGauges{} - - metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC) - metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause) - - metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, 
msg.init, msg.lastPause) - metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc) - metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects) -} - -type key struct{} // unexported to prevent collision - -type memStatGauges struct { - stats runtime.MemStats -} - -func (msg *memStatGauges) init() { - runtime.ReadMemStats(&msg.stats) -} - -func (msg *memStatGauges) numGC() uint64 { - return uint64(msg.stats.NumGC) -} - -func (msg *memStatGauges) totalPause() uint64 { - return msg.stats.PauseTotalNs -} - -func (msg *memStatGauges) lastPause() int64 { - return int64(msg.stats.LastGC) -} - -func (msg *memStatGauges) alloc() int64 { - return int64(msg.stats.Alloc) -} - -func (msg *memStatGauges) objects() int64 { - return int64(msg.stats.HeapObjects) -} diff --git a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats_test.go b/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats_test.go deleted file mode 100644 index 1a94ffbf5..000000000 --- a/Godeps/_workspace/src/github.com/codahale/metrics/runtime/memstats_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package runtime - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" -) - -func TestMemStats(t *testing.T) { - counters, gauges := metrics.Snapshot() - - expectedCounters := []string{ - "Mem.NumGC", - "Mem.PauseTotalNs", - } - - expectedGauges := []string{ - "Mem.LastGC", - "Mem.Alloc", - "Mem.HeapObjects", - } - - for _, name := range expectedCounters { - if _, ok := counters[name]; !ok { - t.Errorf("Missing counters %q", name) - } - } - - for _, name := range expectedGauges { - if _, ok := gauges[name]; !ok { - t.Errorf("Missing gauge %q", name) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore b/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore deleted file mode 100644 index 05b40514a..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -#* -*.[568] -*.a -*~ -[568].out -_* diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE b/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a90..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown b/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 079bc89a4..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,78 +0,0 @@ -# Humane Units - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize` - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83MB` or `79MiB` (whichever you prefer). - -Example: - - fmt.Printf("That file is %s.", humanize.Bytes(82854982)) - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - - fmt.Printf("This was touched %s", humanize.Time(someTimeInstance)) - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - - fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - - fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - - fmt.Printf("%f", 2.24) // 2.240000 - fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 - fmt.Printf("%f", 2.0) // 2.000000 - fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 - -## SI notation - -Format numbers with [SI notation][sinotation]. 
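For a combined view of the helpers documented above (SI notation keeps its own example just below), with outputs as this README states them:

    fmt.Println(humanize.Bytes(82854982))   // 83MB
    fmt.Println(humanize.IBytes(82854982))  // 79MiB
    fmt.Println(humanize.Comma(1000000000)) // 1,000,000,000
    fmt.Println(humanize.Ordinal(3))        // 3rd
    fmt.Println(humanize.Ftoa(2.24))        // 2.24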
- -Example: - - humanize.SI(0.00000000223, "M") // 2.23nM - - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc337d..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 6876e92ee..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,164 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": 
BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%dB", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f%s" - if val < 10 { - f = "%.1f%s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42MB") -> 42000000, nil -// ParseBigBytes("42mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - val := &big.Rat{} - _, err := fmt.Sscanf(s[:lastDigit], "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go deleted file mode 100644 index a0f977a6e..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package humanize - -import ( - "math/big" - "testing" -) - -func TestBigByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBigBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } else { - if got.Uint64() != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } - } -} - -func TestBigByteErrors(t *testing.T) { - got, err := ParseBigBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBigBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } -} - -func bbyte(in uint64) string { - return BigBytes((&big.Int{}).SetUint64(in)) -} - -func bibyte(in uint64) string { - return BigIBytes((&big.Int{}).SetUint64(in)) -} - -func TestBigBytes(t *testing.T) { - testList{ - {"bytes(0)", bbyte(0), "0B"}, - {"bytes(1)", bbyte(1), "1B"}, - {"bytes(803)", bbyte(803), "803B"}, - {"bytes(999)", bbyte(999), "999B"}, - - {"bytes(1024)", bbyte(1024), "1.0KB"}, - {"bytes(1MB - 1)", bbyte(MByte - Byte), "1000KB"}, - - {"bytes(1MB)", bbyte(1024 * 1024), "1.0MB"}, - {"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000MB"}, - - {"bytes(1GB)", bbyte(GByte), "1.0GB"}, - {"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000GB"}, - - {"bytes(1TB)", bbyte(TByte), "1.0TB"}, - {"bytes(1PB - 1T)", bbyte(PByte - TByte), "999TB"}, - - {"bytes(1PB)", bbyte(PByte), "1.0PB"}, - {"bytes(1PB - 1T)", bbyte(EByte - PByte), "999PB"}, - - {"bytes(1EB)", bbyte(EByte), "1.0EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", bibyte(0), "0B"}, - {"bytes(1)", bibyte(1), "1B"}, - {"bytes(803)", bibyte(803), "803B"}, - {"bytes(1023)", bibyte(1023), "1023B"}, - - {"bytes(1024)", bibyte(1024), "1.0KiB"}, - {"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024KiB"}, - - {"bytes(1MB)", bibyte(1024 * 1024), "1.0MiB"}, - {"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024MiB"}, - - {"bytes(1GB)", bibyte(GiByte), "1.0GiB"}, - {"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024GiB"}, - - {"bytes(1TB)", bibyte(TiByte), "1.0TiB"}, - {"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023TiB"}, - - {"bytes(1PB)", bibyte(PiByte), "1.0PiB"}, - {"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023PiB"}, - - {"bytes(1EiB)", bibyte(EiByte), "1.0EiB"}, - // Overflows. 
- // {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5GiB"}, - - {"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5GB"}, - }.validate(t) -} - -func TestVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("15347691069326346944512", 10) - s := BigBytes(b) - if s != "15ZB" { - t.Errorf("Expected 15ZB, got %v", s) - } - s = BigIBytes(b) - if s != "13ZiB" { - t.Errorf("Expected 13ZiB, got %v", s) - } - - b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10) - s = BigBytes(b) - if s != "16YB" { - t.Errorf("Expected 16YB, got %v", s) - } - s = BigIBytes(b) - if s != "13YiB" { - t.Errorf("Expected 13YiB, got %v", s) - } -} - -func TestVeryVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10) - s := BigBytes(b) - if s != "16093YB" { - t.Errorf("Expected 16093YB, got %v", s) - } - s = BigIBytes(b) - if s != "13312YiB" { - t.Errorf("Expected 13312YiB, got %v", s) - } -} - -func TestParseVeryBig(t *testing.T) { - tests := []struct { - in string - out string - }{ - {"16ZB", "16000000000000000000000"}, - {"16ZiB", "18889465931478580854784"}, - {"16.5ZB", "16500000000000000000000"}, - {"16.5ZiB", "19479761741837286506496"}, - {"16Z", "16000000000000000000000"}, - {"16Zi", "18889465931478580854784"}, - {"16.5Z", "16500000000000000000000"}, - {"16.5Zi", "19479761741837286506496"}, - - {"16YB", "16000000000000000000000000"}, - {"16YiB", "19342813113834066795298816"}, - {"16.5YB", "16500000000000000000000000"}, - {"16.5YiB", "19947276023641381382651904"}, - {"16Y", "16000000000000000000000000"}, - {"16Yi", "19342813113834066795298816"}, - {"16.5Y", "16500000000000000000000000"}, - {"16.5Yi", "19947276023641381382651904"}, - } - - for _, test := range tests { - x, err := ParseBigBytes(test.in) - if err != nil { - t.Errorf("Error parsing %q: %v", test.in, err) - continue - } - - if x.String() != test.out { - t.Errorf("Expected %q for %q, got %v", test.out, test.in, x) - } - } -} - -func BenchmarkParseBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBigBytes("16.5Z") - } -} - -func BenchmarkBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - bibyte(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 4c4b5af15..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%dB", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f%s" - if val < 10 { - f = "%.1f%s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83MB -func Bytes(s uint64) string { - sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42MB") -> 42000000, nil -// ParseBytes("42mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - f, err := strconv.ParseFloat(s[:lastDigit], 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go deleted file mode 100644 index 76a594c1a..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } - if got != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } -} - -func TestByteErrors(t *testing.T) { - got, err := ParseBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } - got, err = ParseBytes("16 EiB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } -} - -func TestBytes(t *testing.T) { - testList{ - {"bytes(0)", Bytes(0), "0B"}, - {"bytes(1)", Bytes(1), "1B"}, - {"bytes(803)", Bytes(803), "803B"}, - {"bytes(999)", Bytes(999), "999B"}, - - {"bytes(1024)", Bytes(1024), "1.0KB"}, - {"bytes(9999)", Bytes(9999), "10KB"}, - {"bytes(1MB - 1)", Bytes(MByte - Byte), "1000KB"}, - - {"bytes(1MB)", Bytes(1024 * 1024), "1.0MB"}, - {"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000MB"}, - - {"bytes(1GB)", Bytes(GByte), "1.0GB"}, - {"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000GB"}, - {"bytes(10MB)", Bytes(9999 * 1000), "10MB"}, - - {"bytes(1TB)", Bytes(TByte), "1.0TB"}, - {"bytes(1PB - 1T)", Bytes(PByte - TByte), "999TB"}, - - {"bytes(1PB)", Bytes(PByte), "1.0PB"}, - {"bytes(1PB - 1T)", Bytes(EByte - PByte), "999PB"}, - - {"bytes(1EB)", Bytes(EByte), "1.0EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", IBytes(0), "0B"}, - {"bytes(1)", IBytes(1), "1B"}, - {"bytes(803)", IBytes(803), "803B"}, - {"bytes(1023)", IBytes(1023), "1023B"}, - - {"bytes(1024)", IBytes(1024), "1.0KiB"}, - {"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024KiB"}, - - {"bytes(1MB)", IBytes(1024 * 1024), "1.0MiB"}, - {"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024MiB"}, - - {"bytes(1GB)", IBytes(GiByte), "1.0GiB"}, - {"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024GiB"}, - - {"bytes(1TB)", IBytes(TiByte), "1.0TiB"}, - {"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023TiB"}, - - {"bytes(1PB)", IBytes(PiByte), "1.0PiB"}, - {"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023PiB"}, - - {"bytes(1EiB)", IBytes(EiByte), "1.0EiB"}, - // Overflows. - // {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5GiB"}, - - {"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5GB"}, - }.validate(t) -} - -func BenchmarkParseBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBytes("16.5GB") - } -} - -func BenchmarkBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - Bytes(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 9d0d2ed18..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,101 +0,0 @@ -package humanize - -import ( - "bytes" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:len(parts)], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. -func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:len(parts)], ",") -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go deleted file mode 100644 index 49040fb71..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "math" - "math/big" - "testing" -) - -func TestCommas(t *testing.T) { - testList{ - {"0", Comma(0), "0"}, - {"10", Comma(10), "10"}, - {"100", Comma(100), "100"}, - {"1,000", Comma(1000), "1,000"}, - {"10,000", Comma(10000), "10,000"}, - {"100,000", Comma(100000), "100,000"}, - {"10,000,000", Comma(10000000), "10,000,000"}, - {"10,100,000", Comma(10100000), "10,100,000"}, - {"10,010,000", Comma(10010000), "10,010,000"}, - {"10,001,000", Comma(10001000), "10,001,000"}, - {"123,456,789", Comma(123456789), "123,456,789"}, - {"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", Comma(-123456789), "-123,456,789"}, - {"-10,100,000", Comma(-10100000), "-10,100,000"}, - {"-10,010,000", Comma(-10010000), "-10,010,000"}, - {"-10,001,000", Comma(-10001000), "-10,001,000"}, - {"-10,000,000", Comma(-10000000), "-10,000,000"}, - {"-100,000", Comma(-100000), "-100,000"}, - {"-10,000", Comma(-10000), "-10,000"}, - {"-1,000", Comma(-1000), "-1,000"}, - {"-100", Comma(-100), "-100"}, - {"-10", Comma(-10), "-10"}, - }.validate(t) -} - -func TestCommafs(t *testing.T) { - testList{ - {"0", Commaf(0), "0"}, - {"10.11", Commaf(10.11), "10.11"}, - {"100", Commaf(100), "100"}, - {"1,000", Commaf(1000), "1,000"}, -
{"10,000", Commaf(10000), "10,000"}, - {"100,000", Commaf(100000), "100,000"}, - {"834,142.32", Commaf(834142.32), "834,142.32"}, - {"10,000,000", Commaf(10000000), "10,000,000"}, - {"10,100,000", Commaf(10100000), "10,100,000"}, - {"10,010,000", Commaf(10010000), "10,010,000"}, - {"10,001,000", Commaf(10001000), "10,001,000"}, - {"123,456,789", Commaf(123456789), "123,456,789"}, - {"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"}, - {"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"}, - {"-123,456,789", Commaf(-123456789), "-123,456,789"}, - {"-10,100,000", Commaf(-10100000), "-10,100,000"}, - {"-10,010,000", Commaf(-10010000), "-10,010,000"}, - {"-10,001,000", Commaf(-10001000), "-10,001,000"}, - {"-10,000,000", Commaf(-10000000), "-10,000,000"}, - {"-100,000", Commaf(-100000), "-100,000"}, - {"-10,000", Commaf(-10000), "-10,000"}, - {"-1,000", Commaf(-1000), "-1,000"}, - {"-100.11", Commaf(-100.11), "-100.11"}, - {"-10", Commaf(-10), "-10"}, - }.validate(t) -} - -func BenchmarkCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - Comma(1234567890) - } -} - -func BenchmarkCommaf(b *testing.B) { - for i := 0; i < b.N; i++ { - Commaf(1234567890.83584) - } -} - -func BenchmarkBigCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - BigComma(big.NewInt(1234567890)) - } -} - -func bigComma(i int64) string { - return BigComma(big.NewInt(i)) -} - -func TestBigCommas(t *testing.T) { - testList{ - {"0", bigComma(0), "0"}, - {"10", bigComma(10), "10"}, - {"100", bigComma(100), "100"}, - {"1,000", bigComma(1000), "1,000"}, - {"10,000", bigComma(10000), "10,000"}, - {"100,000", bigComma(100000), "100,000"}, - {"10,000,000", bigComma(10000000), "10,000,000"}, - {"10,100,000", bigComma(10100000), "10,100,000"}, - {"10,010,000", bigComma(10010000), "10,010,000"}, - {"10,001,000", bigComma(10001000), "10,001,000"}, - {"123,456,789", bigComma(123456789), "123,456,789"}, - {"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", bigComma(-123456789), "-123,456,789"}, - {"-10,100,000", bigComma(-10100000), "-10,100,000"}, - {"-10,010,000", bigComma(-10010000), "-10,010,000"}, - {"-10,001,000", bigComma(-10001000), "-10,001,000"}, - {"-10,000,000", bigComma(-10000000), "-10,000,000"}, - {"-100,000", bigComma(-100000), "-100,000"}, - {"-10,000", bigComma(-10000), "-10,000"}, - {"-1,000", bigComma(-1000), "-1,000"}, - {"-100", bigComma(-100), "-100"}, - {"-10", bigComma(-10), "-10"}, - }.validate(t) -} - -func TestVeryBigCommas(t *testing.T) { - tests := []struct{ in, exp string }{ - { - "84889279597249724975972597249849757294578485", - "84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - { - "-84889279597249724975972597249849757294578485", - 
"-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - } - for _, test := range tests { - n, _ := (&big.Int{}).SetString(test.in, 10) - got := BigComma(n) - if test.exp != got { - t.Errorf("Expected %q, got %q", test.exp, got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go deleted file mode 100644 index fc7db1516..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package humanize - -import ( - "testing" -) - -type testList []struct { - name, got, exp string -} - -func (tl testList) validate(t *testing.T) { - for _, test := range tl { - if test.got != test.exp { - t.Errorf("On %v, expected '%v', but got '%v'", - test.name, test.exp, test.got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index c76190b10..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,23 +0,0 @@ -package humanize - -import "strconv" - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go deleted file mode 100644 index 40d13bd71..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package humanize - -import ( - "fmt" - "regexp" - "strconv" - "testing" -) - -func TestFtoa(t *testing.T) { - testList{ - {"200", Ftoa(200), "200"}, - {"2", Ftoa(2), "2"}, - {"2.2", Ftoa(2.2), "2.2"}, - {"2.02", Ftoa(2.02), "2.02"}, - {"200.02", Ftoa(200.02), "200.02"}, - }.validate(t) -} - -func BenchmarkFtoaRegexTrailing(b *testing.B) { - trailingZerosRegex := regexp.MustCompile(`\.?0+$`) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - trailingZerosRegex.ReplaceAllString("2.00000", "") - trailingZerosRegex.ReplaceAllString("2.0000", "") - trailingZerosRegex.ReplaceAllString("2.000", "") - trailingZerosRegex.ReplaceAllString("2.00", "") - trailingZerosRegex.ReplaceAllString("2.0", "") - trailingZerosRegex.ReplaceAllString("2", "") - } -} - -func BenchmarkFtoaFunc(b *testing.B) { - for i := 0; i < b.N; i++ { - stripTrailingZeros("2.00000") - stripTrailingZeros("2.0000") - stripTrailingZeros("2.000") - stripTrailingZeros("2.00") - stripTrailingZeros("2.0") - stripTrailingZeros("2") - } -} - -func BenchmarkFmtF(b *testing.B) { - for i := 0; i < b.N; i++ { - fmt.Sprintf("%f", 2.03584) - } -} - -func BenchmarkStrconvF(b *testing.B) { - for i := 0; i < b.N; i++ { - strconv.FormatFloat(2.03584, 'f', 6, 64) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a69540a06..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. 
- -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like "83MB" or -"79MiB" (whichever you prefer). -*/ -package humanize diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go deleted file mode 100644 index 32141348c..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := FormatFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.68" -// "#,###." => "12,346" -// "#,###" => "12345,679" -// "#\u202F###,##" => "12 345,68" -// "#.###,######" => 12.345,678900 -// "" (aka default format) => 12,345.68 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer numbers, FormatInteger(), -// which is convenient for calls within templates. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "."
- thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.Itoa(int(intf)) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. 
-func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go deleted file mode 100644 index dd38a5bb9..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -type TestStruct struct { - name string - format string - num float64 - formatted string -} - -func TestFormatFloat(t *testing.T) { - tests := []TestStruct{ - {"default", "", 12345.6789, "12,345.68"}, - {"#", "#", 12345.6789, "12345.678900000"}, - {"#.", "#.", 12345.6789, "12346"}, - {"#,#", "#,#", 12345.6789, "12345,7"}, - {"#,##", "#,##", 12345.6789, "12345,68"}, - {"#,###", "#,###", 12345.6789, "12345,679"}, - {"#,###.", "#,###.", 12345.6789, "12,346"}, - {"#,###.##", "#,###.##", 12345.6789, "12,345.68"}, - {"#,###.###", "#,###.###", 12345.6789, "12,345.679"}, - {"#,###.####", "#,###.####", 12345.6789, "12,345.6789"}, - {"#.###,######", "#.###,######", 12345.6789, "12.345,678900"}, - {"#\u202f###,##", "#\u202f###,##", 12345.6789, "12 345,68"}, - - // special cases - {"NaN", "#", math.NaN(), "NaN"}, - {"+Inf", "#", math.Inf(1), "Infinity"}, - {"-Inf", "#", math.Inf(-1), "-Infinity"}, - {"signStr <= -0.000000001", "", -0.000000002, "-0.00"}, - {"signStr = 0", "", 0, "0.00"}, - {"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"}, - } - - for _, test := range tests { - got := FormatFloat(test.format, test.num) - if got != test.formatted { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - test.name, test.format, test.num, got, test.formatted) - } - } - // Test a single integer - got := FormatInteger("#", 12345) - if got != "12345.000000000" { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - "integerTest", "#", 12345, got, "12345.000000000") - } - // Test the things that could panic - panictests := []TestStruct{ - {"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"}, - {"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"}, - } - for _, test := range panictests { - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - _ = FormatFloat(test.format, test.num) - - }() - if !didPanic { - t.Errorf("On %v, should have panicked and did not.", - test.name) - } - } - -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a861..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format.
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go deleted file mode 100644 index 51d85ee7a..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestOrdinals(t *testing.T) { - testList{ - {"0", Ordinal(0), "0th"}, - {"1", Ordinal(1), "1st"}, - {"2", Ordinal(2), "2nd"}, - {"3", Ordinal(3), "3rd"}, - {"4", Ordinal(4), "4th"}, - {"10", Ordinal(10), "10th"}, - {"11", Ordinal(11), "11th"}, - {"12", Ordinal(12), "12th"}, - {"13", Ordinal(13), "13th"}, - {"101", Ordinal(101), "101st"}, - {"102", Ordinal(102), "102nd"}, - {"103", Ordinal(103), "103rd"}, - }.validate(t) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go deleted file mode 100644 index dee8b765a..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,110 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([0-9.]+)([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - exponent := math.Floor(logn(input, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := input / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1M instead of 1000k - if value == 1000.0 { - exponent += 3 - value = input / math.Pow(10, exponent) - } - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format the float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1MB -// e.g. SI(2.2345e-12, "F") -> 2.2345pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g.
ParseSI("2.2345pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go deleted file mode 100644 index 32fb386b5..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -func TestSI(t *testing.T) { - tests := []struct { - name string - num float64 - formatted string - }{ - {"e-24", 1e-24, "1yF"}, - {"e-21", 1e-21, "1zF"}, - {"e-18", 1e-18, "1aF"}, - {"e-15", 1e-15, "1fF"}, - {"e-12", 1e-12, "1pF"}, - {"e-12", 2.2345e-12, "2.2345pF"}, - {"e-12", 2.23e-12, "2.23pF"}, - {"e-11", 2.23e-11, "22.3pF"}, - {"e-10", 2.2e-10, "220pF"}, - {"e-9", 2.2e-9, "2.2nF"}, - {"e-8", 2.2e-8, "22nF"}, - {"e-7", 2.2e-7, "220nF"}, - {"e-6", 2.2e-6, "2.2µF"}, - {"e-6", 1e-6, "1µF"}, - {"e-5", 2.2e-5, "22µF"}, - {"e-4", 2.2e-4, "220µF"}, - {"e-3", 2.2e-3, "2.2mF"}, - {"e-2", 2.2e-2, "22mF"}, - {"e-1", 2.2e-1, "220mF"}, - {"e+0", 2.2e-0, "2.2F"}, - {"e+0", 2.2, "2.2F"}, - {"e+1", 2.2e+1, "22F"}, - {"0", 0, "0F"}, - {"e+1", 22, "22F"}, - {"e+2", 2.2e+2, "220F"}, - {"e+2", 220, "220F"}, - {"e+3", 2.2e+3, "2.2kF"}, - {"e+3", 2200, "2.2kF"}, - {"e+4", 2.2e+4, "22kF"}, - {"e+4", 22000, "22kF"}, - {"e+5", 2.2e+5, "220kF"}, - {"e+6", 2.2e+6, "2.2MF"}, - {"e+6", 1e+6, "1MF"}, - {"e+7", 2.2e+7, "22MF"}, - {"e+8", 2.2e+8, "220MF"}, - {"e+9", 2.2e+9, "2.2GF"}, - {"e+10", 2.2e+10, "22GF"}, - {"e+11", 2.2e+11, "220GF"}, - {"e+12", 2.2e+12, "2.2TF"}, - {"e+15", 2.2e+15, "2.2PF"}, - {"e+18", 2.2e+18, "2.2EF"}, - {"e+21", 2.2e+21, "2.2ZF"}, - {"e+24", 2.2e+24, "2.2YF"}, - - // special case - {"1F", 1000 * 1000, "1MF"}, - {"1F", 1e6, "1MF"}, - } - - for _, test := range tests { - got := SI(test.num, "F") - if got != test.formatted { - t.Errorf("On %v (%v), got %v, wanted %v", - test.name, test.num, got, test.formatted) - } - - gotf, gotu, err := ParseSI(test.formatted) - if err != nil { - t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err) - continue - } - - if math.Abs(1-(gotf/test.num)) > 0.01 { - t.Errorf("On %v (%v), got %v, wanted %v (±%v)", - test.name, test.formatted, gotf, test.num, - math.Abs(1-(gotf/test.num))) - } - if gotu != "F" { - t.Errorf("On %v (%v), expected unit F, got %v", - test.name, test.formatted, gotu) - } - } - - // Parse error - gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats - if err == nil { - t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu) - } -} - -func BenchmarkParseSI(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseSI("2.2346ZB") - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go deleted file mode 100644 index 592ebe1d6..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,91 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Minute = 60 - Hour = 60 * Minute - Day = 24 * Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string.
-// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -var magnitudes = []struct { - d int64 - format string - divby int64 -}{ - {1, "now", 1}, - {2, "1 second %s", 1}, - {Minute, "%d seconds %s", 1}, - {2 * Minute, "1 minute %s", 1}, - {Hour, "%d minutes %s", Minute}, - {2 * Hour, "1 hour %s", 1}, - {Day, "%d hours %s", Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are applied so that -// the label corresponding to the smaller time is used. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - lbl := albl - diff := b.Unix() - a.Unix() - - after := a.After(b) - if after { - lbl = blbl - diff = a.Unix() - b.Unix() - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].d > diff - }) - - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.format { - if escaped { - switch ch { - case '%': - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.divby) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.format, args...) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go deleted file mode 100644 index 528daa4ec..000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package humanize - -import ( - "math" - "testing" - "time" -) - -func TestPast(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second ago", Time(time.Unix(now-1, 0)), "1 second ago"}, - {"12 seconds ago", Time(time.Unix(now-12, 0)), "12 seconds ago"}, - {"30 seconds ago", Time(time.Unix(now-30, 0)), "30 seconds ago"}, - {"45 seconds ago", Time(time.Unix(now-45, 0)), "45 seconds ago"}, - {"1 minute ago", Time(time.Unix(now-63, 0)), "1 minute ago"}, - {"15 minutes ago", Time(time.Unix(now-15*Minute, 0)), "15 minutes ago"}, - {"1 hour ago", Time(time.Unix(now-63*Minute, 0)), "1 hour ago"}, - {"2 hours ago", Time(time.Unix(now-2*Hour, 0)), "2 hours ago"}, - {"21 hours ago", Time(time.Unix(now-21*Hour, 0)), "21 hours ago"}, - {"1 day ago", Time(time.Unix(now-26*Hour, 0)), "1 day ago"}, - {"2 days ago", Time(time.Unix(now-49*Hour, 0)), "2 days ago"}, - {"3 days ago", Time(time.Unix(now-3*Day, 0)), "3 days ago"}, - {"1 week ago (1)", Time(time.Unix(now-7*Day, 0)), "1 week ago"}, - {"1 week ago (2)", Time(time.Unix(now-12*Day, 0)), "1 week ago"}, - {"2 weeks ago", Time(time.Unix(now-15*Day, 0)), "2 weeks ago"}, - {"1 month ago", Time(time.Unix(now-39*Day, 0)), "1 month ago"}, - {"3 months ago", Time(time.Unix(now-99*Day, 0)), "3 months ago"}, - {"1 year ago (1)", Time(time.Unix(now-365*Day, 0)), "1 year ago"}, - {"1 year ago (2)", Time(time.Unix(now-400*Day, 0)), "1 year ago"}, - {"2 years ago (1)", Time(time.Unix(now-548*Day, 0)), "2 years ago"}, - {"2 years ago (2)",
Time(time.Unix(now-725*Day, 0)), "2 years ago"}, - {"2 years ago (3)", Time(time.Unix(now-800*Day, 0)), "2 years ago"}, - {"3 years ago", Time(time.Unix(now-3*Year, 0)), "3 years ago"}, - {"long ago", Time(time.Unix(now-LongTime, 0)), "a long while ago"}, - }.validate(t) -} - -func TestFuture(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second from now", Time(time.Unix(now+1, 0)), "1 second from now"}, - {"12 seconds from now", Time(time.Unix(now+12, 0)), "12 seconds from now"}, - {"30 seconds from now", Time(time.Unix(now+30, 0)), "30 seconds from now"}, - {"45 seconds from now", Time(time.Unix(now+45, 0)), "45 seconds from now"}, - {"15 minutes from now", Time(time.Unix(now+15*Minute, 0)), "15 minutes from now"}, - {"2 hours from now", Time(time.Unix(now+2*Hour, 0)), "2 hours from now"}, - {"21 hours from now", Time(time.Unix(now+21*Hour, 0)), "21 hours from now"}, - {"1 day from now", Time(time.Unix(now+26*Hour, 0)), "1 day from now"}, - {"2 days from now", Time(time.Unix(now+49*Hour, 0)), "2 days from now"}, - {"3 days from now", Time(time.Unix(now+3*Day, 0)), "3 days from now"}, - {"1 week from now (1)", Time(time.Unix(now+7*Day, 0)), "1 week from now"}, - {"1 week from now (2)", Time(time.Unix(now+12*Day, 0)), "1 week from now"}, - {"2 weeks from now", Time(time.Unix(now+15*Day, 0)), "2 weeks from now"}, - {"1 month from now", Time(time.Unix(now+30*Day, 0)), "1 month from now"}, - {"1 year from now", Time(time.Unix(now+365*Day, 0)), "1 year from now"}, - {"2 years from now", Time(time.Unix(now+2*Year, 0)), "2 years from now"}, - {"a while from now", Time(time.Unix(now+LongTime, 0)), "a long while from now"}, - }.validate(t) -} - -func TestRange(t *testing.T) { - start := time.Time{} - end := time.Unix(math.MaxInt64, math.MaxInt64) - x := RelTime(start, end, "ago", "from now") - if x != "a long while from now" { - t.Errorf("Expected a long while from now, got %q", x) - } -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml deleted file mode 100644 index 2cc62c5e8..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - -matrix: - fast_finish: true - -before_install: - - go get -v code.google.com/p/go.tools/cmd/vet - - go get -v github.com/golang/lint/golint - - go get -v code.google.com/p/go.tools/cmd/cover - -install: - - go install -race -v std - - go get -race -t -v ./... - - go install -race -v ./... - -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -cpu=2 -race -v ./... - - go test -cpu=2 -covermode=atomic ./... diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go deleted file mode 100644 index 60cda2a5b..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package atomicfile provides the ability to write a file with an eventual -// rename on Close. This allows for a file to always be in a consistent state -// and never represent an in-progress write. -package atomicfile - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// File behaves like os.File, but does an atomic rename operation at Close. 
-type File struct { - *os.File - path string -} - -// New creates a new temporary file that will replace the file at the given -// path when Closed. -func New(path string, mode os.FileMode) (*File, error) { - f, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path)) - if err != nil { - return nil, err - } - if err := os.Chmod(f.Name(), mode); err != nil { - os.Remove(f.Name()) - return nil, err - } - return &File{File: f, path: path}, nil -} - -// Close the file replacing the configured file. -func (f *File) Close() error { - if err := f.File.Close(); err != nil { - return err - } - if err := os.Rename(f.Name(), f.path); err != nil { - return err - } - return nil -} - -// Abort closes the file and removes it instead of replacing the configured -// file. This is useful if after starting to write to the file you decide you -// don't want it anymore. -func (f *File) Abort() error { - if err := f.File.Close(); err != nil { - return err - } - if err := os.Remove(f.Name()); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go deleted file mode 100644 index 0ed481f8c..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package atomicfile_test - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/facebookgo/atomicfile" -) - -func test(t *testing.T, dir, prefix string) { - t.Parallel() - - tmpfile, err := ioutil.TempFile(dir, prefix) - if err != nil { - t.Fatal(err) - } - name := tmpfile.Name() - - if err := os.Remove(name); err != nil { - t.Fatal(err) - } - - defer os.Remove(name) - f, err := atomicfile.New(name, os.FileMode(0666)) - if err != nil { - t.Fatal(err) - } - f.Write([]byte("foo")) - if _, err := os.Stat(name); !os.IsNotExist(err) { - t.Fatal("did not expect file to exist") - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(name); err != nil { - t.Fatalf("expected file to exist: %s", err) - } -} - -func TestCurrentDir(t *testing.T) { - cwd, _ := os.Getwd() - test(t, cwd, "atomicfile-current-dir-") -} - -func TestRootTmpDir(t *testing.T) { - test(t, "/tmp", "atomicfile-root-tmp-dir-") -} - -func TestDefaultTmpDir(t *testing.T) { - test(t, "", "atomicfile-default-tmp-dir-") -} - -func TestAbort(t *testing.T) { - contents := []byte("the answer is 42") - t.Parallel() - tmpfile, err := ioutil.TempFile("", "atomicfile-abort-") - if err != nil { - t.Fatal(err) - } - name := tmpfile.Name() - if _, err := tmpfile.Write(contents); err != nil { - t.Fatal(err) - } - defer os.Remove(name) - - f, err := atomicfile.New(name, os.FileMode(0666)) - if err != nil { - t.Fatal(err) - } - f.Write([]byte("foo")) - if err := f.Abort(); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(name); err != nil { - t.Fatalf("expected file to exist: %s", err) - } - actual, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(contents, actual) { - t.Fatalf(`did not find expected "%s" instead found "%s"`, contents, actual) - } -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license deleted file mode 100644 index 4ce34257c..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license +++ /dev/null @@ -1,30 +0,0 @@ -BSD License - -For atomicfile software - 
-Copyright (c) 2014, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents deleted file mode 100644 index 887426cf1..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents +++ /dev/null @@ -1,23 +0,0 @@ -Additional Grant of Patent Rights - -"Software" means the atomicfile software distributed by Facebook, Inc. - -Facebook hereby grants you a perpetual, worldwide, royalty-free, non-exclusive, -irrevocable (subject to the termination provision below) license under any -rights in any patent claims owned by Facebook, to make, have made, use, sell, -offer to sell, import, and otherwise transfer the Software. For avoidance of -doubt, no license is granted under Facebook’s rights in any patent claims that -are infringed by (i) modifications to the Software made by you or a third party, -or (ii) the Software in combination with any software or other technology -provided by you or a third party. - -The license granted hereunder will terminate, automatically and without notice, -for anyone that makes any claim (including by filing any lawsuit, assertion or -other action) alleging (a) direct, indirect, or contributory infringement or -inducement to infringe any patent: (i) by Facebook or any of its subsidiaries or -affiliates, whether or not such claim is related to the Software, (ii) by any -party if such claim arises in whole or in part from any software, product or -service of Facebook or any of its subsidiaries or affiliates, whether or not -such claim is related to the Software, or (iii) by any party relating to the -Software; or (b) that any right in any patent claim of Facebook is invalid or -unenforceable. 
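The atomicfile API deleted above is small: New stages writes in a temporary file created alongside the target, Close atomically renames that temp file over the target, and Abort discards it. A minimal usage sketch, assuming the upstream import path from the package readme; the file name and payload here are hypothetical:

package main

import (
	"log"
	"os"

	"github.com/facebookgo/atomicfile"
)

func main() {
	// New creates a temp file next to config.json; writes go there first.
	f, err := atomicfile.New("config.json", os.FileMode(0644))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte(`{"ok": true}`)); err != nil {
		f.Abort() // drop the temp file; an existing config.json stays intact
		log.Fatal(err)
	}
	// Close renames the temp file over config.json in a single step, so
	// readers never observe a partially written file.
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}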
diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md deleted file mode 100644 index 80038c3e0..000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md +++ /dev/null @@ -1,4 +0,0 @@ -atomicfile [![Build Status](https://secure.travis-ci.org/facebookgo/atomicfile.png)](http://travis-ci.org/facebookgo/atomicfile) -========== - -Documentation: http://godoc.org/github.com/facebookgo/atomicfile diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/.travis.yml b/Godeps/_workspace/src/github.com/ipfs/go-datastore/.travis.yml deleted file mode 100644 index 9b1d623af..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.3 - - release - - tip - -script: - - make test - -env: TEST_NO_FUSE=1 TEST_VERBOSE=1 diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json deleted file mode 100644 index f5b260883..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "ImportPath": "github.com/jbenet/go-datastore", - "GoVersion": "go1.5", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.8.3-37-g418b41d", - "Rev": "418b41d23a1bf978c06faea5313ba194650ac088" - }, - { - "ImportPath": "github.com/codahale/blake2", - "Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4" - }, - { - "ImportPath": "github.com/codahale/hdrhistogram", - "Rev": "5fd85ec0b4e2dd5d4158d257d943f2e586d86b62" - }, - { - "ImportPath": "github.com/codahale/metrics", - "Rev": "7d3beb1b480077e77c08a6f6c65ea969f6e91420" - }, - { - "ImportPath": "github.com/dustin/randbo", - "Rev": "7f1b564ca7242d22bcc6e2128beb90d9fa38b9f0" - }, - { - "ImportPath": "github.com/fzzy/radix/redis", - "Comment": "v0.5.1", - "Rev": "27a863cdffdb0998d13e1e11992b18489aeeaa25" - }, - { - "ImportPath": "github.com/hashicorp/golang-lru", - "Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370" - }, - { - "ImportPath": "github.com/ipfs/go-log", - "Rev": "ee5cb9834b33bcf29689183e0323e328c8b8de29" - }, - { - "ImportPath": "github.com/jbenet/go-os-rename", - "Rev": "2d93ae970ba96c41f717036a5bf5494faf1f38c0" - }, - { - "ImportPath": "github.com/jbenet/goprocess", - "Rev": "5b02f8d275a2dd882fb06f8bbdf74347795ff3b1" - }, - { - "ImportPath": "github.com/mattbaird/elastigo/api", - "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" - }, - { - "ImportPath": "github.com/mattbaird/elastigo/core", - "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" - }, - { - "ImportPath": "github.com/satori/go.uuid", - "Rev": "7c7f2020c4c9491594b85767967f4619c2fa75f9" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "871eee0a7546bb7d1b2795142e29c4534abc49b3" - }, - { - "ImportPath": "github.com/syndtr/gosnappy/snappy", - "Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "dfcbca9c45aeabb8971affa4f76b2d40f6f72328" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "91ae5f88a67b14891cfd43895b01164f6c120420" - }, - { - "ImportPath": "launchpad.net/gocheck", - "Comment": "87", - "Rev": "gustavo@niemeyer.net-20140225173054-xu9zlkf9kxhvow02" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Readme b/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Readme deleted file 
mode 100644 index 4cdaa53d5..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/LICENSE b/Godeps/_workspace/src/github.com/ipfs/go-datastore/LICENSE deleted file mode 100644 index 96bcd5df0..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2014 Juan Batiz-Benet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Makefile b/Godeps/_workspace/src/github.com/ipfs/go-datastore/Makefile deleted file mode 100644 index 100d402cb..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -build: - go build - -test: build - go test -race -cpu=5 -v ./... - -# saves/vendors third-party dependencies to Godeps/_workspace -# -r flag rewrites import paths to use the vendored path -# ./... performs operation on all packages in tree -vendor: godep - godep save -r ./... - -deps: - go get ./... - -watch: - -make - @echo "[watching *.go; for recompilation]" - # for portability, use watchmedo -- pip install watchmedo - @watchmedo shell-command --patterns="*.go;" --recursive \ - --command='make' . - -godep: - go get github.com/tools/godep diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/README.md b/Godeps/_workspace/src/github.com/ipfs/go-datastore/README.md deleted file mode 100644 index 2baf4b2a2..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# datastore interface - -datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime. - -In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding). - -Based on [datastore.py](https://github.com/datastore/datastore). 
- -### Documentation - -https://godoc.org/github.com/jbenet/go-datastore - -### License - -MIT diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/basic_ds.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/basic_ds.go deleted file mode 100644 index cec1022b6..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/basic_ds.go +++ /dev/null @@ -1,189 +0,0 @@ -package datastore - -import ( - "io" - "log" - - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// Here are some basic datastore implementations. - -type keyMap map[Key]interface{} - -// MapDatastore uses a standard Go map for internal storage. -type MapDatastore struct { - values keyMap -} - -// NewMapDatastore constructs a MapDatastore -func NewMapDatastore() (d *MapDatastore) { - return &MapDatastore{ - values: keyMap{}, - } -} - -// Put implements Datastore.Put -func (d *MapDatastore) Put(key Key, value interface{}) (err error) { - d.values[key] = value - return nil -} - -// Get implements Datastore.Get -func (d *MapDatastore) Get(key Key) (value interface{}, err error) { - val, found := d.values[key] - if !found { - return nil, ErrNotFound - } - return val, nil -} - -// Has implements Datastore.Has -func (d *MapDatastore) Has(key Key) (exists bool, err error) { - _, found := d.values[key] - return found, nil -} - -// Delete implements Datastore.Delete -func (d *MapDatastore) Delete(key Key) (err error) { - if _, found := d.values[key]; !found { - return ErrNotFound - } - delete(d.values, key) - return nil -} - -// Query implements Datastore.Query -func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) { - re := make([]dsq.Entry, 0, len(d.values)) - for k, v := range d.values { - re = append(re, dsq.Entry{Key: k.String(), Value: v}) - } - r := dsq.ResultsWithEntries(q, re) - r = dsq.NaiveQueryApply(q, r) - return r, nil -} - -func (d *MapDatastore) Batch() (Batch, error) { - return NewBasicBatch(d), nil -} - -func (d *MapDatastore) Close() error { - return nil -} - -// NullDatastore stores nothing, but conforms to the API. -// Useful to test with. -type NullDatastore struct { -} - -// NewNullDatastore constructs a null datastore -func NewNullDatastore() *NullDatastore { - return &NullDatastore{} -} - -// Put implements Datastore.Put -func (d *NullDatastore) Put(key Key, value interface{}) (err error) { - return nil -} - -// Get implements Datastore.Get -func (d *NullDatastore) Get(key Key) (value interface{}, err error) { - return nil, nil -} - -// Has implements Datastore.Has -func (d *NullDatastore) Has(key Key) (exists bool, err error) { - return false, nil -} - -// Delete implements Datastore.Delete -func (d *NullDatastore) Delete(key Key) (err error) { - return nil -} - -// Query implements Datastore.Query -func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) { - return dsq.ResultsWithEntries(q, nil), nil -} - -func (d *NullDatastore) Batch() (Batch, error) { - return NewBasicBatch(d), nil -} - -func (d *NullDatastore) Close() error { - return nil -} - -// LogDatastore logs all accesses through the datastore. -type LogDatastore struct { - Name string - child Datastore -} - -// Shim is a datastore which has a child. -type Shim interface { - Datastore - - Children() []Datastore -} - -// NewLogDatastore constructs a log datastore.
-func NewLogDatastore(ds Datastore, name string) *LogDatastore { - if len(name) < 1 { - name = "LogDatastore" - } - return &LogDatastore{Name: name, child: ds} -} - -// Children implements Shim -func (d *LogDatastore) Children() []Datastore { - return []Datastore{d.child} -} - -// Put implements Datastore.Put -func (d *LogDatastore) Put(key Key, value interface{}) (err error) { - log.Printf("%s: Put %s\n", d.Name, key) - // log.Printf("%s: Put %s ```%s```", d.Name, key, value) - return d.child.Put(key, value) -} - -// Get implements Datastore.Get -func (d *LogDatastore) Get(key Key) (value interface{}, err error) { - log.Printf("%s: Get %s\n", d.Name, key) - return d.child.Get(key) -} - -// Has implements Datastore.Has -func (d *LogDatastore) Has(key Key) (exists bool, err error) { - log.Printf("%s: Has %s\n", d.Name, key) - return d.child.Has(key) -} - -// Delete implements Datastore.Delete -func (d *LogDatastore) Delete(key Key) (err error) { - log.Printf("%s: Delete %s\n", d.Name, key) - return d.child.Delete(key) -} - -// Query implements Datastore.Query -func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) { - log.Printf("%s: Query\n", d.Name) - return d.child.Query(q) -} - -func (d *LogDatastore) Batch() (Batch, error) { - log.Printf("%s: Batch\n", d.Name) - if bds, ok := d.child.(Batching); ok { - return bds.Batch() - } - return nil, ErrBatchUnsupported -} - -func (d *LogDatastore) Close() error { - log.Printf("%s: Close\n", d.Name) - if cds, ok := d.child.(io.Closer); ok { - return cds.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/batch.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/batch.go deleted file mode 100644 index 04c5124b3..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/batch.go +++ /dev/null @@ -1,44 +0,0 @@ -package datastore - -// basicBatch implements the transaction interface for datastores that do -// not have any sort of underlying transactional support -type basicBatch struct { - puts map[Key]interface{} - deletes map[Key]struct{} - - target Datastore -} - -func NewBasicBatch(ds Datastore) Batch { - return &basicBatch{ - puts: make(map[Key]interface{}), - deletes: make(map[Key]struct{}), - target: ds, - } -} - -func (bt *basicBatch) Put(key Key, val interface{}) error { - bt.puts[key] = val - return nil -} - -func (bt *basicBatch) Delete(key Key) error { - bt.deletes[key] = struct{}{} - return nil -} - -func (bt *basicBatch) Commit() error { - for k, val := range bt.puts { - if err := bt.target.Put(k, val); err != nil { - return err - } - } - - for k := range bt.deletes { - if err := bt.target.Delete(k); err != nil { - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go deleted file mode 100644 index f347a5068..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go +++ /dev/null @@ -1,42 +0,0 @@ -package callback - -import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -type Datastore struct { - D ds.Datastore - F func() -} - -func Wrap(ds ds.Datastore, f func()) *Datastore { - return &Datastore{ds, f} -} - -func (c *Datastore) SetFunc(f func()) { c.F = f } - -func (c *Datastore) Put(key ds.Key, value interface{}) (err error) { - c.F() - return c.D.Put(key, value) -} - -func (c
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go deleted file mode 100644 index f347a5068..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/callback/callback.go +++ /dev/null @@ -1,42 +0,0 @@ -package callback - -import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -type Datastore struct { - D ds.Datastore - F func() -} - -func Wrap(ds ds.Datastore, f func()) *Datastore { - return &Datastore{ds, f} -} - -func (c *Datastore) SetFunc(f func()) { c.F = f } - -func (c *Datastore) Put(key ds.Key, value interface{}) (err error) { - c.F() - return c.D.Put(key, value) -} - -func (c *Datastore) Get(key ds.Key) (value interface{}, err error) { - c.F() - return c.D.Get(key) -} - -func (c *Datastore) Has(key ds.Key) (exists bool, err error) { - c.F() - return c.D.Has(key) -} - -func (c *Datastore) Delete(key ds.Key) (err error) { - c.F() - return c.D.Delete(key) -} - -func (c *Datastore) Query(q dsq.Query) (dsq.Results, error) { - c.F() - return c.D.Query(q) -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/coalesce/coalesce.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/coalesce/coalesce.go deleted file mode 100644 index 0bd4382d2..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/coalesce/coalesce.go +++ /dev/null @@ -1,140 +0,0 @@ -package coalesce - -import ( - "io" - "sync" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// parent keys -var ( - putKey = "put" - getKey = "get" - hasKey = "has" - deleteKey = "delete" -) - -type keySync struct { - op string - k ds.Key - value interface{} -} - -type valSync struct { - val interface{} - err error - done chan struct{} -} - -// datastore coalesces simultaneous requests for the same key into a single -// request against the child datastore. -type datastore struct { - child ds.Datastore - - reqmu sync.Mutex - req map[keySync]*valSync -} - -// Wrap wraps a given datastore with a coalescing datastore. -// All simultaneous requests which have the same keys will -// yield the exact same result. Note that this shares -// memory. It is not possible to copy a generic interface{}. -func Wrap(d ds.Datastore) ds.Datastore { - return &datastore{child: d, req: make(map[keySync]*valSync)} -} - -// sync synchronizes requests for a given key. -func (d *datastore) sync(k keySync) (vs *valSync, found bool) { - d.reqmu.Lock() - vs, found = d.req[k] - if !found { - vs = &valSync{done: make(chan struct{})} - d.req[k] = vs - } - d.reqmu.Unlock() - - // if we did find one, wait till it's done. - if found { - <-vs.done - } - return vs, found -} - -// syncDone marks a request as complete and releases all of its waiters. -func (d *datastore) syncDone(k keySync) { - - d.reqmu.Lock() - vs, found := d.req[k] - if !found { - panic("attempt to syncDone non-existent request") - } - delete(d.req, k) - d.reqmu.Unlock() - - // release all the waiters. - close(vs.done) -} - -// Put stores the object `value` named by `key`. -func (d *datastore) Put(key ds.Key, value interface{}) (err error) { - ks := keySync{putKey, key, value} - vs, found := d.sync(ks) - if !found { - vs.err = d.child.Put(key, value) - d.syncDone(ks) - } - return vs.err -} - -// Get retrieves the object `value` named by `key`. -func (d *datastore) Get(key ds.Key) (value interface{}, err error) { - ks := keySync{getKey, key, nil} - vs, found := d.sync(ks) - if !found { - vs.val, vs.err = d.child.Get(key) - d.syncDone(ks) - } - return vs.val, vs.err -} - -// Has returns whether the `key` is mapped to a `value`. -func (d *datastore) Has(key ds.Key) (exists bool, err error) { - ks := keySync{hasKey, key, nil} - vs, found := d.sync(ks) - if !found { - vs.val, vs.err = d.child.Has(key) - d.syncDone(ks) - } - return vs.val.(bool), vs.err -}
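The effect of the coalescing wrapper is easiest to see with concurrent readers: identical in-flight requests collapse into a single call against the child, and every waiter receives the same (shared) result. A minimal sketch, again assuming the canonical go-datastore import paths rather than the vendored ones:

```go
package main

import (
	"fmt"
	"sync"

	ds "github.com/ipfs/go-datastore"
	coalesce "github.com/ipfs/go-datastore/coalesce"
)

func main() {
	base := ds.NewMapDatastore()
	_ = base.Put(ds.NewKey("/x"), []byte("hello"))

	cds := coalesce.Wrap(base)

	// Ten goroutines issue the same Get; overlapping calls share a single
	// trip to the child datastore and all observe the same value.
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err := cds.Get(ds.NewKey("/x"))
			fmt.Printf("%s %v\n", v, err)
		}()
	}
	wg.Wait()
}
```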
-// Delete removes the value for given `key`. -func (d *datastore) Delete(key ds.Key) (err error) { - ks := keySync{deleteKey, key, nil} - vs, found := d.sync(ks) - if !found { - vs.err = d.child.Delete(key) - d.syncDone(ks) - } - return vs.err -} - -// Query passes the query through to the child datastore. -func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { - // query not coalesced yet. - return d.child.Query(q) -} - -func (d *datastore) Close() error { - // snapshot the outstanding requests and wait for them without holding - // the lock, so syncDone can acquire it and complete them. - d.reqmu.Lock() - reqs := make([]*valSync, 0, len(d.req)) - for _, s := range d.req { - reqs = append(reqs, s) - } - d.reqmu.Unlock() - - for _, s := range reqs { - <-s.done - } - if c, ok := d.child.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/datastore.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/datastore.go deleted file mode 100644 index 8f91e9be9..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/datastore.go +++ /dev/null @@ -1,122 +0,0 @@ -package datastore - -import ( - "errors" - - query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -/* -Datastore represents storage for any key-value pair. - -Datastores are general enough to be backed by all kinds of different storage: -in-memory caches, databases, a remote datastore, flat files on disk, etc. - -The general idea is to wrap a more complicated storage facility in a simple, -uniform interface, keeping the freedom of using the right tools for the job. -In particular, a Datastore can aggregate other datastores in interesting ways, -like sharded (to distribute load) or tiered access (caches before databases). - -While Datastores should be written general enough to accept all sorts of -values, some implementations will undoubtedly have to be specific (e.g. SQL -databases where fields should be decomposed into columns), particularly to -support queries efficiently. Moreover, certain datastores may enforce certain -types of values (e.g. requiring an io.Reader, a specific struct, etc) or -serialization formats (JSON, Protobufs, etc). - -IMPORTANT: No Datastore should ever Panic! This is a cross-module interface, -and thus it should behave predictably and handle exceptional conditions with -proper error reporting. Thus, all Datastore calls may return errors, which -should be checked by callers. -*/ -type Datastore interface { - // Put stores the object `value` named by `key`. - // - // The generalized Datastore interface does not impose a value type, - // allowing various datastore middleware implementations (which do not - // handle the values directly) to be composed together. - // - // Ultimately, the lowest-level datastore will need to do some value checking - // or risk getting incorrect values. It may also be useful to expose a more - // type-safe interface to your application, and do the checking up-front. - Put(key Key, value interface{}) error - - // Get retrieves the object `value` named by `key`. - // Get will return ErrNotFound if the key is not mapped to a value. - Get(key Key) (value interface{}, err error) - - // Has returns whether the `key` is mapped to a `value`. - // In some contexts, it may be much cheaper only to check for existence of - // a value, rather than retrieving the value itself. (e.g. HTTP HEAD). - // The default implementation is found in `GetBackedHas`. - Has(key Key) (exists bool, err error) - - // Delete removes the value for given `key`. - Delete(key Key) error - - // Query searches the datastore and returns a query result. This function - // may return before the query actually runs.
To wait for the query: - // - // result, _ := ds.Query(q) - // - // // use the channel interface; result may come in at different times - // for entry := range result.Entries() { ... } - // - // // or wait for the query to be completely done - // result.Wait() - // result.AllEntries() - // - Query(q query.Query) (query.Results, error) -} - -type Batching interface { - Datastore - - Batch() (Batch, error) -} - -var ErrBatchUnsupported = errors.New("this datastore does not support batching") - -// ThreadSafeDatastore is an interface that all threadsafe datastores should -// implement to leverage type safety checks. -type ThreadSafeDatastore interface { - Datastore - IsThreadSafe() -} - -// Errors - -// ErrNotFound is returned by Get, Has, and Delete when a datastore does not -// map the given key to a value. -var ErrNotFound = errors.New("datastore: key not found") - -// ErrInvalidType is returned by Put when a given value is incompatible with -// the type the datastore supports. This means a conversion (or serialization) -// is needed beforehand. -var ErrInvalidType = errors.New("datastore: invalid type error") - -// GetBackedHas provides a default Datastore.Has implementation. -// It exists so Datastore.Has implementations can use it, like so: - // - // func (d *SomeDatastore) Has(key Key) (exists bool, err error) { - // return GetBackedHas(d, key) - // } -func GetBackedHas(ds Datastore, key Key) (bool, error) { - _, err := ds.Get(key) - switch err { - case nil: - return true, nil - case ErrNotFound: - return false, nil - default: - return false, err - } -} - -type Batch interface { - Put(key Key, val interface{}) error - - Delete(key Key) error - - Commit() error -}
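To make the interface concrete, here is a toy map-backed implementation whose Has is derived from Get via GetBackedHas, exactly as the helper's doc comment suggests, and whose Query streams the stored entries through the query package's in-memory helpers. A sketch only, under the same canonical-import-path assumption; the package and type names are made up:

```go
package toyds

import (
	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

// MapDS is a toy Datastore backed by a plain map. Not thread-safe.
type MapDS struct{ m map[ds.Key]interface{} }

func New() *MapDS { return &MapDS{m: make(map[ds.Key]interface{})} }

func (d *MapDS) Put(k ds.Key, v interface{}) error { d.m[k] = v; return nil }

func (d *MapDS) Get(k ds.Key) (interface{}, error) {
	v, ok := d.m[k]
	if !ok {
		return nil, ds.ErrNotFound
	}
	return v, nil
}

// Has is backed by Get, per the GetBackedHas helper above.
func (d *MapDS) Has(k ds.Key) (bool, error) { return ds.GetBackedHas(d, k) }

func (d *MapDS) Delete(k ds.Key) error {
	if _, ok := d.m[k]; !ok {
		return ds.ErrNotFound
	}
	delete(d.m, k)
	return nil
}

func (d *MapDS) Query(q dsq.Query) (dsq.Results, error) {
	entries := make([]dsq.Entry, 0, len(d.m))
	for k := range d.m {
		entries = append(entries, dsq.Entry{Key: k.String()})
	}
	// NaiveQueryApply applies prefix, filters, orders, limit and offset
	// in memory, as the fs datastore below also does.
	return dsq.NaiveQueryApply(q, dsq.ResultsWithEntries(q, entries)), nil
}
```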
See godoc.") - } - - uf := "http://:/" - u, err := url.Parse(urlstr) - if err != nil { - return nil, fmt.Errorf("error parsing url: %s (%s)", urlstr, uf) - } - - host := strings.Split(u.Host, ":") - api.Domain = host[0] - if len(host) > 1 { - api.Port = host[1] - } - - index := strings.Trim(u.Path, "/") - if strings.Contains(index, "/") { - e := "elastigo index cannot have slashes: %s (%s -> %s)" - return nil, fmt.Errorf(e, index, urlstr, uf) - } - - GlobalInstance = urlstr - return &Datastore{ - url: urlstr, - index: index, - KeyHash: BlakeKeyHash, - }, nil -} - -// Returns the ElasticSearch index for given key. If the datastore specifies -// an index, use that. Else, key.Parent -func (d *Datastore) Index(key ds.Key) string { - if len(d.index) > 0 { - return d.index - } - return key.Parent().BaseNamespace() -} - -// value should be JSON serializable. -func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { - id := d.KeyHash(key) - res, err := core.Index(false, d.Index(key), key.Type(), id, value) - if err != nil { - return err - } - if !res.Ok { - return fmt.Errorf("Elasticsearch response: NOT OK. %v", res) - } - return nil -} - -func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { - id := d.KeyHash(key) - res, err := core.Get(false, d.Index(key), key.Type(), id) - if err != nil { - return nil, err - } - if !res.Ok { - return nil, fmt.Errorf("Elasticsearch response: NOT OK. %v", res) - } - return res.Source, nil -} - -func (d *Datastore) Has(key ds.Key) (exists bool, err error) { - id := d.KeyHash(key) - return core.Exists(false, d.Index(key), key.Type(), id) -} - -func (d *Datastore) Delete(key ds.Key) (err error) { - id := d.KeyHash(key) - res, err := core.Delete(false, d.Index(key), key.Type(), id, 0, "") - if err != nil { - return err - } - if !res.Ok { - return fmt.Errorf("Elasticsearch response: NOT OK. %v", res) - } - return nil -} - -func (d *Datastore) Query(query.Query) (query.Results, error) { - return nil, errors.New("Not yet implemented!") -} - -// Hash a key and return the first 16 hex chars of its blake2b hash. -// basically: Blake2b(key).HexString[:16] -func BlakeKeyHash(key ds.Key) string { - h := blake2.NewBlake2B() - h.Write(key.Bytes()) - d := h.Sum(nil) - return fmt.Sprintf("%x", d)[:16] -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/flatfs.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/flatfs.go deleted file mode 100644 index 1db6c3c0b..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/flatfs.go +++ /dev/null @@ -1,392 +0,0 @@ -// Package flatfs is a Datastore implementation that stores all -// objects in a two-level directory structure in the local file -// system, regardless of the hierarchy of the keys. 
-package flatfs - -import ( - "encoding/hex" - "errors" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename" - - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" -) - -var log = logging.Logger("flatfs") - -const ( - extension = ".data" - maxPrefixLen = 16 -) - -var ( - ErrBadPrefixLen = errors.New("bad prefix length") -) - -type Datastore struct { - path string - // length of the dir splay prefix, in bytes of hex digits - hexPrefixLen int - - // synchronize all writes and directory changes for added safety - sync bool -} - -var _ datastore.Datastore = (*Datastore)(nil) - -func New(path string, prefixLen int, sync bool) (*Datastore, error) { - if prefixLen <= 0 || prefixLen > maxPrefixLen { - return nil, ErrBadPrefixLen - } - fs := &Datastore{ - path: path, - // convert from binary bytes to bytes of hex encoding - hexPrefixLen: prefixLen * hex.EncodedLen(1), - sync: sync, - } - return fs, nil -} - -var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1)) - -func (fs *Datastore) encode(key datastore.Key) (dir, file string) { - safe := hex.EncodeToString(key.Bytes()[1:]) - prefix := (safe + padding)[:fs.hexPrefixLen] - dir = path.Join(fs.path, prefix) - file = path.Join(dir, safe+extension) - return dir, file -} - -func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) { - if path.Ext(file) != extension { - return datastore.Key{}, false - } - name := file[:len(file)-len(extension)] - k, err := hex.DecodeString(name) - if err != nil { - return datastore.Key{}, false - } - return datastore.NewKey(string(k)), true -} - -func (fs *Datastore) makePrefixDir(dir string) error { - if err := fs.makePrefixDirNoSync(dir); err != nil { - return err - } - - // In theory, if we create a new prefix dir and add a file to - // it, the creation of the prefix dir itself might not be - // durable yet. Sync the root dir after a successful mkdir of - // a prefix dir, just to be paranoid. - if fs.sync { - if err := syncDir(fs.path); err != nil { - return err - } - } - return nil -} - -func (fs *Datastore) makePrefixDirNoSync(dir string) error { - if err := os.Mkdir(dir, 0777); err != nil { - // EEXIST is safe to ignore here, that just means the prefix - // directory already existed. - if !os.IsExist(err) { - return err - } - } - return nil -}
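Concretely, with a prefix length of 2 the encode function above splays keys into 4-hex-character first-level directories. The sketch below (canonical import paths assumed; the key and paths are made up) shows where a value lands on disk:

```go
package main

import (
	"fmt"
	"io/ioutil"

	ds "github.com/ipfs/go-datastore"
	flatfs "github.com/ipfs/go-datastore/flatfs"
)

func main() {
	// flatfs expects the root directory to already exist.
	root, err := ioutil.TempDir("", "flatfs-demo")
	if err != nil {
		panic(err)
	}

	// prefixLen 2 = 4 hex characters per first-level directory;
	// sync=true fsyncs files and directories on write for durability.
	fs, err := flatfs.New(root, 2, true)
	if err != nil {
		panic(err)
	}

	// Values must be []byte; anything else returns ErrInvalidType.
	if err := fs.Put(ds.NewKey("/FOO"), []byte("bar")); err != nil {
		panic(err)
	}

	// encode() hex-encodes the key bytes after the leading slash:
	// "/FOO" -> "464f4f", so the value now lives at <root>/464f/464f4f.data.
	v, _ := fs.Get(ds.NewKey("/FOO"))
	fmt.Printf("%s\n", v)
}
```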
-var putMaxRetries = 3 - -func (fs *Datastore) Put(key datastore.Key, value interface{}) error { - val, ok := value.([]byte) - if !ok { - return datastore.ErrInvalidType - } - - var err error - for i := 0; i < putMaxRetries; i++ { - err = fs.doPut(key, val) - if err == nil { - return nil - } - - if !strings.Contains(err.Error(), "too many open files") { - return err - } - - log.Errorf("too many open files, retrying in %dms", 100*i) - time.Sleep(time.Millisecond * 100 * time.Duration(i)) - } - return err -} - -func (fs *Datastore) doPut(key datastore.Key, val []byte) error { - dir, path := fs.encode(key) - if err := fs.makePrefixDir(dir); err != nil { - return err - } - - tmp, err := ioutil.TempFile(dir, "put-") - if err != nil { - return err - } - closed := false - removed := false - defer func() { - if !closed { - // silence errcheck - _ = tmp.Close() - } - if !removed { - // silence errcheck - _ = os.Remove(tmp.Name()) - } - }() - - if _, err := tmp.Write(val); err != nil { - return err - } - if fs.sync { - if err := tmp.Sync(); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - closed = true - - err = osrename.Rename(tmp.Name(), path) - if err != nil { - return err - } - removed = true - - if fs.sync { - if err := syncDir(dir); err != nil { - return err - } - } - return nil -} - -func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error { - var dirsToSync []string - files := make(map[*os.File]string) - - for key, value := range data { - val, ok := value.([]byte) - if !ok { - return datastore.ErrInvalidType - } - dir, path := fs.encode(key) - if err := fs.makePrefixDirNoSync(dir); err != nil { - return err - } - dirsToSync = append(dirsToSync, dir) - - tmp, err := ioutil.TempFile(dir, "put-") - if err != nil { - return err - } - - if _, err := tmp.Write(val); err != nil { - return err - } - - files[tmp] = path - } - - ops := make(map[*os.File]int) - - defer func() { - for fi := range files { - val := ops[fi] - switch val { - case 0: - _ = fi.Close() - fallthrough - case 1: - _ = os.Remove(fi.Name()) - } - } - }() - - // Now we sync everything - // sync and close files - for fi := range files { - if fs.sync { - if err := fi.Sync(); err != nil { - return err - } - } - - if err := fi.Close(); err != nil { - return err - } - - // signify closed - ops[fi] = 1 - } - - // move files to their proper places - for fi, path := range files { - if err := osrename.Rename(fi.Name(), path); err != nil { - return err - } - - // signify removed - ops[fi] = 2 - } - - // now sync the dirs for those files - if fs.sync { - for _, dir := range dirsToSync { - if err := syncDir(dir); err != nil { - return err - } - } - - // sync top flatfs dir - if err := syncDir(fs.path); err != nil { - return err - } - } - - return nil -} - -func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) { - _, path := fs.encode(key) - data, err := ioutil.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - return nil, datastore.ErrNotFound - } - // no specific error to return, so just pass it through - return nil, err - } - return data, nil -} - -func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) { - _, path := fs.encode(key) - switch _, err := os.Stat(path); { - case err == nil: - return true, nil - case os.IsNotExist(err): - return false, nil - default: - return false, err - } -} - -func (fs *Datastore) Delete(key datastore.Key) error { - _, path := fs.encode(key) -
switch err := os.Remove(path); { - case err == nil: - return nil - case os.IsNotExist(err): - return datastore.ErrNotFound - default: - return err - } -} - -func (fs *Datastore) Query(q query.Query) (query.Results, error) { - if (q.Prefix != "" && q.Prefix != "/") || - len(q.Filters) > 0 || - len(q.Orders) > 0 || - q.Limit > 0 || - q.Offset > 0 || - !q.KeysOnly { - // TODO this is overly simplistic, but the only caller is - // `ipfs refs local` for now, and this gets us moving. - return nil, errors.New("flatfs only supports listing all keys in random order") - } - - reschan := make(chan query.Result) - go func() { - defer close(reschan) - err := filepath.Walk(fs.path, func(path string, info os.FileInfo, err error) error { - // bail out if the walk itself reported an error; info may be nil here. - if err != nil { - return err - } - - if !info.Mode().IsRegular() || info.Name()[0] == '.' { - return nil - } - - key, ok := fs.decode(info.Name()) - if !ok { - log.Warning("failed to decode entry in flatfs") - return nil - } - - reschan <- query.Result{ - Entry: query.Entry{ - Key: key.String(), - }, - } - return nil - }) - if err != nil { - log.Warning("walk failed: ", err) - } - }() - return query.ResultsWithChan(q, reschan), nil -} - -func (fs *Datastore) Close() error { - return nil -} - -type flatfsBatch struct { - puts map[datastore.Key]interface{} - deletes map[datastore.Key]struct{} - - ds *Datastore -} - -func (fs *Datastore) Batch() (datastore.Batch, error) { - return &flatfsBatch{ - puts: make(map[datastore.Key]interface{}), - deletes: make(map[datastore.Key]struct{}), - ds: fs, - }, nil -} - -func (bt *flatfsBatch) Put(key datastore.Key, val interface{}) error { - bt.puts[key] = val - return nil -} - -func (bt *flatfsBatch) Delete(key datastore.Key) error { - bt.deletes[key] = struct{}{} - return nil -} - -func (bt *flatfsBatch) Commit() error { - if err := bt.ds.putMany(bt.puts); err != nil { - return err - } - - for k := range bt.deletes { - if err := bt.ds.Delete(k); err != nil { - return err - } - } - - return nil -} - -var _ datastore.ThreadSafeDatastore = (*Datastore)(nil) - -func (*Datastore) IsThreadSafe() {} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_std.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_std.go deleted file mode 100644 index 0608bf977..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_std.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !windows - -package flatfs - -import "os" - -func syncDir(dir string) error { - dirF, err := os.Open(dir) - if err != nil { - return err - } - defer dirF.Close() - if err := dirF.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_windows.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_windows.go deleted file mode 100644 index b3b1ce3cc..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/flatfs/sync_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package flatfs - -func syncDir(dir string) error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/fs/fs.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/fs/fs.go deleted file mode 100644 index 0710fd63b..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/fs/fs.go +++ /dev/null @@ -1,159 +0,0 @@ -// Package fs is a simple Datastore implementation that stores keys -// as directories and files, mirroring the key. That is, the key -// "/foo/bar" is stored as file "PATH/foo/bar/.dsobject". -// -// This means some key segments will not work.
For example, the -// following keys will result in unwanted behavior: -// -// - "/foo/./bar" -// - "/foo/../bar" -// - "/foo\x00bar" -// -// Keys that only differ in case may be confused with each other on -// case insensitive file systems, for example in OS X. -// -// This package is intended for exploratory use, where the user would -// examine the file system manually, and should only be used with -// human-friendly, trusted keys. You have been warned. -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -var ObjectKeySuffix = ".dsobject" - -// Datastore uses a file per key to store values. -type Datastore struct { - path string -} - -// NewDatastore returns a new fs Datastore at given `path` -func NewDatastore(path string) (ds.Datastore, error) { - if !isDir(path) { - return nil, fmt.Errorf("Failed to find directory at: %v (file? perms?)", path) - } - - return &Datastore{path: path}, nil -} - -// KeyFilename returns the filename associated with `key` -func (d *Datastore) KeyFilename(key ds.Key) string { - return filepath.Join(d.path, key.String(), ObjectKeySuffix) -} - -// Put stores the given value. -func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { - - // TODO: maybe use io.Readers/Writers? - // r, err := dsio.CastAsReader(value) - // if err != nil { - // return err - // } - - val, ok := value.([]byte) - if !ok { - return ds.ErrInvalidType - } - - fn := d.KeyFilename(key) - - // ensure the parent directories exist. - err = os.MkdirAll(filepath.Dir(fn), 0755) - if err != nil { - return err - } - - return ioutil.WriteFile(fn, val, 0666) -} - -// Get returns the value for given key -func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { - fn := d.KeyFilename(key) - if !isFile(fn) { - return nil, ds.ErrNotFound - } - - return ioutil.ReadFile(fn) -} - -// Has returns whether the datastore has a value for a given key -func (d *Datastore) Has(key ds.Key) (exists bool, err error) { - return ds.GetBackedHas(d, key) -} - -// Delete removes the value for given key -func (d *Datastore) Delete(key ds.Key) (err error) { - fn := d.KeyFilename(key) - if !isFile(fn) { - return ds.ErrNotFound - } - - return os.Remove(fn) -} - -// Query implements Datastore.Query -func (d *Datastore) Query(q query.Query) (query.Results, error) { - - results := make(chan query.Result) - - walkFn := func(path string, info os.FileInfo, err error) error { - // remove ds path prefix - if strings.HasPrefix(path, d.path) { - path = path[len(d.path):] - } - - if !info.IsDir() { - if strings.HasSuffix(path, ObjectKeySuffix) { - path = path[:len(path)-len(ObjectKeySuffix)] - } - key := ds.NewKey(path) - entry := query.Entry{Key: key.String(), Value: query.NotFetched} - results <- query.Result{Entry: entry} - } - return nil - } - - go func() { - filepath.Walk(d.path, walkFn) - close(results) - }() - r := query.ResultsWithChan(q, results) - r = query.NaiveQueryApply(q, r) - return r, nil -} - -// isDir returns whether given path is a directory -func isDir(path string) bool { - finfo, err := os.Stat(path) - if err != nil { - return false - } - - return finfo.IsDir() -} - -// isFile returns whether given path is a file -func isFile(path string) bool { - finfo, err := os.Stat(path) - if err != nil { - return false - } - - return !finfo.IsDir() -} - -func (d *Datastore) Close() error { - return nil -} -
-func (d *Datastore) Batch() (ds.Batch, error) { - return ds.NewBasicBatch(d), nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/key.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/key.go deleted file mode 100644 index 8df0eeaec..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/key.go +++ /dev/null @@ -1,252 +0,0 @@ -package datastore - -import ( - "path" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/satori/go.uuid" - - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -/* -A Key represents the unique identifier of an object. -Our Key scheme is inspired by file systems and Google App Engine key model. - -Keys are meant to be unique across a system. Keys are hierarchical, -incorporating more and more specific namespaces. Thus keys can be deemed -'children' or 'ancestors' of other keys:: - - Key("/Comedy") - Key("/Comedy/MontyPython") - -Also, every namespace can be parametrized to embed relevant object -information. For example, the Key `name` (most specific namespace) could -include the object type:: - - Key("/Comedy/MontyPython/Actor:JohnCleese") - Key("/Comedy/MontyPython/Sketch:CheeseShop") - Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender") - -*/ -type Key struct { - string -} - -// NewKey constructs a key from string. it will clean the value. -func NewKey(s string) Key { - k := Key{s} - k.Clean() - return k -} - -// KeyWithNamespaces constructs a key out of a namespace slice. -func KeyWithNamespaces(ns []string) Key { - return NewKey(strings.Join(ns, "/")) -} - -// Clean up a Key, using path.Clean. -func (k *Key) Clean() { - k.string = path.Clean("/" + k.string) -} - -// String is the string value of Key -func (k Key) String() string { - return k.string -} - -// Bytes returns the string value of Key as a []byte -func (k Key) Bytes() []byte { - return []byte(k.string) -} - -// Equal checks equality of two keys -func (k Key) Equal(k2 Key) bool { - return k.string == k2.string -} - -// Less checks whether this key is sorted lower than another. -func (k Key) Less(k2 Key) bool { - list1 := k.List() - list2 := k2.List() - for i, c1 := range list1 { - if len(list2) < (i + 1) { - return false - } - - c2 := list2[i] - if c1 < c2 { - return true - } else if c1 > c2 { - return false - } - // c1 == c2, continue - } - - // list1 is shorter or exactly the same. - return len(list1) < len(list2) -} - -// List returns the `list` representation of this Key. -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() -// ["Comedy", "MontyPython", "Actor:JohnCleese"] -func (k Key) List() []string { - return strings.Split(k.string, "/")[1:] -} - -// Reverse returns the reverse of this Key. -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse() -// NewKey("/Actor:JohnCleese/MontyPython/Comedy") -func (k Key) Reverse() Key { - l := k.List() - r := make([]string, len(l)) - for i, e := range l { - r[len(l)-i-1] = e - } - return KeyWithNamespaces(r) -} - -// Namespaces returns the `namespaces` making up this Key.
-// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() -// ["Comedy", "MontyPythong", "Actor:JohnCleese"] -func (k Key) Namespaces() []string { - return k.List() -} - -// BaseNamespace returns the "base" namespace of this key (path.Base(filename)) -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace() -// "Actor:JohnCleese" -func (k Key) BaseNamespace() string { - n := k.Namespaces() - return n[len(n)-1] -} - -// Type returns the "type" of this key (value of last namespace). -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() -// "Actor" -func (k Key) Type() string { - return NamespaceType(k.BaseNamespace()) -} - -// Name returns the "name" of this key (field of last namespace). -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() -// "Actor" -func (k Key) Name() string { - return NamespaceValue(k.BaseNamespace()) -} - -// Instance returns an "instance" of this type key (appends value to namespace). -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() -// "JohnCleese" -func (k Key) Instance(s string) Key { - return NewKey(k.string + ":" + s) -} - -// Path returns the "path" of this key (parent + type). -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path() -// NewKey("/Comedy/MontyPython/Actor") -func (k Key) Path() Key { - s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace()) - return NewKey(s) -} - -// Parent returns the `parent` Key of this Key. -// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent() -// NewKey("/Comedy/MontyPython") -func (k Key) Parent() Key { - n := k.List() - if len(n) == 1 { - return NewKey("/") - } - return NewKey(strings.Join(n[:len(n)-1], "/")) -} - -// Child returns the `child` Key of this Key. -// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese") -// NewKey("/Comedy/MontyPython/Actor:JohnCleese") -func (k Key) Child(k2 Key) Key { - return NewKey(k.string + "/" + k2.string) -} - -// ChildString returns the `child` Key of this Key -- string helper. -// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese") -// NewKey("/Comedy/MontyPython/Actor:JohnCleese") -func (k Key) ChildString(s string) Key { - return NewKey(k.string + "/" + s) -} - -// IsAncestorOf returns whether this key is a prefix of `other` -// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython") -// true -func (k Key) IsAncestorOf(other Key) bool { - if other.string == k.string { - return false - } - return strings.HasPrefix(other.string, k.string) -} - -// IsDescendantOf returns whether this key contains another as a prefix. -// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy") -// true -func (k Key) IsDescendantOf(other Key) bool { - if other.string == k.string { - return false - } - return strings.HasPrefix(k.string, other.string) -} - -// IsTopLevel returns whether this key has only one namespace. -func (k Key) IsTopLevel() bool { - return len(k.List()) == 1 -} - -// RandomKey returns a randomly (uuid) generated key. -// RandomKey() -// NewKey("/f98719ea086343f7b71f32ea9d9d521d") -func RandomKey() Key { - return NewKey(strings.Replace(uuid.NewV4().String(), "-", "", -1)) -} - -/* -A Key Namespace is like a path element. -A namespace can optionally include a type (delimited by ':') - - > NamespaceValue("Song:PhilosopherSong") - PhilosopherSong - > NamespaceType("Song:PhilosopherSong") - Song - > NamespaceType("Music:Song:PhilosopherSong") - Music:Song -*/ - -// NamespaceType is the first component of a namespace. 
-// NamespaceType returns the type portion of a namespace (everything before -// the last component): `foo` in `foo:bar`, `Music:Song` in `Music:Song:Phil` -func NamespaceType(namespace string) string { - parts := strings.Split(namespace, ":") - if len(parts) < 2 { - return "" - } - return strings.Join(parts[0:len(parts)-1], ":") -} - -// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz` -func NamespaceValue(namespace string) string { - parts := strings.Split(namespace, ":") - return parts[len(parts)-1] -} - -// KeySlice attaches the methods of sort.Interface to []Key, -// sorting in increasing order. -type KeySlice []Key - -func (p KeySlice) Len() int { return len(p) } -func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) } -func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// EntryKeys extracts the keys from a slice of query entries. -func EntryKeys(e []dsq.Entry) []Key { - ks := make([]Key, len(e)) - for i, e := range e { - ks[i] = NewKey(e.Key) - } - return ks -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/doc.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/doc.go deleted file mode 100644 index b389dcfaf..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package keytransform introduces a Datastore Shim that transforms keys before -// passing them to its child. It can be used to manipulate what keys look like -// to the user, for example namespacing keys, reversing them, etc. -// -// Use the Wrap function to wrap a datastore with any KeyTransform. -// A KeyTransform is simply an interface with two functions, a conversion and -// its inverse. For example: -// -// import ( -// ktds "github.com/ipfs/go-datastore/keytransform" -// ds "github.com/ipfs/go-datastore" -// ) -// -// func reverseKey(k ds.Key) ds.Key { -// return k.Reverse() -// } -// -// func invertKeys(d ds.Datastore) ds.Datastore { -// return ktds.Wrap(d, &ktds.Pair{ -// Convert: reverseKey, -// Invert: reverseKey, // reverse is its own inverse. -// }) -// } -// -package keytransform diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/interface.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/interface.go deleted file mode 100644 index 6414f9cf4..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/interface.go +++ /dev/null @@ -1,34 +0,0 @@ -package keytransform - -import ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - -// KeyMapping is a function that maps one key to another -type KeyMapping func(ds.Key) ds.Key - -// KeyTransform is an object with a pair of functions for (invertibly) -// transforming keys -type KeyTransform interface { - ConvertKey(ds.Key) ds.Key - InvertKey(ds.Key) ds.Key -} - -// Datastore is a keytransform.Datastore -type Datastore interface { - ds.Shim - KeyTransform -} - -// Wrap wraps a given datastore with a KeyTransform function. -// The resulting wrapped datastore will use the transform on all Datastore -// operations.
-func Wrap(child ds.Datastore, t KeyTransform) *ktds { - if t == nil { - panic("t (KeyTransform) is nil") - } - - if child == nil { - panic("child (ds.Datastore) is nil") - } - - return &ktds{child: child, KeyTransform: t} -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/keytransform.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/keytransform.go deleted file mode 100644 index be07bcda6..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform/keytransform.go +++ /dev/null @@ -1,118 +0,0 @@ -package keytransform - -import ( - "io" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -type Pair struct { - Convert KeyMapping - Invert KeyMapping -} - -func (t *Pair) ConvertKey(k ds.Key) ds.Key { - return t.Convert(k) -} - -func (t *Pair) InvertKey(k ds.Key) ds.Key { - return t.Invert(k) -} - -// ktds keeps a KeyTransform function -type ktds struct { - child ds.Datastore - - KeyTransform -} - -// Children implements ds.Shim -func (d *ktds) Children() []ds.Datastore { - return []ds.Datastore{d.child} -} - -// Put stores the given value, transforming the key first. -func (d *ktds) Put(key ds.Key, value interface{}) (err error) { - return d.child.Put(d.ConvertKey(key), value) -} - -// Get returns the value for given key, transforming the key first. -func (d *ktds) Get(key ds.Key) (value interface{}, err error) { - return d.child.Get(d.ConvertKey(key)) -} - -// Has returns whether the datastore has a value for a given key, transforming -// the key first. -func (d *ktds) Has(key ds.Key) (exists bool, err error) { - return d.child.Has(d.ConvertKey(key)) -} - -// Delete removes the value for given key -func (d *ktds) Delete(key ds.Key) (err error) { - return d.child.Delete(d.ConvertKey(key)) -} - -// Query implements Query, inverting keys on the way back out. 
-func (d *ktds) Query(q dsq.Query) (dsq.Results, error) { - qr, err := d.child.Query(q) - if err != nil { - return nil, err - } - - ch := make(chan dsq.Result) - go func() { - defer close(ch) - defer qr.Close() - - for r := range qr.Next() { - if r.Error == nil { - r.Entry.Key = d.InvertKey(ds.NewKey(r.Entry.Key)).String() - } - ch <- r - } - }() - - return dsq.DerivedResults(qr, ch), nil -} - -func (d *ktds) Close() error { - if c, ok := d.child.(io.Closer); ok { - return c.Close() - } - return nil -} - -func (d *ktds) Batch() (ds.Batch, error) { - bds, ok := d.child.(ds.Batching) - if !ok { - return nil, ds.ErrBatchUnsupported - } - - childbatch, err := bds.Batch() - if err != nil { - return nil, err - } - return &transformBatch{ - dst: childbatch, - f: d.ConvertKey, - }, nil -} - -type transformBatch struct { - dst ds.Batch - - f KeyMapping -} - -func (t *transformBatch) Put(key ds.Key, val interface{}) error { - return t.dst.Put(t.f(key), val) -} - -func (t *transformBatch) Delete(key ds.Key) error { - return t.dst.Delete(t.f(key)) -} - -func (t *transformBatch) Commit() error { - return t.dst.Commit() -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go deleted file mode 100644 index 7820a7974..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go +++ /dev/null @@ -1,155 +0,0 @@ -package leveldb - -import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - - "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" -) - -type datastore struct { - DB *leveldb.DB -} - -type Options opt.Options - -func NewDatastore(path string, opts *Options) (*datastore, error) { - var nopts opt.Options - if opts != nil { - nopts = opt.Options(*opts) - } - db, err := leveldb.OpenFile(path, &nopts) - if err != nil { - return nil, err - } - - return &datastore{ - DB: db, - }, nil -} - -// Returns ErrInvalidType if value is not of type []byte. -// -// NOTE: Using sync = false. -// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions -func (d *datastore) Put(key ds.Key, value interface{}) (err error) { - val, ok := value.([]byte) - if !ok { - return ds.ErrInvalidType - } - return d.DB.Put(key.Bytes(), val, nil) -} - -func (d *datastore) Get(key ds.Key) (value interface{}, err error) { - val, err := d.DB.Get(key.Bytes(), nil) - if err != nil { - if err == leveldb.ErrNotFound { - return nil, ds.ErrNotFound - } - return nil, err - } - return val, nil -} - -func (d *datastore) Has(key ds.Key) (exists bool, err error) { - return d.DB.Has(key.Bytes(), nil) -} - -func (d *datastore) Delete(key ds.Key) (err error) { - err = d.DB.Delete(key.Bytes(), nil) - if err == leveldb.ErrNotFound { - return ds.ErrNotFound - } - return err -} - -func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { - - // we can use multiple iterators concurrently. 
see: - // https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator - // advance the iterator only if the reader reads - // - // run query in own sub-process tied to Results.Process(), so that - // it waits for us to finish AND so that clients can signal to us - // that resources should be reclaimed. - qrb := dsq.NewResultBuilder(q) - qrb.Process.Go(func(worker goprocess.Process) { - d.runQuery(worker, qrb) - }) - - // go wait on the worker (without signaling close) - go qrb.Process.CloseAfterChildren() - - // Now, apply remaining things (filters, order) - qr := qrb.Results() - for _, f := range q.Filters { - qr = dsq.NaiveFilter(qr, f) - } - for _, o := range q.Orders { - qr = dsq.NaiveOrder(qr, o) - } - return qr, nil -} - -func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) { - - var rnge *util.Range - if qrb.Query.Prefix != "" { - rnge = util.BytesPrefix([]byte(qrb.Query.Prefix)) - } - i := d.DB.NewIterator(rnge, nil) - defer i.Release() - - // advance iterator for offset - if qrb.Query.Offset > 0 { - for j := 0; j < qrb.Query.Offset; j++ { - i.Next() - } - } - - // iterate, and handle limit, too - for sent := 0; i.Next(); sent++ { - // end early if we hit the limit - if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit { - break - } - - k := ds.NewKey(string(i.Key())).String() - e := dsq.Entry{Key: k} - - if !qrb.Query.KeysOnly { - buf := make([]byte, len(i.Value())) - copy(buf, i.Value()) - e.Value = buf - } - - select { - case qrb.Output <- dsq.Result{Entry: e}: // we sent it out - case <-worker.Closing(): // client told us to end early. - // NOTE: a bare `break` here would only exit the select, - // not the loop, so return instead. - return - } - } - - if err := i.Error(); err != nil { - select { - case qrb.Output <- dsq.Result{Error: err}: // client read our error - case <-worker.Closing(): // client told us to end. - return - } - } -} - -func (d *datastore) Batch() (ds.Batch, error) { - // TODO: implement batch on leveldb - return nil, ds.ErrBatchUnsupported -} - -// LevelDB needs to be closed. -func (d *datastore) Close() (err error) { - return d.DB.Close() -} - -func (d *datastore) IsThreadSafe() {}
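A short usage sketch for the LevelDB-backed store, including a prefix query consumed over the result channel. The path is hypothetical and the canonical import paths are assumed:

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	leveldb "github.com/ipfs/go-datastore/leveldb"
	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	d, err := leveldb.NewDatastore("/tmp/leveldb-demo", nil) // nil = default options
	if err != nil {
		panic(err)
	}
	defer d.Close() // LevelDB must be closed to release the on-disk lock

	_ = d.Put(ds.NewKey("/peers/a"), []byte("1"))
	_ = d.Put(ds.NewKey("/peers/b"), []byte("2"))

	// The query runs in its own worker; iterate the result channel.
	res, err := d.Query(dsq.Query{Prefix: "/peers"})
	if err != nil {
		panic(err)
	}
	for r := range res.Next() {
		if r.Error != nil {
			panic(r.Error)
		}
		fmt.Println(r.Key, string(r.Value.([]byte)))
	}
}
```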
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/lru/datastore.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/lru/datastore.go deleted file mode 100644 index 501cbc988..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/lru/datastore.go +++ /dev/null @@ -1,64 +0,0 @@ -package lru - -import ( - "errors" - - lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// Datastore uses golang-lru for internal storage. -type Datastore struct { - cache *lru.Cache -} - -// NewDatastore constructs a new LRU Datastore with given capacity. -func NewDatastore(capacity int) (*Datastore, error) { - cache, err := lru.New(capacity) - if err != nil { - return nil, err - } - - return &Datastore{cache: cache}, nil -} - -// Put stores the object `value` named by `key`. -func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { - d.cache.Add(key, value) - return nil -} - -// Get retrieves the object `value` named by `key`. -func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { - val, ok := d.cache.Get(key) - if !ok { - return nil, ds.ErrNotFound - } - return val, nil -} - -// Has returns whether the `key` is mapped to a `value`. -func (d *Datastore) Has(key ds.Key) (exists bool, err error) { - return ds.GetBackedHas(d, key) -} - -// Delete removes the value for given `key`. -func (d *Datastore) Delete(key ds.Key) (err error) { - d.cache.Remove(key) - return nil -} - -// Query is not supported by the LRU datastore. -func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) { - return nil, errors.New("Query not implemented.") -} - -func (d *Datastore) Close() error { - return nil -} - -func (d *Datastore) Batch() (ds.Batch, error) { - return nil, ds.ErrBatchUnsupported -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure/measure.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure/measure.go deleted file mode 100644 index 9aa825c8c..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure/measure.go +++ /dev/null @@ -1,248 +0,0 @@ -// Package measure provides a Datastore wrapper that records metrics -// using github.com/codahale/metrics. -package measure - -import ( - "io" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// Histogram measurements exceeding these limits are dropped. TODO -// maybe it would be better to cap the value? Should we keep track of -// drops? -const ( - maxLatency = int64(1 * time.Second) - maxSize = int64(1 << 32) -) - -// New wraps the datastore, providing metrics on the operations. The -// metrics are registered with names starting with prefix and a dot. -// -// If prefix is not unique, New will panic. Call Close to release the -// prefix. -func New(prefix string, ds datastore.Datastore) *measure { - m := &measure{ - backend: ds, - - putNum: metrics.Counter(prefix + ".Put.num"), - putErr: metrics.Counter(prefix + ".Put.err"), - putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3), - putSize: metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3), - - getNum: metrics.Counter(prefix + ".Get.num"), - getErr: metrics.Counter(prefix + ".Get.err"), - getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3), - getSize: metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3), - - hasNum: metrics.Counter(prefix + ".Has.num"), - hasErr: metrics.Counter(prefix + ".Has.err"), - hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3), - - deleteNum: metrics.Counter(prefix + ".Delete.num"), - deleteErr: metrics.Counter(prefix + ".Delete.err"), - deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3), - - queryNum: metrics.Counter(prefix + ".Query.num"), - queryErr: metrics.Counter(prefix + ".Query.err"), - queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3), - } - return m -} - -type measure struct { - backend datastore.Datastore - - putNum metrics.Counter - putErr metrics.Counter - putLatency *metrics.Histogram - putSize *metrics.Histogram - - getNum metrics.Counter - getErr metrics.Counter - getLatency *metrics.Histogram - getSize *metrics.Histogram - - hasNum metrics.Counter - hasErr metrics.Counter - hasLatency *metrics.Histogram - - deleteNum metrics.Counter - deleteErr metrics.Counter - deleteLatency *metrics.Histogram - - queryNum metrics.Counter - queryErr metrics.Counter - queryLatency *metrics.Histogram -} - -var _ datastore.Datastore = (*measure)(nil) - -func recordLatency(h *metrics.Histogram, start time.Time) {
- elapsed := time.Since(start) / time.Microsecond - _ = h.RecordValue(int64(elapsed)) -} - -func (m *measure) Put(key datastore.Key, value interface{}) error { - defer recordLatency(m.putLatency, time.Now()) - m.putNum.Add() - if b, ok := value.([]byte); ok { - _ = m.putSize.RecordValue(int64(len(b))) - } - err := m.backend.Put(key, value) - if err != nil { - m.putErr.Add() - } - return err -} - -func (m *measure) Get(key datastore.Key) (value interface{}, err error) { - defer recordLatency(m.getLatency, time.Now()) - m.getNum.Add() - value, err = m.backend.Get(key) - if err != nil { - m.getErr.Add() - } else { - if b, ok := value.([]byte); ok { - _ = m.getSize.RecordValue(int64(len(b))) - } - } - return value, err -} - -func (m *measure) Has(key datastore.Key) (exists bool, err error) { - defer recordLatency(m.hasLatency, time.Now()) - m.hasNum.Add() - exists, err = m.backend.Has(key) - if err != nil { - m.hasErr.Add() - } - return exists, err -} - -func (m *measure) Delete(key datastore.Key) error { - defer recordLatency(m.deleteLatency, time.Now()) - m.deleteNum.Add() - err := m.backend.Delete(key) - if err != nil { - m.deleteErr.Add() - } - return err -} - -func (m *measure) Query(q query.Query) (query.Results, error) { - defer recordLatency(m.queryLatency, time.Now()) - m.queryNum.Add() - res, err := m.backend.Query(q) - if err != nil { - m.queryErr.Add() - } - return res, err -} - -type measuredBatch struct { - puts int - deletes int - - putts datastore.Batch - delts datastore.Batch - - m *measure -} - -func (m *measure) Batch() (datastore.Batch, error) { - bds, ok := m.backend.(datastore.Batching) - if !ok { - return nil, datastore.ErrBatchUnsupported - } - pb, err := bds.Batch() - if err != nil { - return nil, err - } - - db, err := bds.Batch() - if err != nil { - return nil, err - } - - return &measuredBatch{ - putts: pb, - delts: db, - - m: m, - }, nil -} - -func (mt *measuredBatch) Put(key datastore.Key, val interface{}) error { - mt.puts++ - valb, ok := val.([]byte) - if !ok { - return datastore.ErrInvalidType - } - _ = mt.m.putSize.RecordValue(int64(len(valb))) - return mt.putts.Put(key, val) -} - -func (mt *measuredBatch) Delete(key datastore.Key) error { - mt.deletes++ - return mt.delts.Delete(key) -} - -func (mt *measuredBatch) Commit() error { - err := logBatchCommit(mt.delts, mt.deletes, mt.m.deleteNum, mt.m.deleteErr, mt.m.deleteLatency) - if err != nil { - return err - } - - err = logBatchCommit(mt.putts, mt.puts, mt.m.putNum, mt.m.putErr, mt.m.putLatency) - if err != nil { - return err - } - - return nil -} - -func logBatchCommit(b datastore.Batch, n int, num, errs metrics.Counter, lat *metrics.Histogram) error { - if n > 0 { - before := time.Now() - err := b.Commit() - took := int(time.Since(before)/time.Microsecond) / n - num.AddN(uint64(n)) - for i := 0; i < n; i++ { - _ = lat.RecordValue(int64(took)) - } - if err != nil { - errs.Add() - return err - } - } - return nil -} - -func (m *measure) Close() error { - m.putNum.Remove() - m.putErr.Remove() - m.putLatency.Remove() - m.putSize.Remove() - m.getNum.Remove() - m.getErr.Remove() - m.getLatency.Remove() - m.getSize.Remove() - m.hasNum.Remove() - m.hasErr.Remove() - m.hasLatency.Remove() - m.deleteNum.Remove() - m.deleteErr.Remove() - m.deleteLatency.Remove() - m.queryNum.Remove() - m.queryErr.Remove() - m.queryLatency.Remove() - - if c, ok := m.backend.(io.Closer); ok { - return c.Close() - } - return nil -}
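Wiring the metrics wrapper around any datastore is a one-liner; the prefix must stay unique for the lifetime of the wrapper, and Close releases it. A sketch (the prefix name is arbitrary, canonical import paths assumed):

```go
package main

import (
	ds "github.com/ipfs/go-datastore"
	measure "github.com/ipfs/go-datastore/measure"
)

func main() {
	base := ds.NewMapDatastore()

	// Registers counters and histograms named "myapp.ds.Put.num",
	// "myapp.ds.Get.latency", etc. New panics if the prefix is reused.
	m := measure.New("myapp.ds", base)
	defer m.Close() // unregisters the metrics, freeing the prefix

	_ = m.Put(ds.NewKey("/k"), []byte("v")) // counted, sized and timed
	_, _ = m.Get(ds.NewKey("/k"))
}
```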
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/mount/mount.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/mount/mount.go deleted file mode 100644 index 5846b947e..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/mount/mount.go +++ /dev/null @@ -1,188 +0,0 @@ -// Package mount provides a Datastore that has other Datastores -// mounted at various key prefixes. -package mount - -import ( - "errors" - "io" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -var ( - ErrNoMount = errors.New("no datastore mounted for this key") -) - -type Mount struct { - Prefix datastore.Key - Datastore datastore.Datastore -} - -func New(mounts []Mount) *Datastore { - // make a copy so we're sure it doesn't mutate - m := make([]Mount, len(mounts)) - copy(m, mounts) - return &Datastore{mounts: m} -} - -type Datastore struct { - mounts []Mount -} - -var _ datastore.Datastore = (*Datastore)(nil) - -func (d *Datastore) lookup(key datastore.Key) (ds datastore.Datastore, mountpoint, rest datastore.Key) { - for _, m := range d.mounts { - if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) { - s := strings.TrimPrefix(key.String(), m.Prefix.String()) - k := datastore.NewKey(s) - return m.Datastore, m.Prefix, k - } - } - return nil, datastore.NewKey("/"), key -} - -func (d *Datastore) Put(key datastore.Key, value interface{}) error { - ds, _, k := d.lookup(key) - if ds == nil { - return ErrNoMount - } - return ds.Put(k, value) -} - -func (d *Datastore) Get(key datastore.Key) (value interface{}, err error) { - ds, _, k := d.lookup(key) - if ds == nil { - return nil, datastore.ErrNotFound - } - return ds.Get(k) -} - -func (d *Datastore) Has(key datastore.Key) (exists bool, err error) { - ds, _, k := d.lookup(key) - if ds == nil { - return false, nil - } - return ds.Has(k) -} - -func (d *Datastore) Delete(key datastore.Key) error { - ds, _, k := d.lookup(key) - if ds == nil { - return datastore.ErrNotFound - } - return ds.Delete(k) -} - -func (d *Datastore) Query(q query.Query) (query.Results, error) { - if len(q.Filters) > 0 || - len(q.Orders) > 0 || - q.Limit > 0 || - q.Offset > 0 { - // TODO this is overly simplistic, but the only caller is - // `ipfs refs local` for now, and this gets us moving.
- return nil, errors.New("mount only supports listing all prefixed keys in random order") - } - key := datastore.NewKey(q.Prefix) - ds, mount, k := d.lookup(key) - if ds == nil { - return nil, errors.New("mount only supports listing a mount point") - } - // TODO support listing cross mount points too - - // delegate the query to the mounted datastore, while adjusting - // keys in and out - q2 := q - q2.Prefix = k.String() - wrapDS := keytransform.Wrap(ds, &keytransform.Pair{ - Convert: func(datastore.Key) datastore.Key { - panic("this should never be called") - }, - Invert: func(k datastore.Key) datastore.Key { - return mount.Child(k) - }, - }) - - r, err := wrapDS.Query(q2) - if err != nil { - return nil, err - } - r = query.ResultsReplaceQuery(r, q) - return r, nil -} - -func (d *Datastore) Close() error { - for _, d := range d.mounts { - if c, ok := d.Datastore.(io.Closer); ok { - err := c.Close() - if err != nil { - return err - } - } - } - return nil -} - -type mountBatch struct { - mounts map[string]datastore.Batch - - d *Datastore -} - -func (d *Datastore) Batch() (datastore.Batch, error) { - return &mountBatch{ - mounts: make(map[string]datastore.Batch), - d: d, - }, nil -} - -func (mt *mountBatch) lookupBatch(key datastore.Key) (datastore.Batch, datastore.Key, error) { - child, loc, rest := mt.d.lookup(key) - t, ok := mt.mounts[loc.String()] - if !ok { - bds, ok := child.(datastore.Batching) - if !ok { - return nil, datastore.NewKey(""), datastore.ErrBatchUnsupported - } - var err error - t, err = bds.Batch() - if err != nil { - return nil, datastore.NewKey(""), err - } - mt.mounts[loc.String()] = t - } - return t, rest, nil -} - -func (mt *mountBatch) Put(key datastore.Key, val interface{}) error { - t, rest, err := mt.lookupBatch(key) - if err != nil { - return err - } - - return t.Put(rest, val) -} - -func (mt *mountBatch) Delete(key datastore.Key) error { - t, rest, err := mt.lookupBatch(key) - if err != nil { - return err - } - - return t.Delete(rest) -} - -func (mt *mountBatch) Commit() error { - for _, t := range mt.mounts { - err := t.Commit() - if err != nil { - return err - } - } - return nil -}
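A sketch of composing two stores under one namespace with the mount datastore. Note that mount order matters: lookup returns the first matching prefix, so more specific prefixes must come before broader ones (canonical import paths assumed, key names made up):

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	mount "github.com/ipfs/go-datastore/mount"
)

func main() {
	blocks := ds.NewMapDatastore()
	root := ds.NewMapDatastore()

	// "/blocks" must come before "/", or "/" would shadow it.
	m := mount.New([]mount.Mount{
		{Prefix: ds.NewKey("/blocks"), Datastore: blocks},
		{Prefix: ds.NewKey("/"), Datastore: root},
	})

	_ = m.Put(ds.NewKey("/blocks/b1"), []byte("block data"))
	_ = m.Put(ds.NewKey("/notes/n1"), []byte("note"))

	// The mount prefix is stripped before the child store sees the key,
	// so the first value lives in `blocks` under "/b1".
	v, _ := blocks.Get(ds.NewKey("/b1"))
	fmt.Printf("%s\n", v)
}
```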
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/doc.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/doc.go deleted file mode 100644 index 9ff9a8ca3..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package namespace introduces a namespace Datastore Shim, which basically -// mounts the entire child datastore under a prefix. -// -// Use the Wrap function to wrap a datastore with any Key prefix. For example: -// -// import ( -// "fmt" -// -// ds "github.com/ipfs/go-datastore" -// nsds "github.com/ipfs/go-datastore/namespace" -// ) -// -// func main() { -// mp := ds.NewMapDatastore() -// ns := nsds.Wrap(mp, ds.NewKey("/foo/bar")) -// -// // in the Namespace Datastore: -// ns.Put(ds.NewKey("/beep"), "boop") -// v2, _ := ns.Get(ds.NewKey("/beep")) // v2 == "boop" -// -// // and, in the underlying MapDatastore: -// v3, _ := mp.Get(ds.NewKey("/foo/bar/beep")) // v3 == "boop" -// } -package namespace diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/namespace.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/namespace.go deleted file mode 100644 index 88aaf4aa1..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace/namespace.go +++ /dev/null @@ -1,91 +0,0 @@ -package namespace - -import ( - "fmt" - "strings" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - ktds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// PrefixTransform constructs a KeyTransform with a pair of functions that -// add or remove the given prefix key. -// -// Warning: Will panic if prefix not found when it should be there. This is -// to avoid insidious data inconsistency errors. -func PrefixTransform(prefix ds.Key) ktds.KeyTransform { - return &ktds.Pair{ - - // Convert adds the prefix - Convert: func(k ds.Key) ds.Key { - return prefix.Child(k) - }, - - // Invert removes the prefix. panics if prefix not found. - Invert: func(k ds.Key) ds.Key { - if !prefix.IsAncestorOf(k) { - panic(fmt.Sprintf("expected prefix (%s) in key (%s)", prefix, k)) - } - - s := strings.TrimPrefix(k.String(), prefix.String()) - return ds.NewKey(s) - }, - } -} - -// Wrap wraps a given datastore with a key-prefix. -func Wrap(child ds.Datastore, prefix ds.Key) *datastore { - if child == nil { - panic("child (ds.Datastore) is nil") - } - - d := ktds.Wrap(child, PrefixTransform(prefix)) - return &datastore{Datastore: d, raw: child, prefix: prefix} -} - -type datastore struct { - prefix ds.Key - raw ds.Datastore - ktds.Datastore -} - -// Query implements Query, inverting keys on the way back out.
-func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { - qr, err := d.raw.Query(q) - if err != nil { - return nil, err - } - - ch := make(chan dsq.Result) - go func() { - defer close(ch) - defer qr.Close() - - for r := range qr.Next() { - if r.Error != nil { - ch <- r - continue - } - - k := ds.NewKey(r.Entry.Key) - if !d.prefix.IsAncestorOf(k) { - continue - } - - r.Entry.Key = d.Datastore.InvertKey(k).String() - ch <- r - } - }() - - return dsq.DerivedResults(qr, ch), nil -} - -func (d *datastore) Batch() (ds.Batch, error) { - if bds, ok := d.Datastore.(ds.Batching); ok { - return bds.Batch() - } - - return nil, ds.ErrBatchUnsupported -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/panic/panic.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/panic/panic.go deleted file mode 100644 index ca67641ce..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/panic/panic.go +++ /dev/null @@ -1,120 +0,0 @@ -package sync - -import ( - "fmt" - "io" - "os" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -type datastore struct { - child ds.Datastore -} - -// Wrap shims a datastore such than _any_ operation failing triggers a panic -// This is useful for debugging invariants. -func Wrap(d ds.Datastore) ds.Shim { - return &datastore{child: d} -} - -func (d *datastore) Children() []ds.Datastore { - return []ds.Datastore{d.child} -} - -func (d *datastore) Put(key ds.Key, value interface{}) error { - err := d.child.Put(key, value) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Put failed") - } - return nil -} - -func (d *datastore) Get(key ds.Key) (interface{}, error) { - val, err := d.child.Get(key) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Get failed") - } - return val, nil -} - -func (d *datastore) Has(key ds.Key) (bool, error) { - e, err := d.child.Has(key) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Has failed") - } - return e, nil -} - -func (d *datastore) Delete(key ds.Key) error { - err := d.child.Delete(key) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Delete failed") - } - return nil -} - -func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { - r, err := d.child.Query(q) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Query failed") - } - return r, nil -} - -func (d *datastore) Close() error { - if c, ok := d.child.(io.Closer); ok { - err := c.Close() - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: Close failed") - } - } - return nil -} - -func (d *datastore) Batch() (ds.Batch, error) { - b, err := d.child.(ds.Batching).Batch() - if err != nil { - return nil, err - } - - return &panicBatch{b}, nil -} - -type panicBatch struct { - t ds.Batch -} - -func (p *panicBatch) Put(key ds.Key, val interface{}) error { - err := p.t.Put(key, val) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: transaction put failed") - } - return nil -} - -func (p *panicBatch) Delete(key ds.Key) error { - err := p.t.Delete(key) - if err != nil { - fmt.Fprintf(os.Stdout, "panic datastore: %s", err) - panic("panic datastore: transaction delete failed") - } - return nil -} - -func (p *panicBatch) 
Commit() error {
-	err := p.t.Commit()
-	if err != nil {
-		fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
-		panic("panic datastore: transaction commit failed")
-	}
-	return nil
-}
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/filter.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/filter.go
deleted file mode 100644
index d8b48ea32..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/filter.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package query
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-// Filter is an object that tests ResultEntries
-type Filter interface {
-	// Filter returns whether an entry passes the filter
-	Filter(e Entry) bool
-}
-
-// Op is a comparison operator
-type Op string
-
-var (
-	Equal              = Op("==")
-	NotEqual           = Op("!=")
-	GreaterThan        = Op(">")
-	GreaterThanOrEqual = Op(">=")
-	LessThan           = Op("<")
-	LessThanOrEqual    = Op("<=")
-)
-
-// FilterValueCompare is used to signal to datastores they
-// should apply internal comparisons. Unfortunately, there
-// is no way to apply comparisons* to interface{} types in
-// Go, so if the datastore doesn't have a special way to
-// handle these comparisons, you must provide the
-// TypedFilter to actually do filtering.
-//
-// [*] other than == and !=, which use reflect.DeepEqual.
-type FilterValueCompare struct {
-	Op          Op
-	Value       interface{}
-	TypedFilter Filter
-}
-
-func (f FilterValueCompare) Filter(e Entry) bool {
-	if f.TypedFilter != nil {
-		return f.TypedFilter.Filter(e)
-	}
-
-	switch f.Op {
-	case Equal:
-		return reflect.DeepEqual(f.Value, e.Value)
-	case NotEqual:
-		return !reflect.DeepEqual(f.Value, e.Value)
-	default:
-		panic(fmt.Errorf("cannot apply op '%s' to interface{}.", f.Op))
-	}
-}
-
-type FilterKeyCompare struct {
-	Op  Op
-	Key string
-}
-
-func (f FilterKeyCompare) Filter(e Entry) bool {
-	switch f.Op {
-	case Equal:
-		return e.Key == f.Key
-	case NotEqual:
-		return e.Key != f.Key
-	case GreaterThan:
-		return e.Key > f.Key
-	case GreaterThanOrEqual:
-		return e.Key >= f.Key
-	case LessThan:
-		return e.Key < f.Key
-	case LessThanOrEqual:
-		return e.Key <= f.Key
-	default:
-		panic(fmt.Errorf("unknown op '%s'", f.Op))
-	}
-}
-
-type FilterKeyPrefix struct {
-	Prefix string
-}
-
-func (f FilterKeyPrefix) Filter(e Entry) bool {
-	return strings.HasPrefix(e.Key, f.Prefix)
-}
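As an aside on the filters being removed here: they compose with the streaming `NaiveFilter` wrapper from `query_impl.go` (removed later in this diff). A minimal sketch, with some assumptions: it uses the standalone go-datastore import paths from the package docs rather than the vendored ones, and it assumes `MapDatastore` returns all entries for an empty `Query`.

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	mp := ds.NewMapDatastore()
	mp.Put(ds.NewKey("/a/1"), "one")
	mp.Put(ds.NewKey("/a/2"), "two")
	mp.Put(ds.NewKey("/b/1"), "three")

	// Start from all entries, unfiltered.
	res, _ := mp.Query(dsq.Query{})

	// Keep keys under /a, then drop the exact key /a/1.
	res = dsq.NaiveFilter(res, dsq.FilterKeyPrefix{Prefix: "/a"})
	res = dsq.NaiveFilter(res, dsq.FilterKeyCompare{Op: dsq.NotEqual, Key: "/a/1"})

	entries, _ := res.Rest()
	for _, e := range entries {
		fmt.Println(e.Key, e.Value) // expected: /a/2 two
	}
}
```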
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/order.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/order.go
deleted file mode 100644
index 8fa987ba4..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/order.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package query
-
-import (
-	"sort"
-)
-
-// Order is an object used to order objects
-type Order interface {
-
-	// Sort sorts the Entry slice according to
-	// the Order criteria.
-	Sort([]Entry)
-}
-
-// OrderByValue is used to signal to datastores they
-// should apply internal orderings. Unfortunately, there
-// is no way to apply order comparisons to interface{} types
-// in Go, so if the datastore doesn't have a special way to
-// handle these comparisons, you must provide an Order
-// implementation that casts to the correct type.
-type OrderByValue struct {
-	TypedOrder Order
-}
-
-func (o OrderByValue) Sort(res []Entry) {
-	if o.TypedOrder == nil {
-		panic("cannot order interface{} by value. see query docs.")
-	}
-	o.TypedOrder.Sort(res)
-}
-
-// OrderByValueDescending is used to signal to datastores they
-// should apply internal orderings. Unfortunately, there
-// is no way to apply order comparisons to interface{} types
-// in Go, so if the datastore doesn't have a special way to
-// handle these comparisons, you must provide a typed Order.
-type OrderByValueDescending struct {
-	TypedOrder Order
-}
-
-func (o OrderByValueDescending) Sort(res []Entry) {
-	if o.TypedOrder == nil {
-		panic("cannot order interface{} by value. see query docs.")
-	}
-	o.TypedOrder.Sort(res)
-}
-
-// OrderByKey orders results by key, ascending.
-type OrderByKey struct{}
-
-func (o OrderByKey) Sort(res []Entry) {
-	sort.Stable(reByKey(res))
-}
-
-// OrderByKeyDescending orders results by key, descending.
-type OrderByKeyDescending struct{}
-
-func (o OrderByKeyDescending) Sort(res []Entry) {
-	sort.Stable(sort.Reverse(reByKey(res)))
-}
-
-type reByKey []Entry
-
-func (s reByKey) Len() int           { return len(s) }
-func (s reByKey) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s reByKey) Less(i, j int) bool { return s[i].Key < s[j].Key }
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query.go
deleted file mode 100644
index 5b3a679ce..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package query
-
-import (
-	goprocess "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
-)
-
-/*
-Query represents storage for any key-value pair.
-
-tl;dr:
-
-  queries are supported across datastores.
-  Cheap on top of relational dbs, and expensive otherwise.
-  Pick the right tool for the job!
-
-In addition to the key-value store get and set semantics, datastore
-provides an interface to retrieve multiple records at a time through
-the use of queries. The datastore Query model gleans a common set of
-operations performed when querying. To avoid pasting here years of
-database research, let's summarize the operations datastore supports.
-
-Query Operations:
-
-  * namespace - scope the query, usually by object type
-  * filters - select a subset of values by applying constraints
-  * orders - sort the results by applying sort conditions
-  * limit - impose a numeric limit on the number of results
-  * offset - skip a number of results (for efficient pagination)
-
-datastore combines these operations into a simple Query class that allows
-applications to define their constraints in a simple, generic, way without
-introducing datastore specific calls, languages, etc.
-
-Of course, different datastores provide relational query support across a
-wide spectrum, from full support in traditional databases to none at all in
-most key-value stores. Datastore aims to provide a common, simple interface
-for the sake of application evolution over time and keeping large code bases
-free of tool-specific code. It would be ridiculous to claim to support high-
-performance queries on architectures that obviously do not. Instead, datastore
-provides the interface, ideally translating queries to their native form
-(e.g. into SQL for MySQL).
-
-However, on the wrong datastore, queries can potentially incur the high cost
-of performing the aforementioned query operations on the data set directly in
-Go. It is the client's responsibility to select the right tool for the job:
-pick a data storage solution that fits the application's needs now, and wrap
-it with a datastore implementation. As the needs change, swap out datastore
-implementations to support your new use cases. Some applications, particularly
-in early development stages, can afford to incur the cost of queries on non-
-relational databases (e.g.
using a FSDatastore and not worry about a database -at all). When it comes time to switch the tool for performance, updating the -application code can be as simple as swapping the datastore in one place, not -all over the application code base. This gain in engineering time, both at -initial development and during later iterations, can significantly offset the -cost of the layer of abstraction. - -*/ -type Query struct { - Prefix string // namespaces the query to results whose keys have Prefix - Filters []Filter // filter results. apply sequentially - Orders []Order // order results. apply sequentially - Limit int // maximum number of results - Offset int // skip given number of results - KeysOnly bool // return only keys. -} - -// NotFetched is a special type that signals whether or not the value -// of an Entry has been fetched or not. This is needed because -// datastore implementations get to decide whether Query returns values -// or only keys. nil is not a good signal, as real values may be nil. -const NotFetched int = iota - -// Entry is a query result entry. -type Entry struct { - Key string // cant be ds.Key because circular imports ...!!! - Value interface{} -} - -// Result is a special entry that includes an error, so that the client -// may be warned about internal errors. -type Result struct { - Entry - - Error error -} - -// Results is a set of Query results. This is the interface for clients. -// Example: -// -// qr, _ := myds.Query(q) -// for r := range qr.Next() { -// if r.Error != nil { -// // handle. -// break -// } -// -// fmt.Println(r.Entry.Key, r.Entry.Value) -// } -// -// or, wait on all results at once: -// -// qr, _ := myds.Query(q) -// es, _ := qr.Rest() -// for _, e := range es { -// fmt.Println(e.Key, e.Value) -// } -// -type Results interface { - Query() Query // the query these Results correspond to - Next() <-chan Result // returns a channel to wait for the next result - Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once. - Close() error // client may call Close to signal early exit - - // Process returns a goprocess.Process associated with these results. - // most users will not need this function (Close is all they want), - // but it's here in case you want to connect the results to other - // goprocess-friendly things. - Process() goprocess.Process -} - -// results implements Results -type results struct { - query Query - proc goprocess.Process - res <-chan Result -} - -func (r *results) Next() <-chan Result { - return r.res -} - -func (r *results) Rest() ([]Entry, error) { - var es []Entry - for e := range r.res { - if e.Error != nil { - return es, e.Error - } - es = append(es, e.Entry) - } - <-r.proc.Closed() // wait till the processing finishes. - return es, nil -} - -func (r *results) Process() goprocess.Process { - return r.proc -} - -func (r *results) Close() error { - return r.proc.Close() -} - -func (r *results) Query() Query { - return r.query -} - -// ResultBuilder is what implementors use to construct results -// Implementors of datastores and their clients must respect the -// Process of the Request: -// -// * clients must call r.Process().Close() on an early exit, so -// implementations can reclaim resources. -// * if the Entries are read to completion (channel closed), Process -// should be closed automatically. -// * datastores must respect <-Process.Closing(), which intermediates -// an early close signal from the client. 
-//
-type ResultBuilder struct {
-	Query   Query
-	Process goprocess.Process
-	Output  chan Result
-}
-
-// Results returns a Results corresponding to this builder.
-func (rb *ResultBuilder) Results() Results {
-	return &results{
-		query: rb.Query,
-		proc:  rb.Process,
-		res:   rb.Output,
-	}
-}
-
-func NewResultBuilder(q Query) *ResultBuilder {
-	b := &ResultBuilder{
-		Query:  q,
-		Output: make(chan Result),
-	}
-	b.Process = goprocess.WithTeardown(func() error {
-		close(b.Output)
-		return nil
-	})
-	return b
-}
-
-// ResultsWithChan returns a Results object from a channel
-// of Result entries. Respects its own Close()
-func ResultsWithChan(q Query, res <-chan Result) Results {
-	b := NewResultBuilder(q)
-
-	// go consume all the entries and add them to the results.
-	b.Process.Go(func(worker goprocess.Process) {
-		for {
-			select {
-			case <-worker.Closing(): // client told us to close early
-				return
-			case e, more := <-res:
-				if !more {
-					return
-				}
-
-				select {
-				case b.Output <- e:
-				case <-worker.Closing(): // client told us to close early
-					return
-				}
-			}
-		}
-	})
-
-	go b.Process.CloseAfterChildren()
-	return b.Results()
-}
-
-// ResultsWithEntries returns a Results object from a list of entries
-func ResultsWithEntries(q Query, res []Entry) Results {
-	b := NewResultBuilder(q)
-
-	// go consume all the entries and add them to the results.
-	b.Process.Go(func(worker goprocess.Process) {
-		for _, e := range res {
-			select {
-			case b.Output <- Result{Entry: e}:
-			case <-worker.Closing(): // client told us to close early
-				return
-			}
-		}
-	})
-
-	go b.Process.CloseAfterChildren()
-	return b.Results()
-}
-
-func ResultsReplaceQuery(r Results, q Query) Results {
-	return &results{
-		query: q,
-		proc:  r.Process(),
-		res:   r.Next(),
-	}
-}
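For implementors, a sketch of honoring the Process/Close contract described above by producing results through a `ResultBuilder`. The function name, the snapshot argument, and the standalone `goprocess` import path are illustrative assumptions, not part of this repo.

```go
import (
	goprocess "github.com/jbenet/goprocess" // assumed standalone path for the gx import above
	dsq "github.com/ipfs/go-datastore/query"
)

// exampleQuery streams a map snapshot as query results. Sends select on
// worker.Closing() so an early Close() from the client reclaims the goroutine.
func exampleQuery(q dsq.Query, snapshot map[string]interface{}) dsq.Results {
	b := dsq.NewResultBuilder(q)
	b.Process.Go(func(worker goprocess.Process) {
		for k, v := range snapshot {
			select {
			case b.Output <- dsq.Result{Entry: dsq.Entry{Key: k, Value: v}}:
			case <-worker.Closing(): // client told us to close early
				return
			}
		}
	})
	go b.Process.CloseAfterChildren()
	return b.Results()
}
```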
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query_impl.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query_impl.go
deleted file mode 100644
index 9e584e7b2..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/query/query_impl.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package query
-
-func DerivedResults(qr Results, ch <-chan Result) Results {
-	return &results{
-		query: qr.Query(),
-		proc:  qr.Process(),
-		res:   ch,
-	}
-}
-
-// NaiveFilter applies a filter to the results.
-func NaiveFilter(qr Results, filter Filter) Results {
-	ch := make(chan Result)
-	go func() {
-		defer close(ch)
-		defer qr.Close()
-
-		for e := range qr.Next() {
-			if e.Error != nil || filter.Filter(e.Entry) {
-				ch <- e
-			}
-		}
-	}()
-
-	return DerivedResults(qr, ch)
-}
-
-// NaiveLimit truncates the results to a given int limit
-func NaiveLimit(qr Results, limit int) Results {
-	ch := make(chan Result)
-	go func() {
-		defer close(ch)
-		defer qr.Close()
-
-		l := 0
-		for e := range qr.Next() {
-			if e.Error != nil {
-				ch <- e
-				continue
-			}
-			ch <- e
-			l++
-			if limit > 0 && l >= limit {
-				break
-			}
-		}
-	}()
-
-	return DerivedResults(qr, ch)
-}
-
-// NaiveOffset skips a given number of results
-func NaiveOffset(qr Results, offset int) Results {
-	ch := make(chan Result)
-	go func() {
-		defer close(ch)
-		defer qr.Close()
-
-		sent := 0
-		for e := range qr.Next() {
-			if e.Error != nil {
-				ch <- e
-				continue // don't count errors against the offset
-			}
-
-			if sent < offset {
-				sent++
-				continue
-			}
-			ch <- e
-		}
-	}()
-
-	return DerivedResults(qr, ch)
-}
-
-// NaiveOrder reorders results according to given Order.
-// WARNING: this is the only non-stream friendly operation!
-func NaiveOrder(qr Results, o Order) Results {
-	ch := make(chan Result)
-	var entries []Entry
-	go func() {
-		defer close(ch)
-		defer qr.Close()
-
-		for e := range qr.Next() {
-			if e.Error != nil {
-				ch <- e
-				continue // pass errors through without sorting them
-			}
-
-			entries = append(entries, e.Entry)
-		}
-
-		o.Sort(entries)
-		for _, e := range entries {
-			ch <- Result{Entry: e}
-		}
-	}()
-
-	return DerivedResults(qr, ch)
-}
-
-func NaiveQueryApply(q Query, qr Results) Results {
-	if q.Prefix != "" {
-		qr = NaiveFilter(qr, FilterKeyPrefix{q.Prefix})
-	}
-	for _, f := range q.Filters {
-		qr = NaiveFilter(qr, f)
-	}
-	for _, o := range q.Orders {
-		qr = NaiveOrder(qr, o)
-	}
-	if q.Offset != 0 {
-		qr = NaiveOffset(qr, q.Offset)
-	}
-	if q.Limit != 0 {
-		qr = NaiveLimit(qr, q.Limit)
-	}
-	return qr
-}
-
-func ResultEntriesFrom(keys []string, vals []interface{}) []Entry {
-	re := make([]Entry, len(keys))
-	for i, k := range keys {
-		re[i] = Entry{Key: k, Value: vals[i]}
-	}
-	return re
-}
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/redis/redis.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/redis/redis.go
deleted file mode 100644
index b15d199af..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/redis/redis.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package redis
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/fzzy/radix/redis"
-	datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
-)
-
-var _ datastore.Datastore = &Datastore{}
-var _ datastore.ThreadSafeDatastore = &Datastore{}
-
-var ErrInvalidType = errors.New("redis datastore: invalid type error. this datastore only supports []byte values")
-
-func NewExpiringDatastore(client *redis.Client, ttl time.Duration) (*Datastore, error) {
-	return &Datastore{
-		client: client,
-		ttl:    ttl,
-	}, nil
-}
-
-func NewDatastore(client *redis.Client) (*Datastore, error) {
-	return &Datastore{
-		client: client,
-	}, nil
-}
-
-type Datastore struct {
-	mu     sync.Mutex
-	client *redis.Client
-	ttl    time.Duration
-}
-
-func (ds *Datastore) Put(key datastore.Key, value interface{}) error {
-	ds.mu.Lock()
-	defer ds.mu.Unlock()
-
-	data, ok := value.([]byte)
-	if !ok {
-		return ErrInvalidType
-	}
-
-	ds.client.Append("SET", key.String(), data)
-	if ds.ttl != 0 {
-		ds.client.Append("EXPIRE", key.String(), ds.ttl.Seconds())
-	}
-	if err := ds.client.GetReply().Err; err != nil {
-		return fmt.Errorf("failed to put value: %s", err)
-	}
-	if ds.ttl != 0 {
-		if err := ds.client.GetReply().Err; err != nil {
-			return fmt.Errorf("failed to set expiration: %s", err)
-		}
-	}
-	return nil
-}
-
-func (ds *Datastore) Get(key datastore.Key) (value interface{}, err error) {
-	ds.mu.Lock()
-	defer ds.mu.Unlock()
-	return ds.client.Cmd("GET", key.String()).Bytes()
-}
-
-func (ds *Datastore) Has(key datastore.Key) (exists bool, err error) {
-	ds.mu.Lock()
-	defer ds.mu.Unlock()
-	return ds.client.Cmd("EXISTS", key.String()).Bool()
-}
-
-func (ds *Datastore) Delete(key datastore.Key) (err error) {
-	ds.mu.Lock()
-	defer ds.mu.Unlock()
-	return ds.client.Cmd("DEL", key.String()).Err
-}
-
-func (ds *Datastore) Query(q query.Query) (query.Results, error) {
-	return nil, errors.New("TODO implement query for redis datastore?")
-}
-
-func (ds *Datastore) IsThreadSafe() {}
-
-func (ds *Datastore) Batch() (datastore.Batch, error) {
-	return nil, datastore.ErrBatchUnsupported
-}
-
-func (ds *Datastore) Close() error {
-	return ds.client.Close()
-}
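A hedged sketch of wiring up the Redis-backed datastore above. It assumes the `fzzy/radix` client's `Dial` constructor and uses standalone import paths (the `redisds` alias and paths are illustrative, not from this repo). Note the `[]byte`-only value constraint enforced by `ErrInvalidType`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/fzzy/radix/redis"

	ds "github.com/ipfs/go-datastore"
	redisds "github.com/ipfs/go-datastore/redis" // illustrative import path
)

func main() {
	client, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		log.Fatal(err)
	}
	store, err := redisds.NewDatastore(client)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// Values must be []byte; anything else returns ErrInvalidType.
	if err := store.Put(ds.NewKey("/greeting"), []byte("hello")); err != nil {
		log.Fatal(err)
	}
	v, err := store.Get(ds.NewKey("/greeting"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", v.([]byte))
}
```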
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync/sync.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync/sync.go
deleted file mode 100644
index 7ea9b652a..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync/sync.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package sync
-
-import (
-	"io"
-	"sync"
-
-	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
-)
-
-// MutexDatastore contains a child datastore and a mutex.
-// Used for coarse sync.
-type MutexDatastore struct {
-	sync.RWMutex
-
-	child ds.Datastore
-}
-
-// MutexWrap constructs a datastore with a coarse lock around
-// the entire datastore, for every single operation
-func MutexWrap(d ds.Datastore) *MutexDatastore {
-	return &MutexDatastore{child: d}
-}
-
-// Children implements Shim
-func (d *MutexDatastore) Children() []ds.Datastore {
-	return []ds.Datastore{d.child}
-}
-
-// IsThreadSafe implements ThreadSafeDatastore
-func (d *MutexDatastore) IsThreadSafe() {}
-
-// Put implements Datastore.Put
-func (d *MutexDatastore) Put(key ds.Key, value interface{}) (err error) {
-	d.Lock()
-	defer d.Unlock()
-	return d.child.Put(key, value)
-}
-
-// Get implements Datastore.Get
-func (d *MutexDatastore) Get(key ds.Key) (value interface{}, err error) {
-	d.RLock()
-	defer d.RUnlock()
-	return d.child.Get(key)
-}
-
-// Has implements Datastore.Has
-func (d *MutexDatastore) Has(key ds.Key) (exists bool, err error) {
-	d.RLock()
-	defer d.RUnlock()
-	return d.child.Has(key)
-}
-
-// Delete implements Datastore.Delete
-func (d *MutexDatastore) Delete(key ds.Key) (err error) {
-	d.Lock()
-	defer d.Unlock()
-	return d.child.Delete(key)
-}
-
-// Query implements Datastore.Query
-func (d *MutexDatastore) Query(q dsq.Query) (dsq.Results, error) {
-	d.RLock()
-	defer d.RUnlock()
-	return d.child.Query(q)
-}
-
-func (d *MutexDatastore) Batch() (ds.Batch, error) {
-	d.RLock()
-	defer d.RUnlock()
-	bds, ok := d.child.(ds.Batching)
-	if !ok {
-		return nil, ds.ErrBatchUnsupported
-	}
-
-	b, err := bds.Batch()
-	if err != nil {
-		return nil, err
-	}
-	return &syncBatch{
-		batch: b,
-		mds:   d,
-	}, nil
-}
-
-func (d *MutexDatastore) Close() error {
-	d.RWMutex.Lock()
-	defer d.RWMutex.Unlock()
-	if c, ok := d.child.(io.Closer); ok {
-		return c.Close()
-	}
-	return nil
-}
-
-type syncBatch struct {
-	batch ds.Batch
-	mds   *MutexDatastore
-}
-
-func (b *syncBatch) Put(key ds.Key, val interface{}) error {
-	b.mds.Lock()
-	defer b.mds.Unlock()
-	return b.batch.Put(key, val)
-}
-
-func (b *syncBatch) Delete(key ds.Key) error {
-	b.mds.Lock()
-	defer b.mds.Unlock()
-	return b.batch.Delete(key)
-}
-
-func (b *syncBatch) Commit() error {
-	b.mds.Lock()
-	defer b.mds.Unlock()
-	return b.batch.Commit()
-}
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount/mount.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount/mount.go
deleted file mode 100644
index 6e601a96b..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount/mount.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Package syncmount provides a Datastore that has other Datastores
-// mounted at various key prefixes and is threadsafe
-package syncmount
-
-import (
-	"errors"
-	"io"
-	"strings"
-	"sync"
-
-	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
-	
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -var ( - ErrNoMount = errors.New("no datastore mounted for this key") -) - -type Mount struct { - Prefix ds.Key - Datastore ds.Datastore -} - -func New(mounts []Mount) *Datastore { - // make a copy so we're sure it doesn't mutate - m := make([]Mount, len(mounts)) - for i, v := range mounts { - m[i] = v - } - return &Datastore{mounts: m} -} - -type Datastore struct { - mounts []Mount - lk sync.Mutex -} - -var _ ds.Datastore = (*Datastore)(nil) - -func (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) { - d.lk.Lock() - defer d.lk.Unlock() - for _, m := range d.mounts { - if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) { - s := strings.TrimPrefix(key.String(), m.Prefix.String()) - k := ds.NewKey(s) - return m.Datastore, m.Prefix, k - } - } - return nil, ds.NewKey("/"), key -} - -func (d *Datastore) Put(key ds.Key, value interface{}) error { - cds, _, k := d.lookup(key) - if cds == nil { - return ErrNoMount - } - return cds.Put(k, value) -} - -func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { - cds, _, k := d.lookup(key) - if cds == nil { - return nil, ds.ErrNotFound - } - return cds.Get(k) -} - -func (d *Datastore) Has(key ds.Key) (exists bool, err error) { - cds, _, k := d.lookup(key) - if cds == nil { - return false, nil - } - return cds.Has(k) -} - -func (d *Datastore) Delete(key ds.Key) error { - cds, _, k := d.lookup(key) - if cds == nil { - return ds.ErrNotFound - } - return cds.Delete(k) -} - -func (d *Datastore) Query(q query.Query) (query.Results, error) { - if len(q.Filters) > 0 || - len(q.Orders) > 0 || - q.Limit > 0 || - q.Offset > 0 { - // TODO this is overly simplistic, but the only caller is - // `ipfs refs local` for now, and this gets us moving. 
- return nil, errors.New("mount only supports listing all prefixed keys in random order") - } - key := ds.NewKey(q.Prefix) - cds, mount, k := d.lookup(key) - if cds == nil { - return nil, errors.New("mount only supports listing a mount point") - } - // TODO support listing cross mount points too - - // delegate the query to the mounted datastore, while adjusting - // keys in and out - q2 := q - q2.Prefix = k.String() - wrapDS := keytransform.Wrap(cds, &keytransform.Pair{ - Convert: func(ds.Key) ds.Key { - panic("this should never be called") - }, - Invert: func(k ds.Key) ds.Key { - return mount.Child(k) - }, - }) - - r, err := wrapDS.Query(q2) - if err != nil { - return nil, err - } - r = query.ResultsReplaceQuery(r, q) - return r, nil -} - -func (d *Datastore) IsThreadSafe() {} - -func (d *Datastore) Close() error { - for _, d := range d.mounts { - if c, ok := d.Datastore.(io.Closer); ok { - err := c.Close() - if err != nil { - return err - } - } - } - return nil -} - -type mountBatch struct { - mounts map[string]ds.Batch - lk sync.Mutex - - d *Datastore -} - -func (d *Datastore) Batch() (ds.Batch, error) { - return &mountBatch{ - mounts: make(map[string]ds.Batch), - d: d, - }, nil -} - -func (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) { - mt.lk.Lock() - defer mt.lk.Unlock() - - child, loc, rest := mt.d.lookup(key) - t, ok := mt.mounts[loc.String()] - if !ok { - bds, ok := child.(ds.Batching) - if !ok { - return nil, ds.NewKey(""), ds.ErrBatchUnsupported - } - var err error - t, err = bds.Batch() - if err != nil { - return nil, ds.NewKey(""), err - } - mt.mounts[loc.String()] = t - } - return t, rest, nil -} - -func (mt *mountBatch) Put(key ds.Key, val interface{}) error { - t, rest, err := mt.lookupBatch(key) - if err != nil { - return err - } - - return t.Put(rest, val) -} - -func (mt *mountBatch) Delete(key ds.Key) error { - t, rest, err := mt.lookupBatch(key) - if err != nil { - return err - } - - return t.Delete(rest) -} - -func (mt *mountBatch) Commit() error { - for _, t := range mt.mounts { - err := t.Commit() - if err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/assert.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/assert.go deleted file mode 100644 index 9cf7e6553..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/assert.go +++ /dev/null @@ -1,25 +0,0 @@ -package dstest - -import "testing" - -func Nil(err error, t *testing.T, msgs ...string) { - if err != nil { - t.Fatal(msgs, "error:", err) - } -} - -func True(v bool, t *testing.T, msgs ...string) { - if !v { - t.Fatal(msgs) - } -} - -func False(v bool, t *testing.T, msgs ...string) { - True(!v, t, msgs...) 
-}
-
-func Err(err error, t *testing.T, msgs ...string) {
-	if err == nil {
-		t.Fatal(msgs, "expected an error, got nil")
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/test_util.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/test_util.go
deleted file mode 100644
index 28458b553..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/test/test_util.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package dstest
-
-import (
-	"bytes"
-	"encoding/base32"
-	"testing"
-
-	rand "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/randbo"
-	dstore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-)
-
-func RunBatchTest(t *testing.T, ds dstore.Batching) {
-	batch, err := ds.Batch()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := rand.New()
-	var blocks [][]byte
-	var keys []dstore.Key
-	for i := 0; i < 20; i++ {
-		blk := make([]byte, 256*1024)
-		r.Read(blk)
-		blocks = append(blocks, blk)
-
-		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
-		keys = append(keys, key)
-
-		err := batch.Put(key, blk)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	// Ensure they are not in the datastore before committing
-	for _, k := range keys {
-		_, err := ds.Get(k)
-		if err == nil {
-			t.Fatal("should not have found this block")
-		}
-	}
-
-	// commit, write them to the datastore
-	err = batch.Commit()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for i, k := range keys {
-		blk, err := ds.Get(k)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if !bytes.Equal(blk.([]byte), blocks[i]) {
-			t.Fatal("blocks not correct!")
-		}
-	}
-}
-
-func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
-	r := rand.New()
-	var keys []dstore.Key
-	for i := 0; i < 20; i++ {
-		blk := make([]byte, 16)
-		r.Read(blk)
-
-		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
-		keys = append(keys, key)
-
-		err := ds.Put(key, blk)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	batch, err := ds.Batch()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for _, k := range keys {
-		err := batch.Delete(k)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-	err = batch.Commit()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for _, k := range keys {
-		_, err := ds.Get(k)
-		if err == nil {
-			t.Fatal("shouldn't have found block")
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/tiered/tiered.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/tiered/tiered.go
deleted file mode 100644
index 0b698095c..000000000
--- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/tiered/tiered.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package tiered
-
-import (
-	"fmt"
-	"sync"
-
-	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
-)
-
-type tiered []ds.Datastore
-
-// New returns a tiered datastore. Puts and Deletes will write-through to
-// all datastores, Has and Get will try each datastore sequentially, and
-// Query will always try the last one (most complete) first.
-func New(dses ...ds.Datastore) tiered {
-	return tiered(dses)
-}
-
-// Put stores the object `value` named by `key`.
-func (d tiered) Put(key ds.Key, value interface{}) (err error) { - errs := make(chan error, len(d)) - - var wg sync.WaitGroup - for _, cd := range d { - wg.Add(1) - go func(cd ds.Datastore) { - defer wg.Done() - if err := cd.Put(key, value); err != nil { - errs <- err - } - }(cd) - } - wg.Wait() - - close(errs) - for err := range errs { - return err - } - return nil -} - -// Get retrieves the object `value` named by `key`. -func (d tiered) Get(key ds.Key) (value interface{}, err error) { - err = fmt.Errorf("no datastores") - for _, cd := range d { - value, err = cd.Get(key) - if err == nil { - break - } - } - return -} - -// Has returns whether the `key` is mapped to a `value`. -func (d tiered) Has(key ds.Key) (exists bool, err error) { - err = fmt.Errorf("no datastores") - for _, cd := range d { - exists, err = cd.Has(key) - if err == nil && exists { - break - } - } - return -} - -// Delete removes the value for given `key`. -func (d tiered) Delete(key ds.Key) (err error) { - errs := make(chan error, len(d)) - - var wg sync.WaitGroup - for _, cd := range d { - wg.Add(1) - go func(cd ds.Datastore) { - defer wg.Done() - if err := cd.Delete(key); err != nil { - errs <- err - } - }(cd) - } - wg.Wait() - - close(errs) - for err := range errs { - return err - } - return nil -} - -// Query returns a list of keys in the datastore -func (d tiered) Query(q dsq.Query) (dsq.Results, error) { - // query always the last (most complete) one - return d[len(d)-1].Query(q) -} diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/timecache/timecache.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/timecache/timecache.go deleted file mode 100644 index bfa793aec..000000000 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/timecache/timecache.go +++ /dev/null @@ -1,104 +0,0 @@ -package timecache - -import ( - "io" - "sync" - "time" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" -) - -// op keys -var ( - putKey = "put" - getKey = "get" - hasKey = "has" - deleteKey = "delete" -) - -type datastore struct { - cache ds.Datastore - ttl time.Duration - - ttlmu sync.Mutex - ttls map[ds.Key]time.Time -} - -func WithTTL(ttl time.Duration) *datastore { - return WithCache(ds.NewMapDatastore(), ttl) -} - -// WithCache wraps a given datastore as a timecache. -// Get + Has requests are considered expired after a TTL. -func WithCache(d ds.Datastore, ttl time.Duration) *datastore { - return &datastore{cache: d, ttl: ttl, ttls: make(map[ds.Key]time.Time)} -} - -func (d *datastore) gc() { - var now = time.Now() - var del []ds.Key - - // remove all expired ttls. - d.ttlmu.Lock() - for k, ttl := range d.ttls { - if now.After(ttl) { - delete(d.ttls, k) - del = append(del, k) - } - } - d.ttlmu.Unlock() - - for _, k := range del { - d.cache.Delete(k) - } -} - -func (d *datastore) ttlPut(key ds.Key) { - d.ttlmu.Lock() - d.ttls[key] = time.Now().Add(d.ttl) - d.ttlmu.Unlock() -} - -func (d *datastore) ttlDelete(key ds.Key) { - d.ttlmu.Lock() - delete(d.ttls, key) - d.ttlmu.Unlock() -} - -// Put stores the object `value` named by `key`. -func (d *datastore) Put(key ds.Key, value interface{}) (err error) { - err = d.cache.Put(key, value) - d.ttlPut(key) - return err -} - -// Get retrieves the object `value` named by `key`. -func (d *datastore) Get(key ds.Key) (value interface{}, err error) { - d.gc() - return d.cache.Get(key) -} - -// Has returns whether the `key` is mapped to a `value`. 
-func (d *datastore) Has(key ds.Key) (exists bool, err error) { - d.gc() - return d.cache.Has(key) -} - -// Delete removes the value for given `key`. -func (d *datastore) Delete(key ds.Key) (err error) { - d.ttlDelete(key) - return d.cache.Delete(key) -} - -// Query returns a list of keys in the datastore -func (d *datastore) Query(q dsq.Query) (dsq.Results, error) { - return d.cache.Query(q) -} - -func (d *datastore) Close() error { - if c, ok := d.cache.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx.go b/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx.go deleted file mode 100644 index c92daa9c9..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package ctxext provides multiple useful context constructors. -package ctxext - -import ( - "time" - - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" -) - -// WithDeadlineFraction returns a Context with a fraction of the -// original context's timeout. This is useful in sequential pipelines -// of work, where one might try options and fall back to others -// depending on the time available, or failure to respond. For example: -// -// // getPicture returns a picture from our encrypted database -// // we have a pipeline of multiple steps. we need to: -// // - get the data from a database -// // - decrypt it -// // - apply many transforms -// // -// // we **know** that each step takes increasingly more time. -// // The transforms are much more expensive than decryption, and -// // decryption is more expensive than the database lookup. -// // If our database takes too long (i.e. >0.2 of available time), -// // there's no use in continuing. -// func getPicture(ctx context.Context, key string) ([]byte, error) { -// // fractional timeout contexts to the rescue! -// -// // try the database with 0.2 of remaining time. -// ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2) -// val, err := db.Get(ctx1, key) -// if err != nil { -// return nil, err -// } -// -// // try decryption with 0.3 of remaining time. -// ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3) -// if val, err = decryptor.Decrypt(ctx2, val); err != nil { -// return nil, err -// } -// -// // try transforms with all remaining time. hopefully it's enough! -// return transformer.Transform(ctx, val) -// } -// -// -func WithDeadlineFraction(ctx context.Context, fraction float64) ( - context.Context, context.CancelFunc) { - - d, found := ctx.Deadline() - if !found { // no deadline - return context.WithCancel(ctx) - } - - left := d.Sub(time.Now()) - if left < 0 { // already passed... - return context.WithCancel(ctx) - } - - left = time.Duration(float64(left) * fraction) - return context.WithTimeout(ctx, left) -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx_test.go b/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx_test.go deleted file mode 100644 index 975b7696d..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-context/frac/fracctx_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package ctxext - -import ( - "os" - "testing" - "time" - - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" -) - -// this test is on the context tool itself, not our stuff. it's for sanity on ours. 
-func TestDeadline(t *testing.T) { - if os.Getenv("TRAVIS") == "true" { - t.Skip("timeouts don't work reliably on travis") - } - - ctx, _ := context.WithTimeout(context.Background(), 5*time.Millisecond) - - select { - case <-ctx.Done(): - t.Fatal("ended too early") - default: - } - - <-time.After(6 * time.Millisecond) - - select { - case <-ctx.Done(): - default: - t.Fatal("ended too late") - } -} - -func TestDeadlineFractionForever(t *testing.T) { - - ctx, _ := WithDeadlineFraction(context.Background(), 0.5) - - _, found := ctx.Deadline() - if found { - t.Fatal("should last forever") - } -} - -func TestDeadlineFractionHalf(t *testing.T) { - if os.Getenv("TRAVIS") == "true" { - t.Skip("timeouts don't work reliably on travis") - } - - ctx1, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) - ctx2, _ := WithDeadlineFraction(ctx1, 0.5) - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 ended too early") - case <-ctx2.Done(): - t.Fatal("ctx2 ended too early") - default: - } - - <-time.After(2 * time.Millisecond) - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 ended too early") - case <-ctx2.Done(): - t.Fatal("ctx2 ended too early") - default: - } - - <-time.After(4 * time.Millisecond) - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 ended too early") - case <-ctx2.Done(): - default: - t.Fatal("ctx2 ended too late") - } - - <-time.After(6 * time.Millisecond) - - select { - case <-ctx1.Done(): - default: - t.Fatal("ctx1 ended too late") - } - -} - -func TestDeadlineFractionCancel(t *testing.T) { - - ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond) - ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5) - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 ended too early") - case <-ctx2.Done(): - t.Fatal("ctx2 ended too early") - default: - } - - cancel2() - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 should NOT be cancelled") - case <-ctx2.Done(): - default: - t.Fatal("ctx2 should be cancelled") - } - - cancel1() - - select { - case <-ctx1.Done(): - case <-ctx2.Done(): - default: - t.Fatal("ctx1 should be cancelled") - } - -} - -func TestDeadlineFractionObeysParent(t *testing.T) { - - ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond) - ctx2, _ := WithDeadlineFraction(ctx1, 0.5) - - select { - case <-ctx1.Done(): - t.Fatal("ctx1 ended too early") - case <-ctx2.Done(): - t.Fatal("ctx2 ended too early") - default: - } - - cancel1() - - select { - case <-ctx2.Done(): - default: - t.Fatal("ctx2 should be cancelled") - } - -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio.go b/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio.go deleted file mode 100644 index 411eea892..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package ctxio provides io.Reader and io.Writer wrappers that -// respect context.Contexts. Use these at the interface between -// your context code and your io. -// -// WARNING: read the code. see how writes and reads will continue -// until you cancel the io. Maybe this package should provide -// versions of io.ReadCloser and io.WriteCloser that automatically -// call .Close when the context expires. But for now -- since in my -// use cases I have long-lived connections with ephemeral io wrappers -// -- this has yet to be a need. 
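Before the implementation that follows, a usage sketch of the read side of this package. The helper name and the standalone import paths are assumptions for illustration; the key point is the package's documented caveat that cancellation does not stop the underlying I/O, so the caller should also close the connection.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/net/context" // assumed standalone path for the vendored context package
	ctxio "github.com/jbenet/go-context/io"
)

// readWithTimeout bounds a single Read on conn by a context deadline.
func readWithTimeout(conn net.Conn, d time.Duration) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()

	r := ctxio.NewReader(ctx, conn)
	buf := make([]byte, 4096)
	n, err := r.Read(buf)
	if err != nil {
		// Per the warning above, the inner Read may still be in flight;
		// close the connection so it unblocks.
		conn.Close()
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Fprintf(conn, "GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
	data, err := readWithTimeout(conn, 5*time.Second)
	fmt.Println(len(data), err)
}
```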
-package ctxio
-
-import (
-	"io"
-
-	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
-)
-
-type ioret struct {
-	n   int
-	err error
-}
-
-type Writer interface {
-	io.Writer
-}
-
-type ctxWriter struct {
-	w   io.Writer
-	ctx context.Context
-}
-
-// NewWriter wraps a writer to make it respect given Context.
-// If there is a blocking write, the returned Writer will return
-// whenever the context is cancelled (the return values are n=0
-// and err=ctx.Err().)
-//
-// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
-// write-- there is no way to do that with the standard go io
-// interface. So the read and write _will_ happen or hang. So, use
-// this sparingly, make sure to cancel the read or write as necessary
-// (e.g. closing a connection whose context is up, etc.)
-//
-// Furthermore, in order to protect your memory from being read
-// _after_ you've cancelled the context, this io.Writer will
-// first make a **copy** of the buffer.
-func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
-	if ctx == nil {
-		ctx = context.Background()
-	}
-	return &ctxWriter{ctx: ctx, w: w}
-}
-
-func (w *ctxWriter) Write(buf []byte) (int, error) {
-	buf2 := make([]byte, len(buf))
-	copy(buf2, buf)
-
-	c := make(chan ioret, 1)
-
-	go func() {
-		n, err := w.w.Write(buf2)
-		c <- ioret{n, err}
-		close(c)
-	}()
-
-	select {
-	case r := <-c:
-		return r.n, r.err
-	case <-w.ctx.Done():
-		return 0, w.ctx.Err()
-	}
-}
-
-type Reader interface {
-	io.Reader
-}
-
-type ctxReader struct {
-	r   io.Reader
-	ctx context.Context
-}
-
-// NewReader wraps a reader to make it respect given Context.
-// If there is a blocking read, the returned Reader will return
-// whenever the context is cancelled (the return values are n=0
-// and err=ctx.Err().)
-//
-// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
-// write-- there is no way to do that with the standard go io
-// interface. So the read and write _will_ happen or hang. So, use
-// this sparingly, make sure to cancel the read or write as necessary
-// (e.g. closing a connection whose context is up, etc.)
-//
-// Furthermore, in order to protect your memory from being read
-// _before_ you've cancelled the context, this io.Reader will
-// allocate a buffer of the same size, and **copy** into the client's
-// if the read succeeds in time.
-func NewReader(ctx context.Context, r io.Reader) *ctxReader { - return &ctxReader{ctx: ctx, r: r} -} - -func (r *ctxReader) Read(buf []byte) (int, error) { - buf2 := make([]byte, len(buf)) - - c := make(chan ioret, 1) - - go func() { - n, err := r.r.Read(buf2) - c <- ioret{n, err} - close(c) - }() - - select { - case ret := <-c: - copy(buf, buf2) - return ret.n, ret.err - case <-r.ctx.Done(): - return 0, r.ctx.Err() - } -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio_test.go b/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio_test.go deleted file mode 100644 index a9de8694c..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-context/io/ctxio_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package ctxio - -import ( - "bytes" - "io" - "testing" - "time" - - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" -) - -func TestReader(t *testing.T) { - buf := []byte("abcdef") - buf2 := make([]byte, 3) - r := NewReader(context.Background(), bytes.NewReader(buf)) - - // read first half - n, err := r.Read(buf2) - if n != 3 { - t.Error("n should be 3") - } - if err != nil { - t.Error("should have no error") - } - if string(buf2) != string(buf[:3]) { - t.Error("incorrect contents") - } - - // read second half - n, err = r.Read(buf2) - if n != 3 { - t.Error("n should be 3") - } - if err != nil { - t.Error("should have no error") - } - if string(buf2) != string(buf[3:6]) { - t.Error("incorrect contents") - } - - // read more. - n, err = r.Read(buf2) - if n != 0 { - t.Error("n should be 0", n) - } - if err != io.EOF { - t.Error("should be EOF", err) - } -} - -func TestWriter(t *testing.T) { - var buf bytes.Buffer - w := NewWriter(context.Background(), &buf) - - // write three - n, err := w.Write([]byte("abc")) - if n != 3 { - t.Error("n should be 3") - } - if err != nil { - t.Error("should have no error") - } - if string(buf.Bytes()) != string("abc") { - t.Error("incorrect contents") - } - - // write three more - n, err = w.Write([]byte("def")) - if n != 3 { - t.Error("n should be 3") - } - if err != nil { - t.Error("should have no error") - } - if string(buf.Bytes()) != string("abcdef") { - t.Error("incorrect contents") - } -} - -func TestReaderCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - piper, pipew := io.Pipe() - r := NewReader(ctx, piper) - - buf := make([]byte, 10) - done := make(chan ioret) - - go func() { - n, err := r.Read(buf) - done <- ioret{n, err} - }() - - pipew.Write([]byte("abcdefghij")) - - select { - case ret := <-done: - if ret.n != 10 { - t.Error("ret.n should be 10", ret.n) - } - if ret.err != nil { - t.Error("ret.err should be nil", ret.err) - } - if string(buf) != "abcdefghij" { - t.Error("read contents differ") - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to read") - } - - go func() { - n, err := r.Read(buf) - done <- ioret{n, err} - }() - - cancel() - - select { - case ret := <-done: - if ret.n != 0 { - t.Error("ret.n should be 0", ret.n) - } - if ret.err == nil { - t.Error("ret.err should be ctx error", ret.err) - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to stop reading after cancel") - } -} - -func TestWriterCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - piper, pipew := io.Pipe() - w := NewWriter(ctx, pipew) - - buf := make([]byte, 10) - done := make(chan ioret) - - go func() { - n, err := w.Write([]byte("abcdefghij")) - done <- ioret{n, err} - }() - - piper.Read(buf) - - select { - case ret := 
<-done: - if ret.n != 10 { - t.Error("ret.n should be 10", ret.n) - } - if ret.err != nil { - t.Error("ret.err should be nil", ret.err) - } - if string(buf) != "abcdefghij" { - t.Error("write contents differ") - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to write") - } - - go func() { - n, err := w.Write([]byte("abcdefghij")) - done <- ioret{n, err} - }() - - cancel() - - select { - case ret := <-done: - if ret.n != 0 { - t.Error("ret.n should be 0", ret.n) - } - if ret.err == nil { - t.Error("ret.err should be ctx error", ret.err) - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to stop writing after cancel") - } -} - -func TestReadPostCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - piper, pipew := io.Pipe() - r := NewReader(ctx, piper) - - buf := make([]byte, 10) - done := make(chan ioret) - - go func() { - n, err := r.Read(buf) - done <- ioret{n, err} - }() - - cancel() - - select { - case ret := <-done: - if ret.n != 0 { - t.Error("ret.n should be 0", ret.n) - } - if ret.err == nil { - t.Error("ret.err should be ctx error", ret.err) - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to stop reading after cancel") - } - - pipew.Write([]byte("abcdefghij")) - - if !bytes.Equal(buf, make([]byte, len(buf))) { - t.Fatal("buffer should have not been written to") - } -} - -func TestWritePostCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - piper, pipew := io.Pipe() - w := NewWriter(ctx, pipew) - - buf := []byte("abcdefghij") - buf2 := make([]byte, 10) - done := make(chan ioret) - - go func() { - n, err := w.Write(buf) - done <- ioret{n, err} - }() - - piper.Read(buf2) - - select { - case ret := <-done: - if ret.n != 10 { - t.Error("ret.n should be 10", ret.n) - } - if ret.err != nil { - t.Error("ret.err should be nil", ret.err) - } - if string(buf2) != "abcdefghij" { - t.Error("write contents differ") - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to write") - } - - go func() { - n, err := w.Write(buf) - done <- ioret{n, err} - }() - - cancel() - - select { - case ret := <-done: - if ret.n != 0 { - t.Error("ret.n should be 0", ret.n) - } - if ret.err == nil { - t.Error("ret.err should be ctx error", ret.err) - } - case <-time.After(20 * time.Millisecond): - t.Fatal("failed to stop writing after cancel") - } - - copy(buf, []byte("aaaaaaaaaa")) - - piper.Read(buf2) - - if string(buf2) == "aaaaaaaaaa" { - t.Error("buffer was read from after ctx cancel") - } else if string(buf2) != "abcdefghij" { - t.Error("write contents differ from expected") - } -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/.travis.yml b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/.travis.yml deleted file mode 100644 index d6b2109ca..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.4 - - release - -script: - - go test -race -cpu=5 -v ./... diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/LICENSE b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/LICENSE deleted file mode 100644 index 5c304d1a4..000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
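To make the appendix concrete: applied to a Go source file, the boilerplate notice above would typically look like the following sketch, with a hypothetical year and owner standing in for the bracketed fields:

```go
// Copyright 2016 Example Author
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package example
```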
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/README.md b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/README.md
deleted file mode 100644
index 9d8cc60ba..000000000
--- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# go-os-rename
-
-Easily rename files in place. This is needed because Windows errors on rename if a file already exists. Please see this commit, from which the code is extracted: https://github.com/lvarvel/cacheddownloader/commit/505a1fd
-
-#### Godoc: https://godoc.org/github.com/jbenet/go-os-rename
-
-## Authors
-
-The original authors of this code are David Morhovich, David Varvel, and John Shahid (see [this commit](https://github.com/lvarvel/cacheddownloader/commit/505a1fdcc5af7823f20d7c87d9e4d1c833c59053)).
-
-## License
-
-The code originally comes from https://github.com/lvarvel/cacheddownloader, which is licensed under Apache 2.0.
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_std.go b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_std.go
deleted file mode 100644
index b0d3644ee..000000000
--- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_std.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !windows
-
-package osrename
-
-import "os"
-
-func Rename(src, dst string) error {
-	return os.Rename(src, dst)
-}
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_test.go b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_test.go
deleted file mode 100644
index d21ab34f3..000000000
--- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package osrename_test
-
-import (
-	"bytes"
-	rn "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-)
-
-func tempdir(t testing.TB) (path string, cleanup func()) {
-	path, err := ioutil.TempDir("", "test-windows-rename")
-	if err != nil {
-		t.Fatalf("cannot create temp directory: %v", err)
-	}
-
-	cleanup = func() {
-		if err := os.RemoveAll(path); err != nil {
-			t.Errorf("tempdir cleanup failed: %v", err)
-		}
-	}
-	return path, cleanup
-}
-
-func TestAtomicRename(t *testing.T) {
-	dirBase, cleanup := tempdir(t)
-	defer cleanup()
-
-	// Create the base file inside the temp directory, so that cleanup removes it
-	origFilePath := filepath.Join(dirBase, "original.txt")
-	err := ioutil.WriteFile(origFilePath, []byte("tests"), 0644)
-
-	if err != nil {
-		t.Fatalf("Could not write original test file")
-	}
-
-	// Create secondary file
-	tempFilePath := filepath.Join(dirBase, "newTempFile.txt")
-	err = ioutil.WriteFile(tempFilePath, []byte("success"), 0644)
-	if err != nil {
-		t.Fatalf("Could not write temp file")
-	}
-
-	// Execute our magic rename function
-	err = rn.Rename(tempFilePath, origFilePath)
-	if err != nil {
-		t.Fatalf("Could not rename temp file")
-	}
-
-	// Let's read the renamed file and ensure that we get data
-	renamedFileBytes, err := ioutil.ReadFile(origFilePath)
-	if err != nil {
-		t.Fatalf("Could not read renamed file")
-	}
-
-	// Let's compare the bytes of the renamed file
-	if !bytes.Equal(renamedFileBytes, []byte("success")) {
-		t.Fatalf("Did not find expected bytes in renamed file %s vs %s", renamedFileBytes, []byte("success"))
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_windows.go b/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_windows.go
deleted file mode 100644
index 8ee46d9fe..000000000
--- a/Godeps/_workspace/src/github.com/jbenet/go-os-rename/rename_windows.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//
+build windows - -package osrename - -import ( - "syscall" - "unsafe" -) - -func Rename(src, dst string) error { - kernel32, err := syscall.LoadLibrary("kernel32.dll") - if err != nil { - return err - } - defer syscall.FreeLibrary(kernel32) - moveFileExUnicode, err := syscall.GetProcAddress(kernel32, "MoveFileExW") - if err != nil { - return err - } - - srcString, err := syscall.UTF16PtrFromString(src) - if err != nil { - return err - } - - dstString, err := syscall.UTF16PtrFromString(dst) - if err != nil { - return err - } - - srcPtr := uintptr(unsafe.Pointer(srcString)) - dstPtr := uintptr(unsafe.Pointer(dstString)) - - MOVEFILE_REPLACE_EXISTING := 0x1 - flag := uintptr(MOVEFILE_REPLACE_EXISTING) - - _, _, callErr := syscall.Syscall(uintptr(moveFileExUnicode), 3, srcPtr, dstPtr, flag) - if callErr != 0 { - return callErr - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go b/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go index 563c4fbef..cdf79b64f 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go @@ -5,8 +5,8 @@ import ( "os" "strconv" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize" random "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-random" + "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" ) func main() { diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/.travis.yml b/Godeps/_workspace/src/github.com/olekukonko/ts/.travis.yml deleted file mode 100644 index e53b2de38..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - tip \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/LICENCE b/Godeps/_workspace/src/github.com/olekukonko/ts/LICENCE deleted file mode 100644 index 1fd848425..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/LICENCE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 by Oleku Konko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
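The go-os-rename package removed above exposes a single `Rename(src, dst string) error` function. A minimal usage sketch, assuming the upstream import path rather than the vendored `Godeps` one, and with hypothetical file names:

```go
package main

import (
	"io/ioutil"
	"log"

	osrename "github.com/jbenet/go-os-rename"
)

func main() {
	// Stage the new contents in a temporary file first.
	if err := ioutil.WriteFile("config.json.tmp", []byte(`{"updated": true}`), 0644); err != nil {
		log.Fatal(err)
	}
	// Then swap it into place. On Windows, a plain os.Rename fails if
	// config.json already exists; osrename.Rename instead calls
	// MoveFileEx with MOVEFILE_REPLACE_EXISTING, as shown above.
	if err := osrename.Rename("config.json.tmp", "config.json"); err != nil {
		log.Fatal(err)
	}
}
```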
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/README.md b/Godeps/_workspace/src/github.com/olekukonko/ts/README.md
deleted file mode 100644
index 4e1598b1c..000000000
--- a/Godeps/_workspace/src/github.com/olekukonko/ts/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-ts (Terminal Size)
-==
-
-[![Build Status](https://travis-ci.org/olekukonko/ts.png?branch=master)](https://travis-ci.org/olekukonko/ts) [![Total views](https://sourcegraph.com/api/repos/github.com/olekukonko/ts/counters/views.png)](https://sourcegraph.com/github.com/olekukonko/ts)
-
-Simple Go library to get the terminal size. Many implementations do not support Windows, but `ts` has full Windows support.
-Run `go get github.com/olekukonko/ts` to download and install the package.
-
-#### Example
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/olekukonko/ts"
-)
-
-func main() {
-	size, _ := ts.GetSize()
-	fmt.Println(size.Col())  // Get Width
-	fmt.Println(size.Row())  // Get Height
-	fmt.Println(size.PosX()) // Get X position
-	fmt.Println(size.PosY()) // Get Y position
-}
-```
-
-[See Documentation](http://godoc.org/github.com/olekukonko/ts)
diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/doc.go b/Godeps/_workspace/src/github.com/olekukonko/ts/doc.go
deleted file mode 100644
index 50c63cae0..000000000
--- a/Godeps/_workspace/src/github.com/olekukonko/ts/doc.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 Oleku Konko All rights reserved.
-// Use of this source code is governed by a MIT
-// license that can be found in the LICENSE file.
-
-// This module is a Terminal API for the Go Programming Language.
-// The protocols were written in pure Go and work on Windows and Unix systems
-
-/**
-
-Simple Go library to get the terminal size. Many implementations do not support Windows, but `ts` has full Windows support.
-Run `go get github.com/olekukonko/ts` to download and install the package.
-
-Installation
-
-Minimum requirement is Go 1.1+, with full Windows support
-
-Example
-
-	package main
-
-	import (
-		"fmt"
-		"github.com/olekukonko/ts"
-	)
-
-	func main() {
-		size, _ := ts.GetSize()
-		fmt.Println(size.Col())  // Get Width
-		fmt.Println(size.Row())  // Get Height
-		fmt.Println(size.PosX()) // Get X position
-		fmt.Println(size.PosY()) // Get Y position
-	}
-
-**/
-
-package ts
diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts.go
deleted file mode 100644
index 35fdf7427..000000000
--- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 Oleku Konko All rights reserved.
-// Use of this source code is governed by a MIT
-// license that can be found in the LICENSE file.
-
-// This module is a Terminal API for the Go Programming Language.
-// The protocols were written in pure Go and works on windows and unix systems - -package ts - -// Return System Size -type Size struct { - row uint16 - col uint16 - posX uint16 - posY uint16 -} - -// Get Terminal Width -func (w Size) Col() int { - return int(w.col) -} - -// Get Terminal Height -func (w Size) Row() int { - return int(w.row) -} - -// Get Position X -func (w Size) PosX() int { - return int(w.posX) -} - -// Get Position Y -func (w Size) PosY() int { - return int(w.posY) -} diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_darwin.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_darwin.go deleted file mode 100644 index bdcf42b47..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_darwin.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build darwin - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -const ( - TIOCGWINSZ = 0x40087468 -) diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_linux.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_linux.go deleted file mode 100644 index ee2db6d47..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems -package ts - -const ( - TIOCGWINSZ = 0x5413 -) diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_other.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_other.go deleted file mode 100644 index bf8048192..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_other.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows,!darwin,!freebsd,!netbsd,!openbsd,!linux - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -const ( - TIOCGWINSZ = 0 -) diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go deleted file mode 100644 index 4998e7c01..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -package ts - -import ( - "fmt" - "testing" -) - -func ExampleGetSize() { - size, _ := GetSize() - fmt.Println(size.Col()) // Get Width - fmt.Println(size.Row()) // Get Height - fmt.Println(size.PosX()) // Get X position - fmt.Println(size.PosY()) // Get Y position -} - -func TestSize(t *testing.T) { - size, err := GetSize() - - if err != nil { - t.Fatal(err) - } - if size.Col() == 0 || size.Row() == 0 { - t.Fatalf("Screen Size Failed") - } -} diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_unix.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_unix.go deleted file mode 100644 index 8728b614c..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build freebsd netbsd openbsd - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -const ( - TIOCGWINSZ = 0x40087468 -) diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_windows.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_windows.go deleted file mode 100644 index 2f34d5699..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_windows.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -import ( - "syscall" - "unsafe" -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - - // Retrieves information about the specified console screen buffer. - // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx - screenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") -) - -// Contains information about a console screen buffer. -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx -type CONSOLE_SCREEN_BUFFER_INFO struct { - DwSize COORD - DwCursorPosition COORD - WAttributes uint16 - SrWindow SMALL_RECT - DwMaximumWindowSize COORD -} - -// Defines the coordinates of a character cell in a console screen buffer. -// The origin of the coordinate system (0,0) is at the top, left cell of the buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx -type COORD struct { - X, Y uint16 -} - -// Defines the coordinates of the upper left and lower right corners of a rectangle. 
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx -type SMALL_RECT struct { - Left, Top, Right, Bottom uint16 -} - -func GetSize() (ws Size, err error) { - var info CONSOLE_SCREEN_BUFFER_INFO - rc, _, err := screenBufferInfo.Call( - uintptr(syscall.Stdout), - uintptr(unsafe.Pointer(&info))) - - if rc == 0 { - return ws, err - } - - ws = Size{info.SrWindow.Bottom, - info.SrWindow.Right, - info.DwCursorPosition.X, - info.DwCursorPosition.Y} - - return ws, nil -} diff --git a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_x.go b/Godeps/_workspace/src/github.com/olekukonko/ts/ts_x.go deleted file mode 100644 index 1b260e381..000000000 --- a/Godeps/_workspace/src/github.com/olekukonko/ts/ts_x.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !windows - -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -import ( - "syscall" - "unsafe" -) - -// Get Windows Size -func GetSize() (ws Size, err error) { - _, _, ec := syscall.Syscall(syscall.SYS_IOCTL, - uintptr(syscall.Stdout), - uintptr(TIOCGWINSZ), - uintptr(unsafe.Pointer(&ws))) - - err = getError(ec) - - if TIOCGWINSZ == 0 && err != nil { - ws = Size{80, 25, 0, 0} - } - return ws, err -} - -func getError(ec interface{}) (err error) { - switch v := ec.(type) { - - case syscall.Errno: // Some implementation return syscall.Errno number - if v != 0 { - err = syscall.Errno(v) - } - - case error: // Some implementation return error - err = ec.(error) - default: - err = nil - } - return -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/.travis.yml b/Godeps/_workspace/src/github.com/rs/cors/.travis.yml deleted file mode 100644 index bbb5185a2..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go -go: -- 1.3 -- 1.4 diff --git a/Godeps/_workspace/src/github.com/rs/cors/LICENSE b/Godeps/_workspace/src/github.com/rs/cors/LICENSE deleted file mode 100644 index d8e2df5a4..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/rs/cors/README.md b/Godeps/_workspace/src/github.com/rs/cors/README.md
deleted file mode 100644
index 4fe320cf3..000000000
--- a/Godeps/_workspace/src/github.com/rs/cors/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Go CORS handler [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/cors) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/cors/master/LICENSE) [![build](https://img.shields.io/travis/rs/cors.svg?style=flat)](https://travis-ci.org/rs/cors)
-
-CORS is a `net/http` handler implementing the [W3C Cross-Origin Resource Sharing specification](http://www.w3.org/TR/cors/) in Go.
-
-## Getting Started
-
-After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`.
-
-```go
-package main
-
-import (
-	"net/http"
-
-	"github.com/rs/cors"
-)
-
-func main() {
-	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Content-Type", "application/json")
-		w.Write([]byte("{\"hello\": \"world\"}"))
-	})
-
-	// cors.Default() sets up the middleware with default options:
-	// all origins accepted with simple methods (GET, POST). See
-	// documentation below for more options.
-	handler := cors.Default().Handler(h)
-	http.ListenAndServe(":8080", handler)
-}
-```
-
-Install `cors`:
-
-    go get github.com/rs/cors
-
-Then run your server:
-
-    go run server.go
-
-The server now runs on `localhost:8080`:
-
-    $ curl -D - -H 'Origin: http://foo.com' http://localhost:8080/
-    HTTP/1.1 200 OK
-    Access-Control-Allow-Origin: foo.com
-    Content-Type: application/json
-    Date: Sat, 25 Oct 2014 03:43:57 GMT
-    Content-Length: 18
-
-    {"hello": "world"}
-
-### More Examples
-
-* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go)
-* [Goji](https://goji.io): [examples/goji/server.go](https://github.com/rs/cors/blob/master/examples/goji/server.go)
-* [Martini](http://martini.codegangsta.io): [examples/martini/server.go](https://github.com/rs/cors/blob/master/examples/martini/server.go)
-* [Negroni](https://github.com/codegangsta/negroni): [examples/negroni/server.go](https://github.com/rs/cors/blob/master/examples/negroni/server.go)
-* [Alice](https://github.com/justinas/alice): [examples/alice/server.go](https://github.com/rs/cors/blob/master/examples/alice/server.go)
-
-## Parameters
-
-Parameters are passed to the middleware through the `cors.New` method as follows:
-
-```go
-c := cors.New(cors.Options{
-	AllowedOrigins:   []string{"http://foo.com"},
-	AllowCredentials: true,
-})
-
-// Insert the middleware
-handler = c.Handler(handler)
-```
-
-* **AllowedOrigins** `[]string`: A list of origins a cross-domain request can be executed from. If the special `*` value is present in the list, all origins will be allowed. The default value is `*`.
-* **AllowOriginFunc** `func (origin string) bool`: A custom function to validate the origin. It takes the origin as an argument and returns true if allowed, false otherwise. If this option is set, the content of `AllowedOrigins` is ignored.
-* **AllowedMethods** `[]string`: A list of methods the client is allowed to use with cross-domain requests. Default value is simple methods (`GET` and `POST`).
-* **AllowedHeaders** `[]string`: A list of non-simple headers the client is allowed to use with cross-domain requests.
-* **ExposedHeaders** `[]string`: Indicates which response headers are safe to expose to the client, per the CORS specification.
-* **AllowCredentials** `bool`: Indicates whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates. The default is `false`.
-* **MaxAge** `int`: Indicates how long (in seconds) the results of a preflight request can be cached. The default is `0`, which stands for no max age.
-
-See [API documentation](http://godoc.org/github.com/rs/cors) for more info.
-
-## Benchmarks
-
-    BenchmarkWithout          20000000    64.6 ns/op      8 B/op    1 allocs/op
-    BenchmarkDefault          3000000      469 ns/op    114 B/op    2 allocs/op
-    BenchmarkAllowedOrigin    3000000      608 ns/op    114 B/op    2 allocs/op
-    BenchmarkPreflight        20000000    73.2 ns/op      0 B/op    0 allocs/op
-    BenchmarkPreflightHeader  20000000    73.6 ns/op      0 B/op    0 allocs/op
-    BenchmarkParseHeaderList  2000000      847 ns/op    184 B/op    6 allocs/op
-    BenchmarkParse…Single     5000000      290 ns/op     32 B/op    3 allocs/op
-    BenchmarkParse…Normalized 2000000      776 ns/op    160 B/op    6 allocs/op
-
-## Licenses
-
-All source code is licensed under the [MIT License](https://raw.github.com/rs/cors/master/LICENSE).
diff --git a/Godeps/_workspace/src/github.com/rs/cors/bench_test.go b/Godeps/_workspace/src/github.com/rs/cors/bench_test.go
deleted file mode 100644
index b6e3721de..000000000
--- a/Godeps/_workspace/src/github.com/rs/cors/bench_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package cors
-
-import (
-	"net/http"
-	"testing"
-)
-
-type FakeResponse struct {
-	header http.Header
-}
-
-func (r FakeResponse) Header() http.Header {
-	return r.header
-}
-
-func (r FakeResponse) WriteHeader(n int) {
-}
-
-func (r FakeResponse) Write(b []byte) (n int, err error) {
-	return len(b), nil
-}
-
-func BenchmarkWithout(b *testing.B) {
-	res := FakeResponse{http.Header{}}
-	req, _ := http.NewRequest("GET", "http://example.com/foo", nil)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		testHandler.ServeHTTP(res, req)
-	}
-}
-
-func BenchmarkDefault(b *testing.B) {
-	res := FakeResponse{http.Header{}}
-	req, _ := http.NewRequest("GET", "http://example.com/foo", nil)
-	req.Header.Add("Origin", "somedomain.com")
-	handler := Default().Handler(testHandler)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		handler.ServeHTTP(res, req)
-	}
-}
-
-func BenchmarkAllowedOrigin(b *testing.B) {
-	res := FakeResponse{http.Header{}}
-	req, _ := http.NewRequest("GET", "http://example.com/foo", nil)
-	req.Header.Add("Origin", "somedomain.com")
-	c := New(Options{
-		AllowedOrigins: []string{"somedomain.com"},
-	})
-	handler := c.Handler(testHandler)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		handler.ServeHTTP(res, req)
-	}
-}
-
-func BenchmarkPreflight(b *testing.B) {
-	res := FakeResponse{http.Header{}}
-	req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil)
-	req.Header.Add("Access-Control-Request-Method", "GET")
-	handler := Default().Handler(testHandler)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		handler.ServeHTTP(res, req)
-	}
-}
-
-func BenchmarkPreflightHeader(b *testing.B) {
-	res := FakeResponse{http.Header{}}
-	req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil)
-	req.Header.Add("Access-Control-Request-Method", "GET")
-	req.Header.Add("Access-Control-Request-Headers", "Accept")
-	handler := Default().Handler(testHandler)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
handler.ServeHTTP(res, req) - } -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/cors.go b/Godeps/_workspace/src/github.com/rs/cors/cors.go deleted file mode 100644 index c0da31d83..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/cors.go +++ /dev/null @@ -1,345 +0,0 @@ -/* -Package cors is net/http handler to handle CORS related requests -as defined by http://www.w3.org/TR/cors/ - -You can configure it by passing an option struct to cors.New: - - c := cors.New(cors.Options{ - AllowedOrigins: []string{"foo.com"}, - AllowedMethods: []string{"GET", "POST", "DELETE"}, - AllowCredentials: true, - }) - -Then insert the handler in the chain: - - handler = c.Handler(handler) - -See Options documentation for more options. - -The resulting handler is a standard net/http handler. -*/ -package cors - -import ( - "log" - "net/http" - "os" - "strconv" - "strings" -) - -// Options is a configuration container to setup the CORS middleware. -type Options struct { - // AllowedOrigins is a list of origins a cross-domain request can be executed from. - // If the special "*" value is present in the list, all origins will be allowed. - // Default value is ["*"] - AllowedOrigins []string - // AllowOriginFunc is a custom function to validate the origin. It take the origin - // as argument and returns true if allowed or false otherwise. If this option is - // set, the content of AllowedOrigins is ignored. - AllowOriginFunc func(origin string) bool - // AllowedMethods is a list of methods the client is allowed to use with - // cross-domain requests. Default value is simple methods (GET and POST) - AllowedMethods []string - // AllowedHeaders is list of non simple headers the client is allowed to use with - // cross-domain requests. - // If the special "*" value is present in the list, all headers will be allowed. - // Default value is [] but "Origin" is always appended to the list. - AllowedHeaders []string - // ExposedHeaders indicates which headers are safe to expose to the API of a CORS - // API specification - ExposedHeaders []string - // AllowCredentials indicates whether the request can include user credentials like - // cookies, HTTP authentication or client side SSL certificates. - AllowCredentials bool - // MaxAge indicates how long (in seconds) the results of a preflight request - // can be cached - MaxAge int - // Debugging flag adds additional output to debug server side CORS issues - Debug bool -} - -type Cors struct { - // Debug logger - log *log.Logger - // Set to true when allowed origins contains a "*" - allowedOriginsAll bool - // Normalized list of allowed origins - allowedOrigins []string - // Optional origin validator function - allowOriginFunc func(origin string) bool - // Set to true when allowed headers contains a "*" - allowedHeadersAll bool - // Normalized list of allowed headers - allowedHeaders []string - // Normalized list of allowed methods - allowedMethods []string - // Normalized list of exposed headers - exposedHeaders []string - allowCredentials bool - maxAge int -} - -// New creates a new Cors handler with the provided options. -func New(options Options) *Cors { - c := &Cors{ - exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey), - allowOriginFunc: options.AllowOriginFunc, - allowCredentials: options.AllowCredentials, - maxAge: options.MaxAge, - } - if options.Debug { - c.log = log.New(os.Stdout, "[cors] ", log.LstdFlags) - } - - // Normalize options - // NOTE: For origins and methods matching, the spec requires a case-sensitive matching. 
- // As it may error prone, we chose to ignore the spec here. - - // Allowed Origins - if len(options.AllowedOrigins) == 0 { - // Default is all origins - c.allowedOriginsAll = true - } else { - c.allowedOrigins = convert(options.AllowedOrigins, strings.ToLower) - for _, o := range c.allowedOrigins { - if o == "*" { - c.allowedOriginsAll = true - c.allowedOrigins = nil - break - } - } - } - - // Allowed Headers - if len(options.AllowedHeaders) == 0 { - // Use sensible defaults - c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"} - } else { - // Origin is always appended as some browsers will always request for this header at preflight - c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey) - for _, h := range options.AllowedHeaders { - if h == "*" { - c.allowedHeadersAll = true - c.allowedHeaders = nil - break - } - } - } - - // Allowed Methods - if len(options.AllowedMethods) == 0 { - // Default is spec's "simple" methods - c.allowedMethods = []string{"GET", "POST"} - } else { - c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) - } - - return c -} - -// Default creates a new Cors handler with default options -func Default() *Cors { - return New(Options{}) -} - -// Handler apply the CORS specification on the request, and add relevant CORS headers -// as necessary. -func (c *Cors) Handler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - c.logf("Handler: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - } else { - c.logf("Handler: Actual request") - c.handleActualRequest(w, r) - h.ServeHTTP(w, r) - } - }) -} - -// Martini compatible handler -func (c *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - c.logf("HandlerFunc: Preflight request") - c.handlePreflight(w, r) - } else { - c.logf("HandlerFunc: Actual request") - c.handleActualRequest(w, r) - } -} - -// Negroni compatible interface -func (c *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if r.Method == "OPTIONS" { - c.logf("ServeHTTP: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. 
One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - } else { - c.logf("ServeHTTP: Actual request") - c.handleActualRequest(w, r) - next(w, r) - } -} - -// handlePreflight handles pre-flight CORS requests -func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method != "OPTIONS" { - c.logf(" Preflight aborted: %s!=OPTIONS", r.Method) - return - } - if origin == "" { - c.logf(" Preflight aborted: empty origin") - return - } - if !c.isOriginAllowed(origin) { - c.logf(" Preflight aborted: origin '%s' not allowed", origin) - return - } - - reqMethod := r.Header.Get("Access-Control-Request-Method") - if !c.isMethodAllowed(reqMethod) { - c.logf(" Preflight aborted: method '%s' not allowed", reqMethod) - return - } - reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers")) - if !c.areHeadersAllowed(reqHeaders) { - c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders) - return - } - headers.Set("Access-Control-Allow-Origin", origin) - headers.Add("Vary", "Origin") - // Spec says: Since the list of methods can be unbounded, simply returning the method indicated - // by Access-Control-Request-Method (if supported) can be enough - headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod)) - if len(reqHeaders) > 0 { - - // Spec says: Since the list of headers can be unbounded, simply returning supported headers - // from Access-Control-Request-Headers can be enough - headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - if c.maxAge > 0 { - headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) - } - c.logf(" Preflight response headers: %v", headers) -} - -// handleActualRequest handles simple cross-origin requests, actual request or redirects -func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method == "OPTIONS" { - c.logf(" Actual request no headers added: method == %s", r.Method) - return - } - if origin == "" { - c.logf(" Actual request no headers added: missing origin") - return - } - if !c.isOriginAllowed(origin) { - c.logf(" Actual request no headers added: origin '%s' not allowed", origin) - return - } - - // Note that spec does define a way to specifically disallow a simple method like GET or - // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the - // spec doesn't instruct to check the allowed methods for simple cross-origin requests. - // We think it's a nice feature to be able to have control on those methods though. - if !c.isMethodAllowed(r.Method) { - if c.log != nil { - c.logf(" Actual request no headers added: method '%s' not allowed", - r.Method) - } - - return - } - headers.Set("Access-Control-Allow-Origin", origin) - headers.Add("Vary", "Origin") - if len(c.exposedHeaders) > 0 { - headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - c.logf(" Actual response added headers: %v", headers) -} - -// convenience method. checks if debugging is turned on before printing -func (c *Cors) logf(format string, a ...interface{}) { - if c.log != nil { - c.log.Printf(format, a...) 
-	}
-}
-
-// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
-// on the endpoint
-func (c *Cors) isOriginAllowed(origin string) bool {
-	if c.allowOriginFunc != nil {
-		return c.allowOriginFunc(origin)
-	}
-	if c.allowedOriginsAll {
-		return true
-	}
-	origin = strings.ToLower(origin)
-	for _, o := range c.allowedOrigins {
-		if o == origin {
-			return true
-		}
-	}
-	return false
-}
-
-// isMethodAllowed checks if a given method can be used as part of a cross-domain request
-// on the endpoint
-func (c *Cors) isMethodAllowed(method string) bool {
-	if len(c.allowedMethods) == 0 {
-		// If no method is allowed, always return false, even for preflight requests
-		return false
-	}
-	method = strings.ToUpper(method)
-	if method == "OPTIONS" {
-		// Always allow preflight requests
-		return true
-	}
-	for _, m := range c.allowedMethods {
-		if m == method {
-			return true
-		}
-	}
-	return false
-}
-
-// areHeadersAllowed checks if a given list of headers is allowed to be used within
-// a cross-domain request.
-func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool {
-	if c.allowedHeadersAll || len(requestedHeaders) == 0 {
-		return true
-	}
-	for _, header := range requestedHeaders {
-		header = http.CanonicalHeaderKey(header)
-		found := false
-		for _, h := range c.allowedHeaders {
-			if h == header {
-				found = true
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
diff --git a/Godeps/_workspace/src/github.com/rs/cors/cors_test.go b/Godeps/_workspace/src/github.com/rs/cors/cors_test.go
deleted file mode 100644
index 1a7f0e4a4..000000000
--- a/Godeps/_workspace/src/github.com/rs/cors/cors_test.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package cors
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"regexp"
-	"testing"
-)
-
-var testHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-	w.Write([]byte("bar"))
-})
-
-func assertHeaders(t *testing.T, resHeaders http.Header, reqHeaders map[string]string) {
-	for name, value := range reqHeaders {
-		if resHeaders.Get(name) != value {
-			t.Errorf("Invalid header `%s', wanted `%s', got `%s'", name, value, resHeaders.Get(name))
-		}
-	}
-}
-
-func TestNoConfig(t *testing.T) {
-	s := New(Options{
-		// Intentionally left blank.
- }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestWildcardOrigin(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"*"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestAllowedOrigin(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestDisallowedOrigin(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://barbaz.com") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestAllowedOriginFunc(t *testing.T) { - r, _ := regexp.Compile("^http://foo") - s := New(Options{ - AllowOriginFunc: func(o string) bool { - println(r.MatchString(o)) - return r.MatchString(o) - }, - }) - - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - - res := httptest.NewRecorder() - req.Header.Set("Origin", "http://foobar.com") - s.Handler(testHandler).ServeHTTP(res, req) - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - }) - - res = httptest.NewRecorder() - req.Header.Set("Origin", "http://barfoo.com") - s.Handler(testHandler).ServeHTTP(res, req) - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "", - }) -} - -func TestAllowedMethod(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowedMethods: []string{"PUT", "DELETE"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "PUT") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - 
"Access-Control-Allow-Methods": "PUT", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestDisallowedMethod(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowedMethods: []string{"PUT", "DELETE"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "PATCH") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestAllowedHeader(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowedHeaders: []string{"X-Header-1", "x-header-2"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "GET") - req.Header.Add("Access-Control-Request-Headers", "X-Header-2, X-HEADER-1") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "GET", - "Access-Control-Allow-Headers": "X-Header-2, X-Header-1", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestAllowedWildcardHeader(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowedHeaders: []string{"*"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "GET") - req.Header.Add("Access-Control-Request-Headers", "X-Header-2, X-HEADER-1") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "GET", - "Access-Control-Allow-Headers": "X-Header-2, X-Header-1", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestDisallowedHeader(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowedHeaders: []string{"X-Header-1", "x-header-2"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "GET") - req.Header.Add("Access-Control-Request-Headers", "X-Header-3, X-Header-1") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestOriginHeader(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", 
"http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "GET") - req.Header.Add("Access-Control-Request-Headers", "origin") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "GET", - "Access-Control-Allow-Headers": "Origin", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} - -func TestExposedHeader(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - ExposedHeaders: []string{"X-Header-1", "x-header-2"}, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "X-Header-1, X-Header-2", - }) -} - -func TestAllowedCredentials(t *testing.T) { - s := New(Options{ - AllowedOrigins: []string{"http://foobar.com"}, - AllowCredentials: true, - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) - req.Header.Add("Origin", "http://foobar.com") - req.Header.Add("Access-Control-Request-Method", "GET") - - s.Handler(testHandler).ServeHTTP(res, req) - - assertHeaders(t, res.Header(), map[string]string{ - "Access-Control-Allow-Origin": "http://foobar.com", - "Access-Control-Allow-Methods": "GET", - "Access-Control-Allow-Headers": "", - "Access-Control-Allow-Credentials": "true", - "Access-Control-Max-Age": "", - "Access-Control-Expose-Headers": "", - }) -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/alice/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/alice/server.go deleted file mode 100644 index 00a6a0bf1..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/alice/server.go +++ /dev/null @@ -1,24 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" - "github.com/justinas/alice" -) - -func main() { - c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - }) - - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - chain := alice.New(c.Handler).Then(mux) - http.ListenAndServe(":8080", chain) -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/default/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/default/server.go deleted file mode 100644 index 94e49bb5b..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/default/server.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" -) - -func main() { - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - // Use default options - handler := cors.Default().Handler(h) - http.ListenAndServe(":8080", handler) -} diff --git 
a/Godeps/_workspace/src/github.com/rs/cors/examples/goji/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/goji/server.go deleted file mode 100644 index 96f63cfd9..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/goji/server.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" - "github.com/zenazn/goji" -) - -func main() { - c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - }) - goji.Use(c.Handler) - - goji.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - goji.Serve() -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/martini/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/martini/server.go deleted file mode 100644 index 199d989e6..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/martini/server.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "github.com/go-martini/martini" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" - "github.com/martini-contrib/render" -) - -func main() { - c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - }) - - m := martini.Classic() - m.Use(render.Renderer()) - m.Use(c.HandlerFunc) - - m.Get("/", func(r render.Render) { - r.JSON(200, map[string]interface{}{"hello": "world"}) - }) - - m.Run() -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/negroni/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/negroni/server.go deleted file mode 100644 index b9e6f386a..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/negroni/server.go +++ /dev/null @@ -1,26 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/codegangsta/negroni" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" -) - -func main() { - c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - }) - - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - n := negroni.Classic() - n.Use(c) - n.UseHandler(mux) - n.Run(":3000") -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/nethttp/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/nethttp/server.go deleted file mode 100644 index a11fe064c..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/nethttp/server.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" -) - -func main() { - c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - }) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - http.ListenAndServe(":8080", c.Handler(handler)) -} diff --git a/Godeps/_workspace/src/github.com/rs/cors/examples/openbar/server.go b/Godeps/_workspace/src/github.com/rs/cors/examples/openbar/server.go deleted file mode 100644 index 746a15a16..000000000 --- a/Godeps/_workspace/src/github.com/rs/cors/examples/openbar/server.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/rs/cors" -) - -func main() { - c := 
cors.New(cors.Options{
-		AllowedOrigins:   []string{"*"},
-		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE"},
-		AllowCredentials: true,
-	})
-
-	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Content-Type", "application/json")
-		w.Write([]byte("{\"hello\": \"world\"}"))
-	})
-
-	http.ListenAndServe(":8080", c.Handler(h))
-}
diff --git a/Godeps/_workspace/src/github.com/rs/cors/utils.go b/Godeps/_workspace/src/github.com/rs/cors/utils.go
deleted file mode 100644
index e8823a639..000000000
--- a/Godeps/_workspace/src/github.com/rs/cors/utils.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package cors
-
-const toLower = 'a' - 'A'
-
-type converter func(string) string
-
-// convert converts a list of strings using the passed converter function
-func convert(s []string, c converter) []string {
-	out := []string{}
-	for _, i := range s {
-		out = append(out, c(i))
-	}
-	return out
-}
-
-// parseHeaderList tokenizes and normalizes a string containing a list of headers
-func parseHeaderList(headerList string) []string {
-	l := len(headerList)
-	h := make([]byte, 0, l)
-	upper := true
-	// Estimate the number of headers in order to allocate the right slice size
-	t := 0
-	for i := 0; i < l; i++ {
-		if headerList[i] == ',' {
-			t++
-		}
-	}
-	headers := make([]string, 0, t)
-	for i := 0; i < l; i++ {
-		b := headerList[i]
-		if b >= 'a' && b <= 'z' {
-			if upper {
-				h = append(h, b-toLower)
-			} else {
-				h = append(h, b)
-			}
-		} else if b >= 'A' && b <= 'Z' {
-			if !upper {
-				h = append(h, b+toLower)
-			} else {
-				h = append(h, b)
-			}
-		} else if b == '-' || (b >= '0' && b <= '9') {
-			h = append(h, b)
-		}
-
-		if b == ' ' || b == ',' || i == l-1 {
-			if len(h) > 0 {
-				// Flush the found header
-				headers = append(headers, string(h))
-				h = h[:0]
-				upper = true
-			}
-		} else {
-			upper = b == '-'
-		}
-	}
-	return headers
-}
diff --git a/Godeps/_workspace/src/github.com/rs/cors/utils_test.go b/Godeps/_workspace/src/github.com/rs/cors/utils_test.go
deleted file mode 100644
index de562b772..000000000
--- a/Godeps/_workspace/src/github.com/rs/cors/utils_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package cors
-
-import (
-	"strings"
-	"testing"
-)
-
-func TestConvert(t *testing.T) {
-	s := convert([]string{"A", "b", "C"}, strings.ToLower)
-	e := []string{"a", "b", "c"}
-	if s[0] != e[0] || s[1] != e[1] || s[2] != e[2] {
-		t.Errorf("%v != %v", s, e)
-	}
-}
-
-func TestParseHeaderList(t *testing.T) {
-	h := parseHeaderList("header, second-header, THIRD-HEADER, Numb3r3d-H34d3r")
-	e := []string{"Header", "Second-Header", "Third-Header", "Numb3r3d-H34d3r"}
-	if h[0] != e[0] || h[1] != e[1] || h[2] != e[2] {
-		t.Errorf("%v != %v", h, e)
-	}
-}
-
-func TestParseHeaderListEmpty(t *testing.T) {
-	if len(parseHeaderList("")) != 0 {
-		t.Error("should be empty slice")
-	}
-	if len(parseHeaderList(" , ")) != 0 {
-		t.Error("should be empty slice")
-	}
-}
-
-func BenchmarkParseHeaderList(b *testing.B) {
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		parseHeaderList("header, second-header, THIRD-HEADER")
-	}
-}
-
-func BenchmarkParseHeaderListSingle(b *testing.B) {
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		parseHeaderList("header")
-	}
-}
-
-func BenchmarkParseHeaderListNormalized(b *testing.B) {
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		parseHeaderList("Header1, Header2, Third-Header")
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml b/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml
deleted file mode 100644
index 0bbdc41c9..000000000
--- a/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml
a/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 -sudo: false -notifications: - email: false diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE b/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 6a1fb910d..000000000 --- a/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2015 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/README.md b/Godeps/_workspace/src/github.com/satori/go.uuid/README.md deleted file mode 100644 index 759f77c8c..000000000 --- a/Godeps/_workspace/src/github.com/satori/go.uuid/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# UUID package for Go language - -[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) -[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) - -This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported. - -With 100% test coverage and benchmarks out of the box. - -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -The UUID package requires any stable version of the Go programming language. - -It is tested against the following versions of Go: 1.0-1.4 - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something went wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
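To make the version semantics in the list above concrete, here is a minimal sketch (not part of the vendored code) contrasting the deterministic, name-based `NewV5` with the random `NewV4`; it assumes the upstream import path `github.com/satori/go.uuid` and uses only API that appears in this diff:

```go
package main

import (
	"fmt"

	"github.com/satori/go.uuid"
)

func main() {
	// v5 hashes (SHA-1) the namespace UUID plus the name, so the
	// same inputs always yield the same UUID.
	u1 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	u2 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Println(uuid.Equal(u1, u2)) // true

	// v4 is drawn from crypto/rand, so two calls differ
	// (with overwhelming probability).
	fmt.Println(uuid.Equal(uuid.NewV4(), uuid.NewV4())) // false
}
```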
- -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2015 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go b/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go deleted file mode 100644 index 9a85f7c6b..000000000 --- a/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (C) 2013-2014 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
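The benchmarks below exercise `FromString` with the three accepted input forms: canonical, URN-prefixed, and braced. As a quick hedged illustration of that parsing API, again assuming the upstream `github.com/satori/go.uuid` import path:

```go
package main

import (
	"fmt"

	"github.com/satori/go.uuid"
)

func main() {
	// The three input forms accepted by FromString/UnmarshalText,
	// matching the strings used by the benchmarks below.
	inputs := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
	}
	for _, s := range inputs {
		u, err := uuid.FromString(s)
		fmt.Println(u, err) // same UUID, nil error, for all three forms
	}
}
```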
- -package uuid - -import ( - "testing" -) - -func BenchmarkFromBytes(b *testing.B) { - bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - for i := 0; i < b.N; i++ { - FromBytes(bytes) - } -} - -func BenchmarkFromString(b *testing.B) { - s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkFromStringUrn(b *testing.B) { - s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkFromStringWithBrackets(b *testing.B) { - s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkNewV1(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV1() - } -} - -func BenchmarkNewV2(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV2(DomainPerson) - } -} - -func BenchmarkNewV3(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV3(NamespaceDNS, "www.example.com") - } -} - -func BenchmarkNewV4(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV4() - } -} - -func BenchmarkNewV5(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV5(NamespaceDNS, "www.example.com") - } -} - -func BenchmarkMarshalBinary(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.MarshalBinary() - } -} - -func BenchmarkMarshalText(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.MarshalText() - } -} - -func BenchmarkUnmarshalBinary(b *testing.B) { - bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - u := UUID{} - for i := 0; i < b.N; i++ { - u.UnmarshalBinary(bytes) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - u := UUID{} - for i := 0; i < b.N; i++ { - u.UnmarshalText(bytes) - } -} - -func BenchmarkMarshalToString(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.String() - } -} diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index e846485dc..000000000 --- a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (C) 2013-2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). 
-// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/binary" - "encoding/hex" - "fmt" - "hash" - "net" - "os" - "sync" - "time" -) - -// UUID layout variants. -const ( - VariantNCS = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -// Used in string method conversion -const dash byte = '-' - -// UUID v1/v2 storage. -var ( - storageMutex sync.Mutex - clockSequence uint16 - lastTime uint64 - hardwareAddr [6]byte - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -// Epoch calculation function -var epochFunc func() uint64 - -// Initialize storage -func init() { - buf := make([]byte, 2) - rand.Read(buf) - clockSequence = binary.BigEndian.Uint16(buf) - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - rand.Read(hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - hardwareAddr[0] |= 0x01 - - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(hardwareAddr[:], iface.HardwareAddr) - break - } - } - } - epochFunc = unixTimeFunc -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [16]byte - -// The nil UUID is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") -) - -// And returns result of binary AND of two UUIDs. -func And(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] & u2[i] - } - return u -} - -// Or returns result of binary OR of two UUIDs. -func Or(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] | u2[i] - } - return u -} - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() uint { - return uint(u[6] >> 4) -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() uint { - switch { - case (u[8] & 0x80) == 0x00: - return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: - return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: - return VariantMicrosoft - } - return VariantFuture -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. 
-func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: invalid UUID string: %s", text) - return - } - - if bytes.Equal(text[:9], urnPrefix) { - text = text[9:] - } else if text[0] == '{' { - text = text[1:] - } - - b := u[:] - - for _, byteGroup := range byteGroups { - if text[0] == '-' { - text = text[1:] - } - - _, err = hex.Decode(b[:byteGroup/2], text[:byteGroup]) - - if err != nil { - return - } - - text = text[byteGroup:] - b = b[byteGroup/2:] - } - - return -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp and clock sequence. -func getStorage() (uint64, uint16) { - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence -} - -// NewV1 returns UUID based on current timestamp and MAC address. 
-func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr[:]) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - u := UUID{} - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - timeNow, clockSeq := getStorage() - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr[:]) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - u := UUID{} - rand.Read(u[:]) - u.SetVersion(4) - u.SetVariant() - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(5) - u.SetVariant() - - return u -} - -// Returns UUID based on hashing of namespace UUID and name. -func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go deleted file mode 100644 index a912d6a79..000000000 --- a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright (C) 2013, 2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
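The tests below verify the version/variant bit layout that `SetVersion` and `SetVariant` (above) implement. A self-contained sketch of that encoding, assuming the same `[16]byte` layout:

```go
package main

import "fmt"

func main() {
	var u [16]byte

	// The version lives in the high nibble of byte 6 (as in SetVersion).
	u[6] = (u[6] & 0x0f) | (4 << 4)

	// The RFC 4122 variant sets the two high bits of byte 8 to "10"
	// (as in SetVariant).
	u[8] = (u[8] & 0xbf) | 0x80

	fmt.Println("version:", u[6]>>4)         // 4
	fmt.Printf("variant bits: %08b\n", u[8]) // 10000000
}
```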
- -package uuid - -import ( - "bytes" - "testing" -) - -func TestBytes(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - if !bytes.Equal(u.Bytes(), bytes1) { - t.Errorf("Incorrect bytes representation for UUID: %s", u) - } -} - -func TestString(t *testing.T) { - if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" { - t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String()) - } -} - -func TestEqual(t *testing.T) { - if !Equal(NamespaceDNS, NamespaceDNS) { - t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS) - } - - if Equal(NamespaceDNS, NamespaceURL) { - t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL) - } -} - -func TestOr(t *testing.T) { - u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} - u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} - - u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - if !Equal(u, Or(u1, u2)) { - t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2)) - } -} - -func TestAnd(t *testing.T) { - u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} - u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} - - u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if !Equal(u, And(u1, u2)) { - t.Errorf("Incorrect bitwise AND result %s", And(u1, u2)) - } -} - -func TestVersion(t *testing.T) { - u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u.Version() != 1 { - t.Errorf("Incorrect version for UUID: %d", u.Version()) - } -} - -func TestSetVersion(t *testing.T) { - u := UUID{} - u.SetVersion(4) - - if u.Version() != 4 { - t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version()) - } -} - -func TestVariant(t *testing.T) { - u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u1.Variant() != VariantNCS { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant()) - } - - u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u2.Variant() != VariantRFC4122 { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant()) - } - - u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u3.Variant() != VariantMicrosoft { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant()) - } - - u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u4.Variant() != VariantFuture { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant()) - } -} - -func TestSetVariant(t *testing.T) { - u := new(UUID) - u.SetVariant() - - if u.Variant() != VariantRFC4122 { - t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant()) - } -} - -func TestFromBytes(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 
0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1, err := FromBytes(b1) - if err != nil { - t.Errorf("Error parsing UUID from bytes: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - - _, err = FromBytes(b2) - if err == nil { - t.Errorf("Should return error parsing from empty byte slice, got %s", err) - } -} - -func TestMarshalBinary(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - b2, err := u.MarshalBinary() - if err != nil { - t.Errorf("Error marshaling UUID: %s", err) - } - - if !bytes.Equal(b1, b2) { - t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) - } -} - -func TestUnmarshalBinary(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.UnmarshalBinary(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - u2 := UUID{} - - err = u2.UnmarshalBinary(b2) - if err == nil { - t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) - } -} - -func TestFromString(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - _, err := FromString("") - if err == nil { - t.Errorf("Should return error trying to parse empty string, got %s", err) - } - - u1, err := FromString(s1) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - u2, err := FromString(s2) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u2) { - t.Errorf("UUIDs should be equal: %s and %s", u, u2) - } - - u3, err := FromString(s3) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u3) { - t.Errorf("UUIDs should be equal: %s and %s", u, u3) - } -} - -func TestMarshalText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - b2, err := u.MarshalText() - if err != nil { - t.Errorf("Error marshaling UUID: %s", err) - } - - if !bytes.Equal(b1, b2) { - t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) - } -} - -func TestUnmarshalText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.UnmarshalText(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte("") - u2 := UUID{} - - err = u2.UnmarshalText(b2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestScanBinary(t *testing.T) { - u := 
UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.Scan(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - u2 := UUID{} - - err = u2.Scan(b2) - if err == nil { - t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) - } -} - -func TestScanString(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - u1 := UUID{} - err := u1.Scan(s1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - s2 := "" - u2 := UUID{} - - err = u2.Scan(s2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestScanText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.Scan(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte("") - u2 := UUID{} - - err = u2.Scan(b2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestScanUnsupported(t *testing.T) { - u := UUID{} - - err := u.Scan(true) - if err == nil { - t.Errorf("Should return error trying to unmarshal from bool") - } -} - -func TestNewV1(t *testing.T) { - u := NewV1() - - if u.Version() != 1 { - t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant()) - } - - u1 := NewV1() - u2 := NewV1() - - if Equal(u1, u2) { - t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2) - } - - oldFunc := epochFunc - epochFunc = func() uint64 { return 0 } - - u3 := NewV1() - u4 := NewV1() - - if Equal(u3, u4) { - t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4) - } - - epochFunc = oldFunc -} - -func TestNewV2(t *testing.T) { - u1 := NewV2(DomainPerson) - - if u1.Version() != 2 { - t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version()) - } - - if u1.Variant() != VariantRFC4122 { - t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant()) - } - - u2 := NewV2(DomainGroup) - - if u2.Version() != 2 { - t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version()) - } - - if u2.Variant() != VariantRFC4122 { - t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant()) - } -} - -func TestNewV3(t *testing.T) { - u := NewV3(NamespaceDNS, "www.example.com") - - if u.Version() != 3 { - t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant()) - } - - if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" { - t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) - } - - u = NewV3(NamespaceDNS, "python.org") - - if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" { - t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) - } - - u1 := 
NewV3(NamespaceDNS, "golang.org") - u2 := NewV3(NamespaceDNS, "golang.org") - if !Equal(u1, u2) { - t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2) - } - - u3 := NewV3(NamespaceDNS, "example.com") - if Equal(u1, u3) { - t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u3) - } - - u4 := NewV3(NamespaceURL, "golang.org") - if Equal(u1, u4) { - t.Errorf("UUIDv3 generated same UUIDs for same names in different namespaces: %s and %s", u1, u4) - } -} - -func TestNewV4(t *testing.T) { - u := NewV4() - - if u.Version() != 4 { - t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant()) - } -} - -func TestNewV5(t *testing.T) { - u := NewV5(NamespaceDNS, "www.example.com") - - if u.Version() != 5 { - t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant()) - } - - u = NewV5(NamespaceDNS, "python.org") - - if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" { - t.Errorf("UUIDv5 generated incorrectly: %s", u.String()) - } - - u1 := NewV5(NamespaceDNS, "golang.org") - u2 := NewV5(NamespaceDNS, "golang.org") - if !Equal(u1, u2) { - t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2) - } - - u3 := NewV5(NamespaceDNS, "example.com") - if Equal(u1, u3) { - t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u3) - } - - u4 := NewV5(NamespaceURL, "golang.org") - if Equal(u1, u4) { - t.Errorf("UUIDv5 generated same UUIDs for same names in different namespaces: %s and %s", u1, u4) - } -} diff --git a/Godeps/_workspace/src/github.com/steakknife/hamming/MIT-LICENSE.txt b/Godeps/_workspace/src/github.com/steakknife/hamming/MIT-LICENSE.txt deleted file mode 100644 index ccf77fe46..000000000 --- a/Godeps/_workspace/src/github.com/steakknife/hamming/MIT-LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright © 2014, 2015 Barry Allard - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
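The `hamming` package removed below computes Hamming distance as the population count of an XOR, using a lookup table for bytes and a SWAR (SIMD-within-a-register) fold for `uint64` (see `hamming.go` further down). A standalone sketch of the `uint64` technique, not the vendored code itself:

```go
package main

import "fmt"

// countBits folds pairwise bit counts into nibbles, then multiplies by
// 0x0101...01 so the top byte accumulates the sum of all byte counts.
func countBits(x uint64) int {
	const (
		m1  = 0x5555555555555555 // 0101...
		m2  = 0x3333333333333333 // 00110011...
		m4  = 0x0f0f0f0f0f0f0f0f // 4 zeros, 4 ones...
		h01 = 0x0101010101010101
	)
	x -= (x >> 1) & m1             // count of each 2 bits
	x = (x & m2) + ((x >> 2) & m2) // count of each 4 bits
	x = (x + (x >> 4)) & m4        // count of each 8 bits
	return int((x * h01) >> 56)    // sum of all byte counts
}

func main() {
	// Hamming distance is the popcount of the XOR.
	fmt.Println(countBits(0xFF ^ 0x00)) // 8
}
```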
diff --git a/Godeps/_workspace/src/github.com/steakknife/hamming/README.md b/Godeps/_workspace/src/github.com/steakknife/hamming/README.md deleted file mode 100644 index c7a791a1d..000000000 --- a/Godeps/_workspace/src/github.com/steakknife/hamming/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# hamming distance calculations in Go - -Copyright © 2014, 2015 Barry Allard - -[MIT license](MIT-LICENSE.txt) - -## Usage - -```go -import "github.com/steakknife/hamming" - -// ... - -// hamming distance between values -hamming.Byte(0xFF, 0x00) // 8 -hamming.Byte(0x00, 0x00) // 0 - -// just count bits in a byte -hamming.CountBitsByte(0xA5) // 4 -``` - -See help in the [docs](https://godoc.org/github.com/steakknife/hamming) - -## Get - - go get -u github.com/steakknife/hamming # master is always stable - -## Source - -- On the web: https://github.com/steakknife/hamming - -- Git: `git clone https://github.com/steakknife/hamming` - -## Contact - -- [Feedback](mailto:barry.allard@gmail.com) - -- [Issues](https://github.com/steakknife/hamming/issues) - -## License - -[MIT license](MIT-LICENSE.txt) - -Copyright © 2014, 2015 Barry Allard diff --git a/Godeps/_workspace/src/github.com/steakknife/hamming/hamming.go b/Godeps/_workspace/src/github.com/steakknife/hamming/hamming.go deleted file mode 100644 index 311fcd9c5..000000000 --- a/Godeps/_workspace/src/github.com/steakknife/hamming/hamming.go +++ /dev/null @@ -1,97 +0,0 @@ -// -// hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015 Barry Allard -// -// MIT license -// -// -// Usage -// -// The functions are named (CountBits)?(Byte|Uint64)s?. The plural forms are for slices. The CountBits.+ forms are Population Count only, while the bare-type forms are Hamming distance. -// -// import "github.com/steakknife/hamming" -// -// // ... -// -// // hamming distance between values -// hamming.Byte(0xFF, 0x00) // 8 -// hamming.Byte(0x00, 0x00) // 0 -// -// // just count bits in a byte -// hamming.CountBitsByte(0xA5) // 4 -// -package hamming - -// SSE4.x PopCnt is 10x slower -// References: check out Hacker's Delight - -const ( - m1 uint64 = 0x5555555555555555 //binary: 0101... - m2 uint64 = 0x3333333333333333 //binary: 00110011.. - m4 uint64 = 0x0f0f0f0f0f0f0f0f //binary: 4 zeros, 4 ones ... - m8 uint64 = 0x00ff00ff00ff00ff //binary: 8 zeros, 8 ones ... - m16 uint64 = 0x0000ffff0000ffff //binary: 16 zeros, 16 ones ... - m32 uint64 = 0x00000000ffffffff //binary: 32 zeros, 32 ones - hff uint64 = 0xffffffffffffffff //binary: all ones - h01 uint64 = 0x0101010101010101 //the sum of 256 to the power of 0,1,2,3...
-) - -var table = [256]byte{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8} - -// hamming distance of two uint64's -func Uint64(x, y uint64) int { - return CountBitsUint64(x ^ y) -} - -// hamming distance of two uint64 buffers, of which the size of the first argument is used for both (panics if b1 is smaller than b0, does not compare b1 beyond length of b0) -func Uint64s(b0, b1 []uint64) int { - d := 0 - for i, x := range b0 { - d += Uint64(x, b1[i]) - } - return d -} - -// hamming distance of two bytes -func Byte(x, y byte) int { - return CountBitsByte(x ^ y) -} - -// hamming distance of two byte buffers, of which the size of the first argument is used for both (panics if b1 is smaller than b0, does not compare b1 beyond length of b0) -func Bytes(b0, b1 []byte) int { - d := 0 - for i, x := range b0 { - d += Byte(x, b1[i]) - } - return d -} - -func CountBitsUint64(x uint64) int { - x -= (x >> 1) & m1 // put count of each 2 bits into those 2 bits - x = (x & m2) + ((x >> 2) & m2) // put count of each 4 bits into those 4 bits - x = (x + (x >> 4)) & m4 // put count of each 8 bits into those 8 bits - return int((x * h01) >> 56) // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ... 
-} - -func CountBitsUint64s(b []uint64) int { - c := 0 - for _, x := range b { - c += CountBitsUint64(x) - } - return c -} - -func CountBitsByte(x byte) int { - return int(table[x]) -} - -func CountBitsBytes(b []byte) int { - c := 0 - for _, x := range b { - c += CountBitsByte(x) - } - return c -} diff --git a/Godeps/_workspace/src/github.com/steakknife/hamming/hamming_test.go b/Godeps/_workspace/src/github.com/steakknife/hamming/hamming_test.go deleted file mode 100644 index 44723d2d6..000000000 --- a/Godeps/_workspace/src/github.com/steakknife/hamming/hamming_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// -// hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015 Barry Allard -// -// MIT license -// -package hamming - -import ( - "testing" -) - -type testCountBitsUint64Case struct { - x uint64 - n int -} - -type testCountBitsByteCase struct { - x byte - n int -} - -type testBytesCase struct { - b0, b1 []byte - n int -} - -type testUint64sCase struct { - b0, b1 []uint64 - n int -} - -var testCountBitsByteCases = []testCountBitsByteCase{ - {0x00, 0}, - {0x01, 1}, - {0x02, 1}, - {0x03, 2}, - {0xaa, 4}, - {0x55, 4}, - {0x7f, 7}, - {0xff, 8}, -} - -var testCountBitsUint64Cases = []testCountBitsUint64Case{ - {0x00, 0}, - {0x01, 1}, - {0x02, 1}, - {0x03, 2}, - {0xaa, 4}, - {0x55, 4}, - {0x7f, 7}, - {0xff, 8}, - {0xffff, 16}, - {0xffffffff, 32}, - {0x1ffffffff, 33}, - {0x3ffffffff, 34}, - {0x7ffffffff, 35}, - {0xfffffffff, 36}, - {0x3fffffffffffffff, 62}, - {0x7fffffffffffffff, 63}, - {0xffffffffffffffff, 64}, -} - -var testBytesCases = []testBytesCase{ - {[]byte{}, []byte{}, 0}, - {[]byte{1}, []byte{0}, 1}, - {[]byte{1}, []byte{2}, 2}, - {[]byte{1, 0}, []byte{0, 1}, 2}, - {[]byte{1, 0}, []byte{0, 1}, 2}, -} - -var testUint64sCases = []testUint64sCase{ - {[]uint64{}, []uint64{}, 0}, - {[]uint64{1}, []uint64{0}, 1}, - {[]uint64{1}, []uint64{2}, 2}, - {[]uint64{1, 0}, []uint64{0, 1}, 2}, - {[]uint64{1, 0}, []uint64{0, 1}, 2}, -} - -func TestCountBitByte(t *testing.T) { - for _, c := range testCountBitsByteCases { - if actualN := CountBitsByte(c.x); actualN != c.n { - t.Fatal("CountBitsByte(", c.x, ") = ", actualN, " != ", c.n) - } else { - t.Log("CountBitsByte(", c.x, ") == ", c.n) - } - } -} - -func TestBytes(t *testing.T) { - for _, c := range testBytesCases { - if actualN := Bytes(c.b0, c.b1); actualN != c.n { - t.Fatal("Bytes(", c.b0, ",", c.b1, ") = ", actualN, " != ", c.n) - } else { - t.Log("Bytes(", c.b0, ",", c.b1, ") == ", c.n) - } - } -} - -func TestUint64s(t *testing.T) { - for _, c := range testUint64sCases { - if actualN := Uint64s(c.b0, c.b1); actualN != c.n { - t.Fatal("Uint64s(", c.b0, ",", c.b1, ") = ", actualN, " != ", c.n) - } else { - t.Log("Uint64s(", c.b0, ",", c.b1, ") == ", c.n) - } - } -} - -func TestCountBitUint64(t *testing.T) { - for _, c := range testCountBitsUint64Cases { - if actualN := CountBitsUint64(c.x); actualN != c.n { - t.Fatal("CountBitsUint64(", c.x, ") = ", actualN, " != ", c.n) - } else { - t.Log("CountBitsUint64(", c.x, ") == ", c.n) - } - } -} - -func BenchmarkCountBitsUint64(b *testing.B) { - j := 0 - for i := 0; i < b.N; i++ { - CountBitsUint64(testCountBitsUint64Cases[j].x) - j++ - if j == len(testCountBitsUint64Cases) { - j = 0 - } - } -} - -func BenchmarkCountBitsByte(b *testing.B) { - j := 0 - for i := 0; i < b.N; i++ { - CountBitsByte(testCountBitsByteCases[j].x) - j++ - if j == len(testCountBitsByteCases) { - j = 0 - } - } -} diff --git 
a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index 88993e87b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) -} - -const ( - batchHdrLen = 8 + 4 - batchGrowRec = 3000 -) - -type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -// Batch is a write batch. -type Batch struct { - data []byte - rLen, bLen int - seq uint64 - sync bool -} - -func (b *Batch) grow(n int) { - off := len(b.data) - if off == 0 { - off = batchHdrLen - if b.data != nil { - b.data = b.data[:off] - } - } - if cap(b.data)-off < n { - if b.data == nil { - b.data = make([]byte, off, off+n) - } else { - odata := b.data - div := 1 - if b.rLen > batchGrowRec { - div = b.rLen / batchGrowRec - } - b.data = make([]byte, off, off+n+(off-batchHdrLen)/div) - copy(b.data, odata) - } - } -} - -func (b *Batch) appendRec(kt kType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - off := len(b.data) - data := b.data[:off+n] - data[off] = byte(kt) - off += 1 - off += binary.PutUvarint(data[off:], uint64(len(key))) - copy(data[off:], key) - off += len(key) - if kt == ktVal { - off += binary.PutUvarint(data[off:], uint64(len(value))) - copy(data[off:], value) - off += len(value) - } - b.data = data[:off] - b.rLen++ - // Include 8-byte ikey header - b.bLen += len(key) + len(value) + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns. -func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns. -func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.encode() -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(0, data) -} - -// Replay replays batch contents. 
-func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - switch kt { - case ktVal: - r.Put(key, value) - case ktDel: - r.Delete(key) - } - }) -} - -// Len returns number of records in the batch. -func (b *Batch) Len() int { - return b.rLen -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.seq = 0 - b.rLen = 0 - b.bLen = 0 - b.sync = false -} - -func (b *Batch) init(sync bool) { - b.sync = sync -} - -func (b *Batch) append(p *Batch) { - if p.rLen > 0 { - b.grow(len(p.data) - batchHdrLen) - b.data = append(b.data, p.data[batchHdrLen:]...) - b.rLen += p.rLen - } - if p.sync { - b.sync = true - } -} - -// size returns sums of key/value pair length plus 8-bytes ikey. -func (b *Batch) size() int { - return b.bLen -} - -func (b *Batch) encode() []byte { - b.grow(0) - binary.LittleEndian.PutUint64(b.data, b.seq) - binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen)) - - return b.data -} - -func (b *Batch) decode(prevSeq uint64, data []byte) error { - if len(data) < batchHdrLen { - return newErrBatchCorrupted("too short") - } - - b.seq = binary.LittleEndian.Uint64(data) - if b.seq < prevSeq { - return newErrBatchCorrupted("invalid sequence number") - } - b.rLen = int(binary.LittleEndian.Uint32(data[8:])) - if b.rLen < 0 { - return newErrBatchCorrupted("invalid records length") - } - // No need to be precise at this point, it won't be used anyway - b.bLen = len(data) - batchHdrLen - b.data = data - - return nil -} - -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { - off := batchHdrLen - for i := 0; i < b.rLen; i++ { - if off >= len(b.data) { - return newErrBatchCorrupted("invalid records length") - } - - kt := kType(b.data[off]) - if kt > ktVal { - return newErrBatchCorrupted("bad record: invalid type") - } - off += 1 - - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - key := b.data[off : off+int(x)] - off += int(x) - var value []byte - if kt == ktVal { - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - value = b.data[off : off+int(x)] - off += int(x) - } - - f(i, kt, key, value) - } - - return nil -} - -func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) - }) -} - -func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error { - if err := b.decode(prevSeq, data); err != nil { - return err - } - return b.memReplay(to) -} - -func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 49578d99b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
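`batch.go` above encodes each record as a type byte, a uvarint key length, the key and, for puts, a uvarint value length plus the value, all behind a 12-byte header (8-byte sequence number, 4-byte record count). A hedged usage sketch of the public `Batch` API, written against the upstream `github.com/syndtr/goleveldb/leveldb` import path rather than the vendored one:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay satisfies the BatchReplay interface shown above.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k1"))
	fmt.Println("records:", b.Len()) // 2

	// Replay walks the encoded records in order.
	_ = b.Replay(printReplay{}) // prints "put k1=v1", then "del k1"
}
```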
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - kt kType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) -} - -func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.Len() != b2.Len() { - t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.Replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.Replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.kt != r2.kt { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.kt == ktVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(0, buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - -func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go deleted file mode 100644 index 0dd60fd82..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 500c3850f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := 
p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b 
*testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go deleted file mode 100644 index 175e22203..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package cache - -import ( - "math/rand" - "testing" -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index 8162e16a0..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides interface to implements a caching functionality. -// An implementation must be goroutine-safe. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevent subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. - EvictAll() - - // Close closes the 'cache tree' - Close() error -} - -// Value is a 'cacheable object'. It may implements util.Releaser, if -// so the the Release method will be called once object is released. -type Value interface{} - -type CacheGetter struct { - Cache *Cache - NS uint64 -} - -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. 
- for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. - if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. - if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. 
- pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } - return false -} - -// Nodes returns number of 'cache node' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns sums of 'cache node' size in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. -func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. -func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets 'cache node' with the given namespace and key. -// If cache node is not found and setFunc is not nil, Get will atomically creates -// the 'cache node' by calling setFunc. Otherwise Get will returns nil. -// -// The returned 'cache handle' should be released after use by calling Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and ban 'cache node' with the given namespace and key. 
-// A banned 'cache node' will never inserted into the 'cache tree'. Ban -// only attributed to the particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, then it will be executed if such 'cache node' -// doesn't exist or once the 'cache node' is released. -// -// Delete return true is such 'cache node' exist. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict return true is such 'cache node' exist. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and releases all 'cache node'. -func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. - for _, f := range n.onDel { - f() - } - } - } - } - r.mu.Unlock() - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns an handle for this 'cache node'. 
-func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -type Handle struct { - n unsafe.Pointer // *Node -} - -func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index c2a50156f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
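The cache package removed above is self-contained, and its API is worth illustrating: NewCache takes an optional Cacher policy (such as the LRU from lru.go below), Get atomically creates a node through setFunc, and every returned Handle is reference-counted. A minimal usage sketch, assuming only the vendored import path shown in these files:

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// Capacity is counted in the "charge" units returned by setFunc.
	c := cache.NewCache(cache.NewLRU(100))
	defer c.Close()

	// On a miss, Get atomically creates the node via setFunc (size, value).
	h := c.Get(1, 42, func() (int, cache.Value) {
		return 1, "hello"
	})
	fmt.Println(h.Value()) // hello
	h.Release()            // handles must be released after use

	// A nil setFunc makes Get a pure lookup; a miss returns nil.
	if h := c.Get(1, 42, nil); h != nil {
		fmt.Println("hit:", h.Value())
		h.Release()
	}
}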
- -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } else { - return charge, value - } - }) -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - nsx := []struct { - nobjects, nhandles, concurrent, repeat int - }{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range nsx { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range nsx { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat, objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 
{ - t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' 
iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - 
if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
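TestLRUCache_Delete above pins down a subtle contract: Delete bans the node from the LRU immediately, but any live Handle keeps the value reachable until its final Release. A compact sketch of that sequence, under the same assumptions as the previous example:

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(2))
	defer c.Close()

	h := c.Get(0, 1, func() (int, cache.Value) { return 1, "v1" })
	c.Delete(0, 1, nil)    // bans the node; it can no longer be promoted
	fmt.Println(h.Value()) // still v1: the live handle pins the value
	h.Release()            // drops the last reference; the node is reclaimed

	if c.Get(0, 1, nil) == nil {
		fmt.Println("entry is gone after the final Release")
	}
}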
- -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used -= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index 0554336db..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) - if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { - x = -1 - } else if m < n { - x = 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() - dst = icmp.ucmp.Separator(dst, ua, ub) - if dst == nil { - return nil - } - if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, a[len(a)-8:]...) - } - return dst -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() - dst = icmp.ucmp.Successor(dst, ub) - if dst == nil { - return nil - } - if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, b[len(b)-8:]...) - } - return dst -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf88d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. 
-var DefaultComparer = bytesComparer{} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f16f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. - Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index a3bdfae62..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
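The Separator and Successor contracts documented above are easiest to see on concrete keys. A small sketch against DefaultComparer, whose implementation (shown above) bumps the first byte it can and returns nil when no shortened key exists:

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	c := comparer.DefaultComparer

	// A separator x with "abc" <= x < "abe": the first differing byte is bumped.
	fmt.Printf("%q\n", c.Separator(nil, []byte("abc"), []byte("abe"))) // "abd"

	// No shortening is possible when a is a prefix of b, so nil is returned.
	fmt.Println(c.Separator(nil, []byte("ab"), []byte("abc")) == nil) // true

	// A successor x >= "hello": the first non-0xff byte is incremented.
	fmt.Printf("%q\n", c.Successor(nil, []byte("hello"))) // "i"
}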
- -package leveldb - -import ( - "bytes" - "fmt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "io" - "math/rand" - "testing" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() - if fi < 0 { - fi = len(sff) - 1 - } - if fi >= len(sff) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - file := sff[fi] - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) 
- if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := &h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) 
- - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. - h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) - - h.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index b11707868..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,945 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
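The corruption tests above exercise the recovery path end to end: flip a few bits in a table or the manifest, confirm Open fails, then rebuild via Recover. Against the public API documented in db.go below, the pattern looks roughly like this sketch (the path is a placeholder, not one used by these tests):

package main

import (
	"log"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil) // placeholder path
	if errors.IsCorrupted(err) {
		// Ignore any manifest, valid or not, and rebuild it from the tables.
		db, err = leveldb.RecoverFile("/tmp/demo.db", nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}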
- -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type DB struct // DB is a LevelDB database. -{ - // Need 64-bit alignment. - seq uint64 - - // Session. - s *session - - // MemDB. - memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Stats. - aliveSnaps, aliveIters int32 - - // Write. - writeC chan *Batch - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - journalC chan *Batch - journalAckC chan error - - // Compaction. - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compStats []cStats - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - writeC: make(chan *Batch), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - journalC: make(chan *Batch), - journalAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), - // Close - closeC: make(chan struct{}), - } - - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - db.closeW.Add(3) - go db.tCompaction() - go db.mCompaction() - go db.jWriter() - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. 
-// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// desribed in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as desribed -// in the leveldb/storage package. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader, lets StrictRecovery doing its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort it by file number. - tableFiles_, err := s.getFiles(storage.TypeTable) - if err != nil { - return err - } - tableFiles := files(tableFiles_) - tableFiles.sort() - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted table. 
- strict = o.GetStrict(opt.StrictRecovery) - - rec = &sessionRecord{numLevel: o.GetNumLevel()} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - tmp.Remove() - tmp = nil - } - }() - - // Copy entries. - tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validIkey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil { - return - } - err = tw.Close() - if err != nil { - return - } - err = writer.Sync() - if err != nil { - return - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { - itererr.SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) - tcorruptedBlock++ - } - }) - } - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseIkey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) - iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := file.Replace(tmp); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) - - // Mark file number as used. 
- s.markFileNum(tableFiles[len(tableFiles)-1].Num()) - - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all tables and sort it by file number. - journalFiles_, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - journalFiles := files(journalFiles_) - journalFiles.sort() - - // Discard older journal. - prev := -1 - for i, file := range journalFiles { - if file.Num() >= db.s.stJournalNum { - if prev >= 0 { - i-- - journalFiles[i] = journalFiles[prev] - } - journalFiles = journalFiles[i:] - break - } else if file.Num() == db.s.stPrevJournalNum { - prev = i - } - } - - var jr *journal.Reader - var of storage.File - var mem *memdb.DB - batch := new(Batch) - cm := newCMem(db.s) - buf := new(util.Buffer) - // Options. - strict := db.s.o.GetStrict(opt.StrictJournal) - checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer := db.s.o.GetWriteBuffer() - recoverJournal := func(file storage.File) error { - db.logf("journal@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - defer reader.Close() - - // Create/reset journal reader instance. - if jr == nil { - jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) - } else { - jr.Reset(reader, dropper{db.s, file}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if of != nil { - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - if err := cm.commit(file.Num(), db.seq); err != nil { - return err - } - cm.reset() - of.Remove() - of = nil - } - - // Replay journal to memdb. - mem.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - return errors.SetFile(err, file) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } else { - return errors.SetFile(err, file) - } - } - if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil { - if strict || !errors.IsCorrupted(err) { - return errors.SetFile(err, file) - } else { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.Len()) - - // Flush it if large enough. - if mem.Size() >= writeBuffer { - if err := cm.flush(mem, 0); err != nil { - return err - } - mem.Reset() - } - } - - of = file - return nil - } - - // Recover all journals. - if len(journalFiles) > 0 { - db.logf("journal@recovery F·%d", len(journalFiles)) - - // Mark file number as used. - db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) - - mem = memdb.New(db.s.icmp, writeBuffer) - for _, file := range journalFiles { - if err := recoverJournal(file); err != nil { - return err - } - } - - // Flush the last journal. - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. 
-	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
-		// Close journal.
-		if db.journal != nil {
-			db.journal.Close()
-			db.journalWriter.Close()
-		}
-		return err
-	}
-
-	// Remove the last obsolete journal file.
-	if of != nil {
-		of.Remove()
-	}
-
-	return nil
-}
-
-func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
-	ikey := newIkey(key, seq, ktSeek)
-
-	em, fm := db.getMems()
-	for _, m := range [...]*memDB{em, fm} {
-		if m == nil {
-			continue
-		}
-		defer m.decref()
-
-		mk, mv, me := m.mdb.Find(ikey)
-		if me == nil {
-			ukey, _, kt, kerr := parseIkey(mk)
-			if kerr != nil {
-				// Shouldn't have happened.
-				panic(kerr)
-			}
-			if db.s.icmp.uCompare(ukey, key) == 0 {
-				if kt == ktDel {
-					return nil, ErrNotFound
-				}
-				return append([]byte{}, mv...), nil
-			}
-		} else if me != ErrNotFound {
-			return nil, me
-		}
-	}
-
-	v := db.s.version()
-	value, cSched, err := v.get(ikey, ro, false)
-	v.release()
-	if cSched {
-		// Trigger table compaction.
-		db.compSendTrigger(db.tcompCmdC)
-	}
-	return
-}
-
-func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
-	ikey := newIkey(key, seq, ktSeek)
-
-	em, fm := db.getMems()
-	for _, m := range [...]*memDB{em, fm} {
-		if m == nil {
-			continue
-		}
-		defer m.decref()
-
-		mk, _, me := m.mdb.Find(ikey)
-		if me == nil {
-			ukey, _, kt, kerr := parseIkey(mk)
-			if kerr != nil {
-				// Shouldn't have happened.
-				panic(kerr)
-			}
-			if db.s.icmp.uCompare(ukey, key) == 0 {
-				if kt == ktDel {
-					return false, nil
-				}
-				return true, nil
-			}
-		} else if me != ErrNotFound {
-			return false, me
-		}
-	}
-
-	v := db.s.version()
-	_, cSched, err := v.get(ikey, ro, true)
-	v.release()
-	if cSched {
-		// Trigger table compaction.
-		db.compSendTrigger(db.tcompCmdC)
-	}
-	if err == nil {
-		ret = true
-	} else if err == ErrNotFound {
-		err = nil
-	}
-	return
-}
-
-// Get gets the value for the given key. It returns ErrNotFound if the
-// DB does not contain the key.
-//
-// The returned slice is its own copy; it is safe to modify the contents
-// of the returned slice.
-// It is safe to modify the contents of the argument after Get returns.
-func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	err = db.ok()
-	if err != nil {
-		return
-	}
-
-	se := db.acquireSnapshot()
-	defer db.releaseSnapshot(se)
-	return db.get(key, se.seq, ro)
-}
-
-// Has returns true if the DB contains the given key.
-//
-// It is safe to modify the contents of the argument after Has returns.
-func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
-	err = db.ok()
-	if err != nil {
-		return
-	}
-
-	se := db.acquireSnapshot()
-	defer db.releaseSnapshot(se)
-	return db.has(key, se.seq, ro)
-}
-
-// NewIterator returns an iterator for the latest snapshot of the
-// underlying DB.
-// The returned iterator is not goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-// It is also safe to use an iterator concurrently with modifying its
-// underlying DB. The resultant key/value pairs are guaranteed to be
-// consistent.
-//
-// Slice allows slicing the iterator to contain only keys in the given
-// range. A nil Range.Start is treated as a key before all keys in the
-// DB, and a nil Range.Limit is treated as a key after all keys in
-// the DB.
-//
-// The iterator must be released after use, by calling the Release method.
-//
-// Also read the Iterator documentation of the leveldb/iterator package.
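-//
-// A typical scan over the whole DB looks like this (an illustrative
-// sketch; a nil slice and nil ReadOptions select the defaults):
-//
-//	iter := db.NewIterator(nil, nil)
-//	for iter.Next() {
-//		// Key and Value are only valid until the next call on the
-//		// iterator; copy them if they must be retained.
-//		key := append([]byte{}, iter.Key()...)
-//		value := append([]byte{}, iter.Value()...)
-//		_, _ = key, value
-//	}
-//	iter.Release()
-//	if err := iter.Error(); err != nil {
-//		// handle error
-//	}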
-func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - // Iterator holds 'version' lock, 'version' is immutable so snapshot - // can be released after iterator created. - return db.newIterator(se.seq, slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. -// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of files at level 'n'. -// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -// leveldb.cachedblock -// Returns size of cached block. -// leveldb.openedtables -// Returns number of opened tables. -// leveldb.alivesnaps -// Returns number of alive snapshots. -// leveldb.aliveiters -// Returns number of alive iterators. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." - if !strings.HasPrefix(name, prefix) { - return "", errors.New("leveldb: GetProperty: unknown property: " + name) - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { - err = errors.New("leveldb: GetProperty: invalid property: " + name) - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "sstables": - for level, tables := range v.tables { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = errors.New("leveldb: GetProperty: unknown property: " + name) - } - - return -} - -// SizeOf calculates approximate sizes of the given key ranges. 
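-//
-// For example (an illustrative sketch; the key range is hypothetical):
-//
-//	sizes, err := db.SizeOf([]util.Range{
-//		{Start: []byte("a"), Limit: []byte("z")},
-//	})
-//	if err == nil {
-//		total := sizes.Sum()
-//		_ = total
-//	}
-//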
-// The length of the returned sizes is equal to the length of the given
-// ranges. The returned sizes measure storage space usage, so if the user
-// data compresses by a factor of ten, the returned sizes will be one-tenth
-// the size of the corresponding user data size.
-// The results may not include the sizes of recently written data.
-func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
-	if err := db.ok(); err != nil {
-		return nil, err
-	}
-
-	v := db.s.version()
-	defer v.release()
-
-	sizes := make(Sizes, 0, len(ranges))
-	for _, r := range ranges {
-		imin := newIkey(r.Start, kMaxSeq, ktSeek)
-		imax := newIkey(r.Limit, kMaxSeq, ktSeek)
-		start, err := v.offsetOf(imin)
-		if err != nil {
-			return nil, err
-		}
-		limit, err := v.offsetOf(imax)
-		if err != nil {
-			return nil, err
-		}
-		var size uint64
-		if limit >= start {
-			size = limit - start
-		}
-		sizes = append(sizes, size)
-	}
-
-	return sizes, nil
-}
-
-// Close closes the DB. This will also release any outstanding snapshots and
-// abort any in-flight compactions.
-//
-// It is not safe to close a DB until all outstanding iterators are released.
-// It is valid to call Close multiple times. Other methods should not be
-// called after the DB has been closed.
-func (db *DB) Close() error {
-	if !db.setClosed() {
-		return ErrClosed
-	}
-
-	start := time.Now()
-	db.log("db@close closing")
-
-	// Clear the finalizer.
-	runtime.SetFinalizer(db, nil)
-
-	// Get compaction error.
-	var err error
-	select {
-	case err = <-db.compErrC:
-	default:
-	}
-
-	// Signal all goroutines.
-	close(db.closeC)
-
-	// Wait for all goroutines to exit.
-	db.closeW.Wait()
-
-	// Lock the writer and close the journal.
-	db.writeLockC <- struct{}{}
-	if db.journal != nil {
-		db.journal.Close()
-		db.journalWriter.Close()
-	}
-
-	if db.writeDelayN > 0 {
-		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
-	}
-
-	// Close session.
-	db.s.close()
-	db.logf("db@close done T·%v", time.Since(start))
-	db.s.release()
-
-	if db.closer != nil {
-		if err1 := db.closer.Close(); err == nil {
-			err = err1
-		}
-	}
-
-	// Nil out pointers.
-	db.s = nil
-	db.mem = nil
-	db.frozenMem = nil
-	db.journal = nil
-	db.journalWriter = nil
-	db.journalFile = nil
-	db.frozenJournalFile = nil
-	db.closer = nil
-
-	return err
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
deleted file mode 100644
index 6bf2c4bb6..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ /dev/null
@@ -1,835 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
- -package leveldb - -import ( - "sync" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStats struct { - sync.Mutex - duration time.Duration - read uint64 - write uint64 -} - -func (p *cStats) add(n *cStatsStaging) { - p.Lock() - p.duration += n.duration - p.read += n.read - p.write += n.write - p.Unlock() -} - -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() - return p.duration, p.read, p.write -} - -type cStatsStaging struct { - start time.Time - duration time.Duration - on bool - read uint64 - write uint64 -} - -func (p *cStatsStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatsStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cMem struct { - s *session - level int - rec *sessionRecord -} - -func newCMem(s *session) *cMem { - return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}} -} - -func (c *cMem) flush(mem *memdb.DB, level int) error { - s := c.s - - // Write memdb to table. - iter := mem.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return err - } - - // Pick level. - if level < 0 { - v := s.version() - level = v.pickLevel(t.imin.ukey(), t.imax.ukey()) - v.release() - } - c.rec.addTableFile(level, t) - - s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - - c.level = level - return nil -} - -func (c *cMem) reset() { - c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()} -} - -func (c *cMem) commit(journal, seq uint64) error { - c.rec.setJournalNum(journal) - c.rec.setSeqNum(seq) - - // Commit changes. - return c.s.commit(c.rec) -} - -func (db *DB) compactionError() { - var ( - err error - wlocked bool - ) -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case _, _ = <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case errors.IsCorrupted(err): - goto hasperr - default: - } - case _, _ = <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - wlocked = true - case _, _ = <-db.closeC: - if wlocked { - // We should release the lock or Close will hang. 
- <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check wether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) memCompaction() { - mem := db.getFrozenMem() - if mem == nil { - return - } - defer mem.decref() - - c := newCMem(db.s) - stats := new(cStatsStaging) - - db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size())) - - // Don't compact empty memdb. - if mem.mdb.Len() == 0 { - db.logf("mem@flush skipping") - // drop frozen mem - db.dropFrozenMem() - return - } - - // Pause table compaction. 
- resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case _, _ = <-db.closeC: - return - } - - db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.flush(mem.mdb, -1) - }, func() error { - for _, r := range c.rec.addedTables { - db.logf("mem@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.commit(db.journalFile.Num(), db.frozenSeq) - }, nil) - - db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration) - - for _, r := range c.rec.addedTables { - stats.write += r.size - } - db.compStats[c.level].add(stats) - - // Drop frozen mem. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case _, _ = <-db.closeC: - return - } - } - - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatsStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case _, _ = <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. - return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.level+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. - b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. 
- if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. - hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = kMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. 
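-	// A worked micro-example of the drop rules above (hypothetical
-	// numbers). Suppose user key "k" occurs as (k, seq=7, val),
-	// (k, seq=5, val), (k, seq=3, del), and minSeq=6:
-	//   - seq=7 is kept: it is the newest entry, visible to new readers.
-	//   - seq=5 is kept: the live snapshot at seq=6 still resolves "k" to
-	//     this entry (rule (A) does not fire, since lastSeq=7 > minSeq).
-	//   - seq=3 is dropped by rule (A): once lastSeq=5 <= minSeq, no live
-	//     snapshot can observe an older entry for the same user key.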
- if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()} - rec.addCompPtr(c.level, c.imax) - - if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) - return - } - - var stats [2]cStatsStaging - for i, tables := range c.tables { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), - } - db.compactionTransact("table@build", b) - - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats[c.level+1].add(&stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } - } -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case _, _ = <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cIdle struct { - ackC chan<- error -} - -func (r cIdle) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compation and/or wait for all compaction to be done. 
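-// At the public API level this plumbing is reached through CompactRange,
-// e.g. (an illustrative sketch; a zero util.Range compacts all keys):
-//
-//	if err := db.CompactRange(util.Range{}); err != nil {
-//		// handle error
-//	}
-//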
-func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cIdle{ch}: - case err = <-db.compErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - -// Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cIdle: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - default: - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cIdle: - ackQ = append(ackQ, x) - case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 1366bdb54..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
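-
-// The iterator handed out by DB.NewIterator (db.go) is implemented below.
-// A small positioning sketch (illustrative; the keys are hypothetical):
-//
-//	iter := db.NewIterator(&util.Range{Start: []byte("key000")}, nil)
-//	if iter.Seek([]byte("key000100")) {
-//		// iter is now at the first key >= "key000100" within the range.
-//	}
-//	if iter.Last() {
-//		// iter is at the largest key in the range.
-//	}
-//	iter.Release()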
- -package leveldb - -import ( - "errors" - "math/rand" - "runtime" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - em, fm := db.getMems() - v := db.s.version() - - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) - emi := em.mdb.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) - if fm != nil { - fmi := fm.mdb.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) - } - i = append(i, ti...) - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) - } - if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) - } - } - rawIter := db.newRawIterator(islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -func (db *DB) iterSamplingRate() int { - return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
-type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - smaplingGap int - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) sampleSeek() { - ikey := i.iter.Key() - i.smaplingGap -= len(ikey) + len(i.iter.Value()) - for i.smaplingGap < 0 { - i.smaplingGap += i.db.iterSamplingRate() - i.db.sampleSeek(ikey) - } -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := newIkey(key, i.seq, ktSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - switch kt { - case ktDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case ktVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == ktDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. - runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 9738159dc..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. -func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshoted. 
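-
-// Typical use of the snapshot machinery in this file (an illustrative
-// sketch; see the Snapshot type below):
-//
-//	snap, err := db.GetSnapshot()
-//	if err != nil {
-//		// handle error
-//	}
-//	defer snap.Release()
-//	// Reads through snap observe the DB as of the snapshot's sequence.
-//	value, err := snap.Get([]byte("key"), nil)
-//	if err == ErrNotFound {
-//		// the key was absent at snapshot time
-//	}
-//	_ = value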
-func (db *DB) minSeq() uint64 {
-	db.snapsMu.Lock()
-	defer db.snapsMu.Unlock()
-
-	if e := db.snapsList.Front(); e != nil {
-		return e.Value.(*snapshotElement).seq
-	}
-
-	return db.getSeq()
-}
-
-// Snapshot is a DB snapshot.
-type Snapshot struct {
-	db       *DB
-	elem     *snapshotElement
-	mu       sync.RWMutex
-	released bool
-}
-
-// Creates new snapshot object.
-func (db *DB) newSnapshot() *Snapshot {
-	snap := &Snapshot{
-		db:   db,
-		elem: db.acquireSnapshot(),
-	}
-	atomic.AddInt32(&db.aliveSnaps, 1)
-	runtime.SetFinalizer(snap, (*Snapshot).Release)
-	return snap
-}
-
-func (snap *Snapshot) String() string {
-	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
-}
-
-// Get gets the value for the given key. It returns ErrNotFound if
-// the DB does not contain the key.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
-func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	err = snap.db.ok()
-	if err != nil {
-		return
-	}
-	snap.mu.RLock()
-	defer snap.mu.RUnlock()
-	if snap.released {
-		err = ErrSnapshotReleased
-		return
-	}
-	return snap.db.get(key, snap.elem.seq, ro)
-}
-
-// Has returns true if the DB contains the given key.
-//
-// It is safe to modify the contents of the argument after Has returns.
-func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
-	err = snap.db.ok()
-	if err != nil {
-		return
-	}
-	snap.mu.RLock()
-	defer snap.mu.RUnlock()
-	if snap.released {
-		err = ErrSnapshotReleased
-		return
-	}
-	return snap.db.has(key, snap.elem.seq, ro)
-}
-
-// NewIterator returns an iterator for the snapshot of the underlying DB.
-// The returned iterator is not goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-// It is also safe to use an iterator concurrently with modifying its
-// underlying DB. The resultant key/value pairs are guaranteed to be
-// consistent.
-//
-// Slice allows slicing the iterator to contain only keys in the given
-// range. A nil Range.Start is treated as a key before all keys in the
-// DB, and a nil Range.Limit is treated as a key after all keys in
-// the DB.
-//
-// The iterator must be released after use, by calling the Release method.
-// Releasing the snapshot doesn't release the iterator; the iterator
-// remains valid until it is released.
-//
-// Also read the Iterator documentation of the leveldb/iterator package.
-func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	if err := snap.db.ok(); err != nil {
-		return iterator.NewEmptyIterator(err)
-	}
-	snap.mu.Lock()
-	defer snap.mu.Unlock()
-	if snap.released {
-		return iterator.NewEmptyIterator(ErrSnapshotReleased)
-	}
-	// Since the iterator already holds a version ref, it doesn't need to
-	// hold a snapshot ref.
-	return snap.db.newIterator(snap.elem.seq, slice, ro)
-}
-
-// Release releases the snapshot. This will not release any returned
-// iterators; the iterators remain valid until released or until the
-// underlying DB is closed.
-//
-// Other methods should not be called after the snapshot has been released.
-func (snap *Snapshot) Release() {
-	snap.mu.Lock()
-	defer snap.mu.Unlock()
-
-	if !snap.released {
-		// Clear the finalizer.
- runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index f642283ba..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type memDB struct { - db *DB - mdb *memdb.DB - ref int32 -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() { - m.mdb.Reset() - m.db.mpoolPut(m.mdb) - } - m.db = nil - m.mdb = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) sampleSeek(ikey iKey) { - v := db.s.version() - if v.sampleSeek(ikey) { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - v.release() -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - defer func() { - recover() - }() - select { - case db.memPool <- mem: - default: - } -} - -func (db *DB) mpoolGet() *memdb.DB { - select { - case mem := <-db.memPool: - return mem - default: - return nil - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case _, _ = <-db.closeC: - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() - if err != nil { - db.s.reuseFileNum(num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - panic("still has frozen mem") - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFile = db.journalFile - } - db.journalWriter = w - db.journalFile = file - db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - mdb: mdb, - ref: 2, - } - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. - db.frozenSeq = db.seq - return -} - -// Get all memdbs. 
-func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get frozen memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) - } - db.frozenJournalFile = nil - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 2cb0a55d4..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2665 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
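-
-// The tests below drive the write path through Put, Delete and batched
-// Write. A minimal standalone sketch (illustrative; it assumes
-// Batch.Put/Batch.Delete as the batch mutators, per upstream goleveldb,
-// and a nil WriteOptions selecting the defaults):
-//
-//	if err := db.Put([]byte("foo"), []byte("v1"), nil); err != nil {
-//		// handle error
-//	}
-//	batch := new(Batch)
-//	batch.Put([]byte("bar"), []byte("v2"))
-//	batch.Delete([]byte("foo"))
-//	if err := db.Write(batch, nil); err != nil {
-//		// handle error
-//	}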
- -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. - defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var ( - maxOverlaps uint64 - maxLevel int - ) - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - 
maxOverlaps = sum - maxLevel = level - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func (h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case ktVal: - res += string(iter.Value()) - case ktDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". 
-func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - t.Log("starting memdb compaction") - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } - - t.Log("memdb compaction done") -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - t.Error("CompactRangeAt: expect error") - } - - t.Log("table range compaction done") -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - t.Logf("starting DB range compaction: min=%q, max=%q", min, max) - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } - - t.Log("DB range compaction done") -} - -func (h *dbHarness) sizeOf(start, limit string) uint64 { - sz, err := h.db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) - if err != nil { - h.t.Error("SizeOf: got error: ", err) - } - return sz.Sum() -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { - sz := h.sizeOf(start, limit) - if sz < low || sz > hi { - h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, sz) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} -func (h *dbHarness) tablesPerLevel(want string) { - res := "" - nz := 0 - v := h.db.s.version() - for level, tt := range v.tables { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { - nz = len(res) - } - } - v.release() - res = res[:nz] - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) - } - v.release() - return -} - -type 
keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t *testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var _bloom_filter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{Filter: _bloom_filter} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = _bloom_filter - } - case 2: - if o == nil { - o = &opt.Options{Compression: opt.NoCompression} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) -} - -func TestDB_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDB_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDB_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.get("foo", false) - err := h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yield error: ", err) - } - h.get("foo", false) -} - -func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDB_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func 
TestDB_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDB_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { - t.Errorf("level-0 tables is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDB_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDB_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDB_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). - - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. 
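- // Compacting level 1 merges any level-1 table down into level 2,
- // producing the layout described above: sstable A in level 0,
- // nothing in level 1, sstable B in level 2.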
- h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDB_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDB_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. - iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDB_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDB_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.DelaySync(storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDB_RecoverWithLargeJournal(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. 
- h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - h.reopenDB() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(h.o.GetNumLevel(), "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. 
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - 
db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
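- // At the bottommost level holding the key, a deletion marker is no
- // longer needed: there is no older value beneath it to resurrect.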
- h.allEntriesFor("foo", "[ ]") -} - -func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1}) - defer h.close() - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) - } - - h.stor.SetEmuErr(storage.TypeTable, tsOpOpen) - go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.SetEmuErr(0, tsOpOpen) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDB_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
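- // The tombstone for "600" overlaps files[0]'s 200..900 range, so the
- // memtable compaction must keep it in level 0; reads consult level 0
- // first, and a deeper tombstone would lose to the level-0 v600.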
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) - } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) - - h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, 
true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. 
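- // With 10 bits per key the bloom filter's false-positive rate is
- // about 1%, so only roughly n/100 of the n missing lookups should
- // reach an sstable; the bound below allows up to 3*n/100.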
- h.stor.ResetReadCounter() - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt = int(h.stor.ReadCounter()) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.ReleaseSync(storage.TypeTable) -} - -func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 - - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 - - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDB_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDB_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDB_LeveldbIssue178(t *testing.T) { - nKeys := (opt.DefaultCompactionTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 := func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. 
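- // Only the first key range should remain, since the second range was
- // written and then fully deleted above.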
- h.assertNumKeys(nKeys) -} - -func TestDB_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. - h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} - -func TestDB_GoleveldbIssue74(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - const n, dur = 10000, 5 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(2) - var done uint32 - go func() { - var i int - defer func() { - t.Logf("WRITER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - - b := new(Batch) - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iv := fmt.Sprintf("VAL%010d", i) - for k := 0; k < n; k++ { - key := fmt.Sprintf("KEY%06d", k) - b.Put([]byte(key), []byte(key+iv)) - b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) - } - h.write(b) - - b.Reset() - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) - } else if string(value) != string(key)+iv { - t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) - } - - b.Delete(key) - b.Delete(ptrKey) - } - h.write(b) - iter.Release() - snap.Release() - if k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var prevValue string - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) - } else if prevValue != "" && string(value) != string(key)+prevValue { - t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) - } else { - prevValue = string(value[len(key):]) - } - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - wg.Wait() -} - -func TestDB_GetProperties(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - _, err := h.db.GetProperty("leveldb.num-files-at-level") - if err == nil { - t.Error("GetProperty() failed to detect missing level") - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0") - if err != nil { - t.Error("got unexpected error", err) - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0x") - if err == nil { - t.Error("GetProperty() failed to detect 
invalid level") - } -} - -func TestDB_GoleveldbIssue72and83(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, - }) - defer h.close() - - const n, wn, dur = 10000, 100, 30 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - randomData := func(prefix byte, i int) []byte { - data := make([]byte, 1+4+32+64+32) - _, err := crand.Reader.Read(data[1 : len(data)-8]) - if err != nil { - panic(err) - } - data[0] = prefix - binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i)) - binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value()) - return data - } - - keys := make([][]byte, n) - for i := range keys { - keys[i] = randomData(1, 0) - } - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(3) - var done uint32 - go func() { - i := 0 - defer func() { - t.Logf("WRITER DONE #%d", i) - wg.Done() - }() - - b := new(Batch) - for ; i < wn && atomic.LoadUint32(&done) == 0; i++ { - b.Reset() - for _, k1 := range keys { - k2 := randomData(2, i) - b.Put(k2, randomData(42, i)) - b.Put(k1, k2) - } - if err := h.db.Write(b, h.wo); err != nil { - atomic.StoreUint32(&done, 1) - t.Fatalf("WRITER #%d db.Write: %v", i, err) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER0 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - seq := snap.elem.seq - if seq == 0 { - snap.Release() - continue - } - iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - writei := int(seq/(n*2) - 1) - var k int - for ; iter.Next(); k++ { - k1 := iter.Key() - k2 := iter.Value() - k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:]) - k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value() - if k1checksum0 != k1checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0) - } - k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:]) - k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value() - if k2checksum0 != k2checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1) - } - kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:])) - if writei != kwritei { - t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei) - } - if _, err := snap.Get(k2, nil); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2) - } - } - if err := iter.Error(); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err) - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER1 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iter := h.db.NewIterator(nil, nil) - seq := iter.(*dbIter).seq - if seq == 0 { - iter.Release() - continue - } - writei := int(seq/(n*2) - 1) - var k int - for ok := iter.Last(); ok; ok = iter.Prev() { - k++ - } - if err := iter.Error(); err != nil { - t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) - } - iter.Release() - if m := (writei+1)*n + n; k != m { - t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) - } - } - }() - - wg.Wait() -} - -func TestDB_TransientError(t *testing.T) { - h := 
newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, - }) - defer h.close() - - const ( - nSnap = 20 - nKey = 10000 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Delete([]byte(key)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - } - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - - runtime.GOMAXPROCS(runtime.NumCPU()) - - rnd := rand.New(rand.NewSource(0xecafdaed)) - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(2) - - go func(i int, snap *Snapshot, sk []int) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for _, k := range sk { - key := fmt.Sprintf("KEY%8d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap, rnd.Perm(nKey)) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - iter := snap.NewIterator(nil, nil) - defer iter.Release() - for k := 0; k < nKey; k++ { - if !iter.Next() { - if err := iter.Error(); err != nil { - t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) - } else { - t.Fatalf("READER_ITER #%d K%d eoi", i, k) - } - } - key := fmt.Sprintf("KEY%8d", k) - xkey := iter.Key() - if !bytes.Equal([]byte(key), xkey) { - t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) - } - value := key + vtail - xvalue := iter.Value() - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, - }) - defer h.close() - - const ( - nSnap = 190 - nKey = 140 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Delete([]byte(key)) - 
} - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - } - - h.compactMem() - - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - - h.compactRangeAt(0, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - h.compactRangeAt(1, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(1) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_TableCompactionBuilder(t *testing.T) { - stor := newTestStorage(t) - defer stor.Close() - - const nSeq = 99 - - o := &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, - } - s, err := newSession(stor, o) - if err != nil { - t.Fatal(err) - } - if err := s.create(); err != nil { - t.Fatal(err) - } - defer s.close() - var ( - seq uint64 - targetSize = 5 * o.CompactionTableSize - value = bytes.Repeat([]byte{'0'}, 100) - ) - for i := 0; i < 2; i++ { - tw, err := s.tops.create() - if err != nil { - t.Fatal(err) - } - for k := 0; tw.tw.BytesLen() < targetSize; k++ { - key := []byte(fmt.Sprintf("%09d", k)) - seq += nSeq - 1 - for x := uint64(0); x < nSeq; x++ { - if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil { - t.Fatal(err) - } - } - } - tf, err := tw.finish() - if err != nil { - t.Fatal(err) - } - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - rec.addTableFile(i, tf) - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - } - - // Build grandparent. - v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - b := &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize/3 + 961, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - // Build level-1. 
- v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - // Move grandparent to level-3 - for _, t := range v.tables[2] { - rec.delTable(2, t.file.Num()) - rec.addTableFile(3, t) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - v = s.version() - for level, want := range []bool{false, true, false, true, false} { - got := len(v.tables[level]) > 0 - if want != got { - t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) - } - } - for i, f := range v.tables[1][:len(v.tables[1])-1] { - nf := v.tables[1][i+1] - if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num()) - } - } - v.release() - - // Compaction with transient error. - v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - stor.SetEmuErrOnce(storage.TypeTable, tsOpSync) - stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite) - stor.SetEmuRandErrProb(0xf0) - for { - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Logf("(expected) b.run: %v", err) - } else { - break - } - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - stor.SetEmuErrOnce(0, tsOpSync) - stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite) - - v = s.version() - if len(v.tables[1]) != len(v.tables[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2])) - } - for i, f0 := range v.tables[1] { - f1 := v.tables[2][i] - iter0 := s.tops.newIterator(f0, nil, nil) - iter1 := s.tops.newIterator(f1, nil, nil) - for j := 0; true; j++ { - next0 := iter0.Next() - next1 := iter1.Next() - if next0 != next1 { - t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) - } - key0 := iter0.Key() - key1 := iter1.Key() - if !bytes.Equal(key0, key1) { - t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) - } - if next0 == false { - break - } - } - iter0.Release() - iter1.Release() - } - v.release() -} - -func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) { - const ( - vSize = 200 * opt.KiB - tSize = 100 * opt.MiB - mIter = 100 - n = tSize / vSize - ) - - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - DisableBlockCache: true, - }) - defer h.close() - - key := func(x int) string { - return fmt.Sprintf("v%06d", x) - } - - // Fill. - value := strings.Repeat("x", vSize) - for i := 0; i < n; i++ { - h.put(key(i), value) - } - h.compactMem() - - // Delete all. 
- for i := 0; i < n; i++ { - h.delete(key(i)) - } - h.compactMem() - - var ( - limit = n / limitDiv - - startKey = key(0) - limitKey = key(limit) - maxKey = key(n) - slice = &util.Range{Limit: []byte(limitKey)} - - initialSize0 = h.sizeOf(startKey, limitKey) - initialSize1 = h.sizeOf(limitKey, maxKey) - ) - - t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1))) - - for r := 0; true; r++ { - if r >= mIter { - t.Fatal("taking too long to compact") - } - - // Iterates. - iter := h.db.NewIterator(slice, h.ro) - for iter.Next() { - } - if err := iter.Error(); err != nil { - t.Fatalf("Iter err: %v", err) - } - iter.Release() - - // Wait compaction. - h.waitCompaction() - - // Check size. - size0 := h.sizeOf(startKey, limitKey) - size1 := h.sizeOf(limitKey, maxKey) - t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1))) - if size0 < initialSize0/10 { - break - } - } - - if initialSize1 > 0 { - h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB) - } -} - -func TestDB_IterTriggeredCompaction(t *testing.T) { - testDB_IterTriggeredCompaction(t, 1) -} - -func TestDB_IterTriggeredCompactionHalf(t *testing.T) { - testDB_IterTriggeredCompaction(t, 2) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index ed322988c..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -type Sizes []uint64 - -// Sum returns sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s - } - return n -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { - for _, t := range tables { - tablesMap[t.file.Num()] = false - } - } - - files, err := db.s.getFiles(storage.TypeAll) - if err != nil { - return err - } - - var nTables int - var rem []storage.File - for _, f := range files { - keep := true - switch f.Type() { - case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() - case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() - } else { - keep = f.Num() >= db.journalFile.Num() - } - case storage.TypeTable: - _, keep = tablesMap[f.Num()] - if keep { - tablesMap[f.Num()] = true - nTables++ - } - } - - if !keep { - rem = append(rem, f) - } - } - - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { - if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) - } - - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index f1c6b7327..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(b *Batch) error { - w, err := db.journal.Next() - if err != nil { - return err - } - if _, err := w.Write(b.encode()); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if b.sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) jWriter() { - defer db.closeW.Done() - for { - select { - case b := <-db.journalC: - if b != nil { - db.journalAckC <- db.writeJournal(b) - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) rotateMem(n int) (mem *memDB, err error) { - // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) - if err != nil { - return - } - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - return - } - - // Schedule memdb compaction. 
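 // (Clarifying comment, added editorially: compSendIdle above blocks until
 // the memdb-compaction goroutine is idle, while compSendTrigger below only
 // performs a non-blocking nudge, so rotateMem returns without waiting for
 // the frozen memdb to actually be flushed.)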
- db.compSendTrigger(db.mcompCmdC) - return -} - -func (db *DB) flush(n int) (mem *memDB, nn int, err error) { - delayed := false - flush := func() (retry bool) { - v := db.s.version() - defer v.release() - mem = db.getEffectiveMem() - defer func() { - if retry { - mem.decref() - mem = nil - } - }() - nn = mem.mdb.Free() - switch { - case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case nn >= n: - return false - case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): - delayed = true - err = db.compSendIdle(db.tcompCmdC) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mem.mdb.Len() == 0 { - nn = n - } else { - mem.decref() - mem, err = db.rotateMem(n) - if err == nil { - nn = mem.mdb.Free() - } else { - nn = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -// Write applies the given batch to the DB. The batch will be applied -// sequentially. -// -// It is safe to modify the contents of the arguments after Write returns. -func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { - err = db.ok() - if err != nil || b == nil || b.Len() == 0 { - return - } - - b.init(wo.GetSync()) - - // The write happens synchronously. - select { - case db.writeC <- b: - if <-db.writeMergedC { - return <-db.writeAckC - } - case db.writeLockC <- struct{}{}: - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - - merged := 0 - danglingMerge := false - defer func() { - if danglingMerge { - db.writeMergedC <- false - } else { - <-db.writeLockC - } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - }() - - mem, memFree, err := db.flush(b.size()) - if err != nil { - return - } - defer mem.decref() - - // Calculate maximum size of the batch. - m := 1 << 20 - if x := b.size(); x <= 128<<10 { - m = x + (128 << 10) - } - m = minInt(m, memFree) - - // Merge with other batches. -drain: - for b.size() < m && !b.sync { - select { - case nb := <-db.writeC: - if b.size()+nb.size() <= m { - b.append(nb) - db.writeMergedC <- true - merged++ - } else { - danglingMerge = true - break drain - } - default: - break drain - } - } - - // Set the batch's first seq number relative to the last seq. - b.seq = db.seq + 1 - - // Write journal concurrently if it is large enough. - if b.size() >= (128 << 10) { - // Push the write batch to the journal writer - select { - case db.journalC <- b: - // Write into memdb - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - err = ErrClosed - return - } - // Wait for journal writer - select { - case err = <-db.journalAckC: - if err != nil { - // Revert memdb if error detected - if berr := b.revertMemReplay(mem.mdb); berr != nil { - panic(berr) - } - return - } - case _, _ = <-db.closeC: - err = ErrClosed - return - } - } else { - err = db.writeJournal(b) - if err != nil { - return - } - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - } - - // Set last seq number. - db.addSeq(uint64(b.Len())) - - if b.size() >= memFree { - db.rotateMem(0) - } - return -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map.
-// -// It is safe to modify the contents of the arguments after Put returns. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Put(key, value) - return db.Write(b, wo) -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Delete(key) - return db.Write(b, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. -// -// A nil Range.Start is treated as a key before all keys in the DB, -// and a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both are nil then it will compact the entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mem := db.getEffectiveMem() - defer mem.decref() - if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index 53f13bb24..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides an implementation of the LevelDB key/value database. -// -// Create or open a database: -// -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... -// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // are only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ...
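// (Example added here for illustration; it is not part of the original
// package comment. A Get miss is reported through the error, not as a nil
// value:)
//
// data, err := db.Get([]byte("absent"), nil)
// if err == leveldb.ErrNotFound {
// // handle the miss
// }
// ...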
-// -// Iterate over a subset of database content with a particular prefix: -// -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over a subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index 5d96d6d34..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index f31528da7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. -func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. -type ErrCorrupted struct { - File *storage.FileInfo - Err error -} - -func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) - } else { - return e.Err.Error() - } -} - -// NewErrCorrupted creates a new ErrCorrupted error.
-func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} -} - -// IsCorrupted returns a boolean indicating whether the error indicates -// a corruption. -func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicates a corruption due to missing -// files. -type ErrMissingFiles struct { - Files []*storage.FileInfo -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFile sets the 'file info' of the given error to the given file. -// Currently only ErrCorrupted is supported; for other error types this -// does nothing. -func SetFile(err error, f storage.File) error { - switch x := err.(type) { - case *ErrCorrupted: - x.File = storage.NewFileInfo(f) - return x - } - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index 0769157f7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - DisableBlockCache: true, - BlockRestartInterval: 5, - BlockSize: 80, - Compression: opt.NoCompression, - OpenFilesCacheCapacity: -1, - Strict: opt.StrictAll, - WriteBuffer: 1000, - CompactionTableSize: 2000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 20.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index ba1e8c165..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
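An aside, not part of this diff: a minimal usage sketch for the errors package deleted above. It assumes the upstream import path github.com/syndtr/goleveldb at a revision contemporary with this vendored copy.

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/errors"
	)

	func main() {
		// NewErrCorrupted tolerates a nil file, as the callers above
		// (checkAndCleanFiles, the journal reader) rely on.
		err := errors.NewErrCorrupted(nil, errors.New("bad block"))
		fmt.Println(errors.IsCorrupted(err))                // true
		fmt.Println(errors.IsCorrupted(errors.ErrNotFound)) // false
	}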
- -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index fc5ea790f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefore, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. - return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey.
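// (Worked example added editorially, derived from the code above: with
// bitsPerKey = 10 the generator uses k = 10*69/100 = 6 probes per key, and
// 1000 keys yield nBits = 10000, hence nBytes = 1250, plus one trailing
// byte holding k, i.e. a 1251-byte filter.)
//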
-// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. -func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 3175ffcbc..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d 
mediocre", good, mediocre) - if mediocre > good/5 { - t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a8..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index 806552d17..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. 
- Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and the basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// ArrayIndexer is the interface that wraps BasicArray and the basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. - Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int - err error -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() -} - -func (i *basicArrayIterator) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return i.err } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() - return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index f730e0a44..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
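An aside, not part of this diff: a minimal sketch of implementing the Array interface above over a sorted string slice and walking it with NewArrayIterator. The import path assumes the upstream goleveldb repository rather than the vendored Godeps path.

	package main

	import (
		"fmt"
		"sort"

		"github.com/syndtr/goleveldb/leveldb/iterator"
	)

	// sortedStrings adapts a sorted []string to the Array interface.
	type sortedStrings []string

	func (a sortedStrings) Len() int { return len(a) }

	// Search mirrors the contract above: the smallest index whose key is >= key.
	func (a sortedStrings) Search(key []byte) int {
		return sort.SearchStrings(a, string(key))
	}

	func (a sortedStrings) Index(i int) (key, value []byte) {
		return []byte(a[i]), []byte("value-" + a[i])
	}

	func main() {
		it := iterator.NewArrayIterator(sortedStrings{"apple", "banana", "cherry"})
		defer it.Release()
		// Seek positions at the first key >= "b"; Next then walks forward.
		for ok := it.Seek([]byte("b")); ok; ok = it.Next() {
			fmt.Printf("%s = %s\n", it.Key(), it.Value())
		}
	}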
- -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - - . "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterate and seek correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 8f9e9339d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. - Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if
i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains the actual key/value pairs. -// -// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true) -// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will -// continue to the next 'data iterator'. Corruption on the 'index iterator' will not be -// ignored and will halt the iterator. -func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 4d98cd7ac..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - .
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. - index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index 454112954..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. 
- // It returns false if the iterator is exhausted. - Next() bool - - // Prev moves the iterator to the previous key/value pair. - // It returns false if the iterator is exhausted. - Prev() bool -} - -// CommonIterator is the interface that wraps common iterator methods. -type CommonIterator interface { - IteratorSeeker - - // util.Releaser is the interface that wraps basic Release method. - // When called, Release will release any resources associated with the - // iterator. - util.Releaser - - // util.ReleaseSetter is the interface that wraps the basic SetReleaser - // method. - util.ReleaseSetter - - // TODO: Remove this when ready. - Valid() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error -} - -// Iterator iterates over a DB's key/value pairs in key order. -// -// When an error is encountered, any 'seeks method' will return false and will -// yield no key/value pairs. The error can be queried by calling the Error -// method. Calling Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read -// an iterator until exhaustion. -// Also, an iterator is not necessarily goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -type Iterator interface { - CommonIterator - - // Key returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Key() []byte - - // Value returns the value of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Value() []byte -} - -// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback -// method. -// -// ErrorCallbackSetter is implemented by the indexed and merged iterators. -type ErrorCallbackSetter interface { - // SetErrorCallback allows setting an error callback for the corresponding - // iterator. Use nil to clear the callback. - SetErrorCallback(f func(err error)) -} - -type emptyIterator struct { - util.BasicReleaser - err error -} - -func (i *emptyIterator) rErr() { - if i.err == nil && i.Released() { - i.err = ErrIterReleased - } -} - -func (*emptyIterator) Valid() bool { return false } -func (i *emptyIterator) First() bool { i.rErr(); return false } -func (i *emptyIterator) Last() bool { i.rErr(); return false } -func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } -func (i *emptyIterator) Next() bool { i.rErr(); return false } -func (i *emptyIterator) Prev() bool { i.rErr(); return false } -func (*emptyIterator) Key() []byte { return nil } -func (*emptyIterator) Value() []byte { return nil } -func (i *emptyIterator) Error() error { return i.err } - -// NewEmptyIterator creates an empty iterator. The err parameter can be -// nil, but if not nil the given err will be returned by the Error method.
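// (Usage note added editorially: NewEmptyIterator(nil) simply yields no
// pairs; the merged-iterator tests later in this diff use it to stand in
// for empty inputs. A non-nil err instead produces an iterator whose
// Error method reports that err.)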
-func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index e44291a93..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package iterator_test - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunSuite(t, "Iterator Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 0a14ca8a5..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == 
nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) - if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) - for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true) -// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will -// continue to the next 'input iterator'.
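//
// Example (added editorially, not in the original comment):
//
// m := NewMergedIterator([]Iterator{a, b, c}, comparer.DefaultComparer, true)
// defer m.Release()
// for m.Next() {
// // keys from a, b and c arrive as one globally sorted stream
// }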
-func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index 79cb970b6..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Merged iterator", func() { - Test := func(filled int, empty int) func() { - return func() { - It("Should iterate and seek correctly", func(done Done) { - rnd := testutil.NewRand() - - // Build key/value. - filledKV := make([]testutil.KeyValue, filled) - kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) - kv.Iterate(func(i int, key, value []byte) { - filledKV[rnd.Intn(filled)].Put(key, value) - }) - - // Create iterators. - iters := make([]Iterator, filled+empty) - for i := range iters { - if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { - filled-- - Expect(filledKV[filled].Len()).ShouldNot(BeZero()) - iters[i] = NewArrayIterator(filledKV[filled]) - } else { - empty-- - iters[i] = NewEmptyIterator(nil) - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with three, all filled iterators", Test(3, 0)) - Describe("with one filled, one empty iterators", Test(1, 1)) - Describe("with one filled, two empty iterators", Test(1, 2)) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index 0ab2bffcc..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 -// License, author, and contributor information can be found at the below URLs, respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -// Package journal reads and writes sequences of journals. Each journal is a stream -// of bytes that completes before the next journal starts. -// -// When reading, call Next to obtain an io.Reader for the next journal. Next will -// return io.EOF when there are no more journals.
It is valid to call Next -// without reading the current journal to exhaustion. -// -// When writing, call Next to obtain an io.Writer for the next journal. Calling -// Next finishes the current journal. Call Close to finish the final journal. -// -// Optionally, call Flush to finish the current journal and flush the underlying -// writer without starting a new journal. To start a new journal after flushing, -// call Next. -// -// Neither Readers nor Writers are safe to use concurrently. -// -// Example code: -// func read(r io.Reader) ([]string, error) { -// var ss []string -// journals := journal.NewReader(r, nil, true, true) -// for { -// j, err := journals.Next() -// if err == io.EOF { -// break -// } -// if err != nil { -// return nil, err -// } -// s, err := ioutil.ReadAll(j) -// if err != nil { -// return nil, err -// } -// ss = append(ss, string(s)) -// } -// return ss, nil -// } -// -// func write(w io.Writer, ss []string) error { -// journals := journal.NewWriter(w) -// for _, s := range ss { -// j, err := journals.Next() -// if err != nil { -// return err -// } -// if _, err := j.Write([]byte(s)); err != nil { -// return err -// } -// } -// return journals.Close() -// } -// -// The wire format is that the stream is divided into 32KiB blocks, and each -// block contains a number of tightly packed chunks. Chunks cannot cross block -// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a -// block must be zero. -// -// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 -// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) -// followed by a payload. The checksum is over the chunk type and the payload. -// -// There are four chunk types: whether the chunk is the full journal, or the -// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal -// has one first chunk, zero or more middle chunks, and one last chunk. -// -// The wire format allows for limited recovery in the face of data corruption: -// on a format error (such as a checksum mismatch), the reader moves to the -// next block and looks for the next full or first chunk. -package journal - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// These constants are part of the wire format and should not be changed. -const ( - fullChunkType = 1 - firstChunkType = 2 - middleChunkType = 3 - lastChunkType = 4 -) - -const ( - blockSize = 32 * 1024 - headerSize = 7 -) - -type flusher interface { - Flush() error -} - -// ErrCorrupted is the error type generated by a corrupted block or chunk. -type ErrCorrupted struct { - Size int - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) -} - -// Dropper is the interface that wraps the simple Drop method. The Drop -// method will be called when the journal reader drops a block or chunk. -type Dropper interface { - Drop(err error) -} - -// Reader reads journals from an underlying io.Reader. -type Reader struct { - // r is the underlying reader. - r io.Reader - // the dropper. - dropper Dropper - // strict flag. - strict bool - // checksum flag. - checksum bool - // seq is the sequence number of the current journal. - seq int - // buf[i:j] is the unread portion of the current chunk's payload.
- // The low bound, i, excludes the chunk header. - i, j int - // n is the number of bytes of buf that are valid. Once reading has started, - // only the final block can have n < blockSize. - n int - // last is whether the current chunk is the last chunk of the journal. - last bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewReader returns a new reader. The dropper may be nil, and if -// strict is true then corrupted or invalid chunk will halt the journal -// reader entirely. -func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(&ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason}) - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - m := r.n - r.j - r.i = r.n - r.j = r.n - return r.corrupt(m, "zero header", false) - } else { - m := r.n - r.j - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "checksum mismatch", false) - } - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - m := r.j - r.i - r.i = r.j - // Report the error, but skip it. - return r.corrupt(m+headerSize, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. - if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. - n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. 
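To make the header parsing in nextChunk concrete, here is a minimal standalone sketch of the 7-byte chunk layout the wire-format notes describe. It is an illustration only: it uses Go's plain CRC-32C where the real package masks its checksums via leveldb/util, so the checksum values differ from what goleveldb actually writes.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// encodeChunk lays out one chunk: 4-byte checksum, 2-byte little-endian
// payload length, 1-byte chunk type, then the payload. As the format notes
// say, the checksum covers the type byte and the payload.
// Plain CRC-32C is used here only to keep the sketch self-contained.
func encodeChunk(payload []byte, chunkType byte) []byte {
	b := make([]byte, 7+len(payload))
	b[6] = chunkType
	copy(b[7:], payload)
	binary.LittleEndian.PutUint16(b[4:6], uint16(len(payload)))
	binary.LittleEndian.PutUint32(b[0:4], crc32.Checksum(b[6:], castagnoli))
	return b
}

func main() {
	chunk := encodeChunk([]byte("payload"), 1) // 1 = fullChunkType
	fmt.Printf("checksum=%#08x length=%d type=%d\n",
		binary.LittleEndian.Uint32(chunk[0:4]),
		binary.LittleEndian.Uint16(chunk[4:6]),
		chunk[6])
}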
-func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. 
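The four-way chunk-type choice that fillHeader makes from its first/last flags can be spelled out as a small truth table. A sketch using the constant values defined at the top of this file; chunkType is a hypothetical helper, not part of the package.

package main

import "fmt"

const (
	fullChunkType   = 1
	firstChunkType  = 2
	middleChunkType = 3
	lastChunkType   = 4
)

// chunkType mirrors fillHeader's branching: a chunk that both starts and
// ends a journal is "full"; otherwise it is first, middle, or last.
func chunkType(first, last bool) byte {
	switch {
	case first && last:
		return fullChunkType
	case first:
		return firstChunkType
	case last:
		return lastChunkType
	default:
		return middleChunkType
	}
}

func main() {
	for _, c := range []struct{ first, last bool }{
		{true, true}, {true, false}, {false, false}, {false, true},
	} {
		fmt.Printf("first=%-5v last=%-5v -> type %d\n", c.first, c.last, chunkType(c.first, c.last))
	}
}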
-func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. -func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. - for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. - n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf22599..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. 
-func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. - w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. 
- if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != '1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := 
NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index 769c83be0..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" -) - -type ErrIkeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type kType int - -func (kt kType) String() string { - switch kt { - case ktDel: - return "d" - case ktVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - ktDel kType = iota - ktVal -) - -// ktSeek defines the kType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) -) - -// Maximum number encoded in bytes. 
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "" - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 5c368cd67..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
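To make the key packing above concrete: an internal key is the user key followed by eight little-endian bytes holding (seq<<8)|type, so comparison sees the user key first and the trailer decodes with a shift and a mask. A standalone sketch; pack and unpack are hypothetical names mirroring newIkey and parseIkey, minus the corruption checks.

package main

import (
	"encoding/binary"
	"fmt"
)

// pack mirrors newIkey: user key, then (seq<<8)|kt in the trailing 8 bytes.
func pack(ukey []byte, seq uint64, kt byte) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
	return ik
}

// unpack mirrors parseIkey, without the length and type validation.
func unpack(ik []byte) (ukey []byte, seq uint64, kt byte) {
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, byte(num & 0xff)
}

func main() {
	ik := pack([]byte("foo"), 100, 1) // 1 = ktVal
	ukey, seq, kt := unpack(ik)
	fmt.Printf("ukey=%s seq=%d type=%d\n", ukey, seq, kt)
}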
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestIkey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIkeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) -} - -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go 
b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index df1164374..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index 6656ce9a9..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index 4e03a7cd5..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. 
-package memdb - -import ( - "math/rand" - "sync" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. 
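randHeight above promotes a node one level with probability 1/4 (branching = 4), capped at tMaxHeight = 12, which is what keeps the skip list balanced in expectation. A quick standalone simulation of that distribution; the helper duplicates the method's logic for illustration.

package main

import (
	"fmt"
	"math/rand"
)

// randHeight reproduces DB.randHeight: keep adding levels while a
// 1-in-branching coin flip succeeds, up to maxHeight.
func randHeight(rnd *rand.Rand, maxHeight, branching int) int {
	h := 1
	for h < maxHeight && rnd.Int()%branching == 0 {
		h++
	}
	return h
}

func main() {
	rnd := rand.New(rand.NewSource(0xdeadbeef))
	counts := make([]int, 13) // heights 1..12, matching tMaxHeight
	for i := 0; i < 1000000; i++ {
		counts[randHeight(rnd, 12, 4)]++
	}
	// Roughly 3/4 of nodes stay at height 1, 3/16 reach height 2, and so on.
	fmt.Println(counts[1:])
}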
-// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. -func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accouted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:4+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[4+n] = 0 - p.prevNode[n] = 0 - } -} - -// New creates a new initalized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// The returned DB instance is goroutine-safe. 
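Putting the pieces together, basic use of this package as documented above: New with a comparer and an advisory capacity, Put/Get, an unsliced iterator, and a ranged one. The vendored import paths are assumed from this tree.

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// The capacity argument is advisory; 0 lets the buffers grow on demand.
	db := memdb.New(comparer.DefaultComparer, 0)

	db.Put([]byte("a"), []byte("1")) // errors ignored for brevity
	db.Put([]byte("b"), []byte("2"))
	db.Put([]byte("c"), []byte("3"))

	if v, err := db.Get([]byte("a")); err == nil {
		fmt.Printf("a=%s\n", v)
	}

	// A nil Range iterates everything; Release the iterator when done.
	it := db.NewIterator(nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}

	// Slicing: only keys in [b, c) are visited.
	sl := db.NewIterator(&util.Range{Start: []byte("b"), Limit: []byte("c")})
	for sl.Next() {
		fmt.Printf("sliced: %s\n", sl.Key())
	}
	sl.Release()
}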
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 7116b8f28..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index ef6a7b21f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - 
testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. - rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index 79ab9e748..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "math" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultIteratorSamplingRate = 1 * MiB - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. 
-type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. -type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. - // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. - StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. 
- BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. - // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. - CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. 
- ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // IteratorSamplingRate defines approximate gap (in bytes) between read - // sampling of an iterator. The samples will be used to determine when - // compaction should be triggered. - // - // The default is 1MiB. - IteratorSamplingRate int - - // MaxMemCompationLevel defines maximum level a newly compacted 'memdb' - // will be pushed into if doesn't creates overlap. This should less than - // NumLevel. Use -1 for level-0. - // - // The default is 2. - MaxMemCompationLevel int - - // NumLevel defines number of database level. The level shouldn't changed - // between opens, or the database will panic. - // - // The default is 7. - NumLevel int - - // OpenFilesCacher provides cache algorithm for open files caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. 
- WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity == 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity < 0 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) -} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - return 
o.DisableCompactionBackoff -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetIteratorSamplingRate() int { - if o == nil || o.IteratorSamplingRate <= 0 { - return DefaultIteratorSamplingRate - } - return o.IteratorSamplingRate -} - -func (o *Options) GetMaxMemCompationLevel() int { - level := DefaultMaxMemCompationLevel - if o != nil { - if o.MaxMemCompationLevel > 0 { - level = o.MaxMemCompationLevel - } else if o.MaxMemCompationLevel < 0 { - level = 0 - } - } - if level >= o.GetNumLevel() { - return o.GetNumLevel() - 1 - } - return level -} - -func (o *Options) GetNumLevel() int { - if o == nil || o.NumLevel <= 0 { - return DefaultNumLevel - } - return o.NumLevel -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity == 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity < 0 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader that has effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. 
Sync being true means write followed by fsync. - // - // The default value is false. - Sync bool -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index e1478b1b9..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. - if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() - - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) - - for level := 0; level < numLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index 097c0f225..000000000 --- 
a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. - stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 - - stor storage.Storage - storLock util.Releaser - o *cachedOptions - icmp *iComparer - tops *tOps - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFile storage.File - - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.manifestFile = nil - s.stVersion = nil -} - -// Release session lock. -func (s *session) release() { - s.storLock.Release() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. 
- if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - m, err := s.stor.GetManifest() - if err != nil { - return - } - - reader, err := m.Open() - if err != nil { - return - } - defer reader.Close() - strict := s.o.GetStrict(opt.StrictManifest) - jr := journal.NewReader(reader, dropper{s, m}, strict, true) - - staging := s.stVersion.newStaging() - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFile(err, m) - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFile(err, m) - if strict || !errors.IsCorrupted(err) { - return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) - } - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") - } - - s.manifestFile = m - s.setVersion(staging.finish()) - s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} - -// Pick a compaction based on current state; need external synchronization. -func (s *session) pickCompaction() *compaction { - v := s.version() - - var level int - var t0 tFiles - if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - level = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, level, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { - v := s.version() - - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. 
- if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, level, t0) -} - -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - level int - tables [2]tFiles - maxGPOverlaps uint64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes uint64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. -func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] - - t0, t1 := c.tables[0], c.tables[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.tables[0], c.tables[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. 
-func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey iKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { - // Special case for level-0 - icap = len(c.tables[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - Strict: opt.StrictOverride, - } - strict := c.s.o.GetStrict(opt.StrictCompaction) - if strict { - ro.Strict |= opt.StrictReader - } - - for i, tables := range c.tables { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. - if c.level+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, strict) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 375d49beb..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bufio" - "encoding/binary" - "io" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. 
-const (
-	recComparer    = 1
-	recJournalNum  = 2
-	recNextFileNum = 3
-	recSeqNum      = 4
-	recCompPtr     = 5
-	recDelTable    = 6
-	recAddTable    = 7
-	// 8 was used for large value refs
-	recPrevJournalNum = 9
-)
-
-type cpRecord struct {
-	level int
-	ikey  iKey
-}
-
-type atRecord struct {
-	level int
-	num   uint64
-	size  uint64
-	imin  iKey
-	imax  iKey
-}
-
-type dtRecord struct {
-	level int
-	num   uint64
-}
-
-type sessionRecord struct {
-	numLevel int
-
-	hasRec         int
-	comparer       string
-	journalNum     uint64
-	prevJournalNum uint64
-	nextFileNum    uint64
-	seqNum         uint64
-	compPtrs       []cpRecord
-	addedTables    []atRecord
-	deletedTables  []dtRecord
-
-	scratch [binary.MaxVarintLen64]byte
-	err     error
-}
-
-func (p *sessionRecord) has(rec int) bool {
-	return p.hasRec&(1<<uint(rec)) != 0
-}
-
-func (p *sessionRecord) setComparer(name string) {
-	p.hasRec |= 1 << recComparer
-	p.comparer = name
-}
-
-func (p *sessionRecord) setJournalNum(num uint64) {
-	p.hasRec |= 1 << recJournalNum
-	p.journalNum = num
-}
-
-func (p *sessionRecord) setPrevJournalNum(num uint64) {
-	p.hasRec |= 1 << recPrevJournalNum
-	p.prevJournalNum = num
-}
-
-func (p *sessionRecord) setNextFileNum(num uint64) {
-	p.hasRec |= 1 << recNextFileNum
-	p.nextFileNum = num
-}
-
-func (p *sessionRecord) setSeqNum(num uint64) {
-	p.hasRec |= 1 << recSeqNum
-	p.seqNum = num
-}
-
-func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
-	p.hasRec |= 1 << recCompPtr
-	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
-}
-
-func (p *sessionRecord) resetCompPtrs() {
-	p.hasRec &= ^(1 << recCompPtr)
-	p.compPtrs = p.compPtrs[:0]
-}
-
-func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
-	p.hasRec |= 1 << recAddTable
-	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
-}
-
-func (p *sessionRecord) resetAddedTables() {
-	p.hasRec &= ^(1 << recAddTable)
-	p.addedTables = p.addedTables[:0]
-}
-
-func (p *sessionRecord) delTable(level int, num uint64) {
-	p.hasRec |= 1 << recDelTable
-	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
-}
-
-func (p *sessionRecord) resetDeletedTables() {
-	p.hasRec &= ^(1 << recDelTable)
-	p.deletedTables = p.deletedTables[:0]
-}
-
-func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
-	if p.err != nil {
-		return
-	}
-	n := binary.PutUvarint(p.scratch[:], x)
-	_, p.err = w.Write(p.scratch[:n])
-}
-
-func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
-	if p.err != nil {
-		return
-	}
-	p.putUvarint(w, uint64(len(x)))
-	if p.err != nil {
-		return
-	}
-	_, p.err = w.Write(x)
-}
-
-func (p *sessionRecord) encode(w io.Writer) error {
-	p.err = nil
-	if p.has(recComparer) {
-		p.putUvarint(w, recComparer)
-		p.putBytes(w, []byte(p.comparer))
-	}
-	if p.has(recJournalNum) {
-		p.putUvarint(w, recJournalNum)
-		p.putUvarint(w, p.journalNum)
-	}
-	if p.has(recNextFileNum) {
-		p.putUvarint(w, recNextFileNum)
-		p.putUvarint(w, p.nextFileNum)
-	}
-	if p.has(recSeqNum) {
-		p.putUvarint(w, recSeqNum)
-		p.putUvarint(w, p.seqNum)
-	}
-	for _, r := range p.compPtrs {
-		p.putUvarint(w, recCompPtr)
-		p.putUvarint(w, uint64(r.level))
-		p.putBytes(w, r.ikey)
-	}
-	for _, r := range p.deletedTables {
-		p.putUvarint(w, recDelTable)
-		p.putUvarint(w, uint64(r.level))
-		p.putUvarint(w, r.num)
-	}
-	for _, r := range p.addedTables {
-		p.putUvarint(w, recAddTable)
-		p.putUvarint(w, uint64(r.level))
-		p.putUvarint(w, r.num)
-		p.putUvarint(w, r.size)
-		p.putBytes(w, r.imin)
-		p.putBytes(w, r.imax)
-	}
-	return p.err
-}
-
-func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
-	if p.err != nil {
-		return 0
-	}
-	x, err := binary.ReadUvarint(r)
-	if err != nil {
-		if err == io.ErrUnexpectedEOF || (!mayEOF && err == io.EOF) {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
-		} else if strings.HasPrefix(err.Error(), "leveldb/journal:") {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
-		} else {
-			p.err = err
-		}
-		return 0
-	}
-	return x
-}
-
-func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
-	return p.readUvarintMayEOF(field, r, false)
-}
-
-func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
-	if p.err != nil {
-		return nil
-	}
-	n := p.readUvarint(field, r)
-	if p.err != nil {
-		return nil
-	}
-	x := make([]byte, n)
-	_, p.err = io.ReadFull(r, x)
-	if p.err != nil {
-		if p.err == io.ErrUnexpectedEOF {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
-		}
-		return nil
-	}
-	return x
-}
-
-func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
-	if p.err != nil {
-		return 0
-	}
-	x := p.readUvarint(field, r)
-	if p.err != nil {
-		return 0
-	}
-	if x >= uint64(p.numLevel) {
-		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
-		return 0
-	}
-	return int(x)
-}
-
-func (p *sessionRecord) decode(r io.Reader) error {
-	br, ok := r.(byteReader)
-	if !ok {
-		br = bufio.NewReader(r)
-	}
-	p.err = nil
-	for p.err == nil {
-		rec := p.readUvarintMayEOF("field-header", br, true)
-		if p.err != nil {
-			if p.err == io.EOF {
-				return nil
-			}
-			return p.err
-		}
-		switch rec {
-		case recComparer:
-			x := p.readBytes("comparer", br)
-			if p.err == nil {
-				p.setComparer(string(x))
-			}
-		case recJournalNum:
-			x := p.readUvarint("journal-num", br)
-			if p.err == nil {
-				p.setJournalNum(x)
-			}
-		case recPrevJournalNum:
-			x := p.readUvarint("prev-journal-num", br)
-			if p.err == nil {
-				p.setPrevJournalNum(x)
-			}
-		case recNextFileNum:
-			x := p.readUvarint("next-file-num", br)
-			if p.err == nil {
-				p.setNextFileNum(x)
-			}
-		case recSeqNum:
-			x := p.readUvarint("seq-num", br)
-			if p.err == nil {
-				p.setSeqNum(x)
-			}
-		case recCompPtr:
-			level := p.readLevel("comp-ptr.level", br)
-			ikey := p.readBytes("comp-ptr.ikey", br)
-			if p.err == nil {
-				p.addCompPtr(level, iKey(ikey))
-			}
-		case recAddTable:
-			level := p.readLevel("add-table.level", br)
-			num := p.readUvarint("add-table.num", br)
-			size := p.readUvarint("add-table.size", br)
-			imin := p.readBytes("add-table.imin", br)
-			imax := p.readBytes("add-table.imax", br)
-			if p.err == nil {
-				p.addTable(level, num, size, imin, imax)
-			}
-		case recDelTable:
-			level := p.readLevel("del-table.level", br)
-			num := p.readUvarint("del-table.num", br)
-			if p.err == nil {
-				p.delTable(level, num)
-			}
-		}
-	}
-
-	return p.err
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
deleted file mode 100644
index c83bf8a6a..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
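// A minimal, standalone sketch (not part of the vendored code above) of the
// uvarint field framing that sessionRecord.encode and decode use: each field
// is written as a tag followed by its value, both as unsigned varints. Only
// the standard library is assumed; the tag/value pair below is illustrative.
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var b bytes.Buffer
	scratch := make([]byte, binary.MaxVarintLen64)
	// Encode: tag (recJournalNum = 2), then the value, as two uvarints.
	for _, x := range []uint64{2, 42} {
		n := binary.PutUvarint(scratch, x)
		b.Write(scratch[:n])
	}
	// Decode: read them back in the same order.
	br := bufio.NewReader(&b)
	tag, _ := binary.ReadUvarint(br)
	val, _ := binary.ReadUvarint(br)
	fmt.Println(tag, val) // Output: 2 42
}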
-
-package leveldb
-
-import (
-	"bytes"
-	"testing"
-
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
-)
-
-func decodeEncode(v *sessionRecord) (res bool, err error) {
-	b := new(bytes.Buffer)
-	err = v.encode(b)
-	if err != nil {
-		return
-	}
-	v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
-	err = v2.decode(b)
-	if err != nil {
-		return
-	}
-	b2 := new(bytes.Buffer)
-	err = v2.encode(b2)
-	if err != nil {
-		return
-	}
-	return bytes.Equal(b.Bytes(), b2.Bytes()), nil
-}
-
-func TestSessionRecord_EncodeDecode(t *testing.T) {
-	big := uint64(1) << 50
-	v := &sessionRecord{numLevel: opt.DefaultNumLevel}
-	i := uint64(0)
-	test := func() {
-		res, err := decodeEncode(v)
-		if err != nil {
-			t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
-		}
-		if !res {
-			t.Error("encode/decode test failed at iteration:", i)
-		}
-	}
-
-	for ; i < 4; i++ {
-		test()
-		v.addTable(3, big+300+i, big+400+i,
-			newIkey([]byte("foo"), big+500+1, ktVal),
-			newIkey([]byte("zoo"), big+600+1, ktDel))
-		v.delTable(4, big+700+i)
-		v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
-	}
-
-	v.setComparer("foo")
-	v.setJournalNum(big + 100)
-	v.setPrevJournalNum(big + 99)
-	v.setNextFileNum(big + 200)
-	v.setSeqNum(big + 1000)
-	test()
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
deleted file mode 100644
index 6e10d20e5..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	"fmt"
-	"sync/atomic"
-
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
-)
-
-type dropper struct {
-	s    *session // Logging.
-	file storage.File
-}
-
-func (d dropper) Drop(err error) {
-	if e, ok := err.(*journal.ErrCorrupted); ok {
-		d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
-	} else {
-		d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
-	}
-}
-
-func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
-func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
-
-// File utils.
-
-func (s *session) getJournalFile(num uint64) storage.File {
-	return s.stor.GetFile(num, storage.TypeJournal)
-}
-
-func (s *session) getTableFile(num uint64) storage.File {
-	return s.stor.GetFile(num, storage.TypeTable)
-}
-
-func (s *session) getFiles(t storage.FileType) ([]storage.File, error) {
-	return s.stor.GetFiles(t)
-}
-
-func (s *session) newTemp() storage.File {
-	num := atomic.AddUint64(&s.stTempFileNum, 1) - 1
-	return s.stor.GetFile(num, storage.TypeTemp)
-}
-
-func (s *session) tableFileFromRecord(r atRecord) *tFile {
-	return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
-}
-
-// Session state.
-
-// Get current version. This will incr version ref, must call
-// version.release (exactly once) after use.
-func (s *session) version() *version {
-	s.vmu.Lock()
-	defer s.vmu.Unlock()
-	s.stVersion.ref++
-	return s.stVersion
-}
-
-// Set current version to v.
-func (s *session) setVersion(v *version) { - s.vmu.Lock() - v.ref = 1 // Holds by session. - if old := s.stVersion; old != nil { - v.ref++ // Holds by old version. - old.next = v - old.releaseNB() - } - s.stVersion = v - s.vmu.Unlock() -} - -// Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num uint64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num uint64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. -func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum - } - - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum - } - - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum - } - - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if s.manifestFile != nil { - s.manifestFile.Remove() - } - s.manifestFile = file - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - file.Remove() - s.reuseFileNum(num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetManifest(file) - return -} - -// Flush record to disk. 
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - err = s.manifestWriter.Sync() - if err != nil { - return - } - s.recordCommited(rec) - return -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 34f05d45d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesytem-backed storage implementation with the given -// path. This also hold a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling Close method. -func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) 
-} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. 
- if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. - if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) - return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. 
- if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func (f *file) parse(name string) bool { - var num uint64 - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) - if err == nil { - switch tail { - case "log": - f.t = TypeJournal - case "ldb", "sst": - f.t = TypeTable - case "tmp": - f.t = TypeTemp - default: - return false - } - f.num = num - return true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) - if n == 1 { - f.t = TypeManifest - f.num = num - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index 42940d769..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "os" - "path/filepath" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 102031bfd..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
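// A minimal, standalone sketch (not part of the vendored code above) of the
// file-naming scheme that file.name and file.parse round-trip: "%06d.log",
// "%06d.ldb" (or the legacy ".sst"), "%06d.tmp", and "MANIFEST-%06d". The
// value 42 is illustrative; only the standard library is assumed.
package main

import "fmt"

func main() {
	name := fmt.Sprintf("%06d.ldb", 42) // "000042.ldb"
	var num uint64
	var tail string
	// Sscanf mirrors the parse direction: number, literal dot, extension.
	if _, err := fmt.Sscanf(name, "%d.%s", &num, &tail); err == nil {
		fmt.Println(num, tail) // Output: 42 ldb
	}
}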
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - flock.Type = syscall.F_WRLCK - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb7d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d0a604b7a..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
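// A minimal, standalone sketch (not part of the vendored code) of the
// advisory lock behind the LOCK file that TestFileStorage_Locking above
// exercises and that setFileLock below implements: an exclusive, non-blocking
// flock that fails immediately if another descriptor already holds it.
// Unix-only; the path is illustrative.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.OpenFile("/tmp/example.LOCK", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// LOCK_NB makes Flock return EWOULDBLOCK instead of waiting.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		fmt.Println("already locked:", err)
		return
	}
	fmt.Println("lock acquired")
	syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
}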
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - how := syscall.LOCK_UN - if lock { - how = syscall.LOCK_EX - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 50c3c454e..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index d3f3e136e..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
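Both deleted lock implementations above serve the same contract: newFileLock either takes an exclusive, non-blocking lock on the repository's lock file or fails fast. A minimal standalone sketch of the Unix variant's flock(2) pattern (Unix-only; /tmp/demo.lock is an arbitrary path chosen for the demo):

```go
// lockdemo.go — Unix-only sketch of the pattern used by setFileLock
// above: open-or-create the lock file, then take an exclusive,
// non-blocking flock(2). A held lock makes a second attempt fail
// with EWOULDBLOCK instead of blocking.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func lockFile(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := lockFile("/tmp/demo.lock")
	if err != nil {
		fmt.Println("first lock failed:", err)
		return
	}
	defer f.Close() // closing the descriptor releases the flock

	// flock locks belong to the open file description, so a second
	// open of the same path conflicts even within one process.
	if _, err := lockFile("/tmp/demo.lock"); err != nil {
		fmt.Println("second lock refused, as expected:", err)
	}
}
```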
-
-package storage
-
-import (
- "bytes"
- "os"
- "sync"
-
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
-)
-
-const typeShift = 3
-
-type memStorageLock struct {
- ms *memStorage
-}
-
-func (lock *memStorageLock) Release() {
- ms := lock.ms
- ms.mu.Lock()
- defer ms.mu.Unlock()
- if ms.slock == lock {
- ms.slock = nil
- }
- return
-}
-
-// memStorage is a memory-backed storage.
-type memStorage struct {
- mu sync.Mutex
- slock *memStorageLock
- files map[uint64]*memFile
- manifest *memFilePtr
-}
-
-// NewMemStorage returns a new memory-backed storage implementation.
-func NewMemStorage() Storage {
- return &memStorage{
- files: make(map[uint64]*memFile),
- }
-}
-
-func (ms *memStorage) Lock() (util.Releaser, error) {
- ms.mu.Lock()
- defer ms.mu.Unlock()
- if ms.slock != nil {
- return nil, ErrLocked
- }
- ms.slock = &memStorageLock{ms: ms}
- return ms.slock, nil
-}
-
-func (*memStorage) Log(str string) {}
-
-func (ms *memStorage) GetFile(num uint64, t FileType) File {
- return &memFilePtr{ms: ms, num: num, t: t}
-}
-
-func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
- ms.mu.Lock()
- var ff []File
- for x, _ := range ms.files {
- num, mt := x>>typeShift, FileType(x)&TypeAll
- if mt&t == 0 {
- continue
- }
- ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
- }
- ms.mu.Unlock()
- return ff, nil
-}
-
-func (ms *memStorage) GetManifest() (File, error) {
- ms.mu.Lock()
- defer ms.mu.Unlock()
- if ms.manifest == nil {
- return nil, os.ErrNotExist
- }
- return ms.manifest, nil
-}
-
-func (ms *memStorage) SetManifest(f File) error {
- fm, ok := f.(*memFilePtr)
- if !ok || fm.t != TypeManifest {
- return ErrInvalidFile
- }
- ms.mu.Lock()
- ms.manifest = fm
- ms.mu.Unlock()
- return nil
-}
-
-func (*memStorage) Close() error { return nil }
-
-type memReader struct {
- *bytes.Reader
- m *memFile
-}
-
-func (mr *memReader) Close() error {
- return mr.m.Close()
-}
-
-type memFile struct {
- bytes.Buffer
- ms *memStorage
- open bool
-}
-
-func (*memFile) Sync() error { return nil }
-func (m *memFile) Close() error {
- m.ms.mu.Lock()
- m.open = false
- m.ms.mu.Unlock()
- return nil
-}
-
-type memFilePtr struct {
- ms *memStorage
- num uint64
- t FileType
-}
-
-func (p *memFilePtr) x() uint64 {
- return p.Num()<<typeShift | uint64(p.t)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
deleted file mode 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
+++ /dev/null
-// Copyright (c) 2013, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
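The files map above keys every file by a single uint64 that packs the file number and its FileType flag; GetFiles unpacks it the same way. A tiny round-trip sketch of that encoding follows. One caveat worth noting: mem_storage.go uses typeShift = 3, which leaves only three bits below the number even though TypeTemp is the fourth flag (1 << 3); the test harness later in this diff uses a shift of 4, and the sketch follows the latter so all four flags round-trip cleanly:

```go
package main

import "fmt"

type FileType uint32

const (
	TypeManifest FileType = 1 << iota
	TypeJournal
	TypeTable
	TypeTemp

	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
)

// With a shift of 4 all four type flags fit below the file number.
const typeShift = 4

func pack(num uint64, t FileType) uint64 { return num<<typeShift | uint64(t) }

func unpack(x uint64) (num uint64, t FileType) {
	return x >> typeShift, FileType(x) & TypeAll
}

func main() {
	x := pack(42, TypeTable)
	num, t := unpack(x)
	fmt.Printf("packed=%#x num=%d type=%04b\n", x, num, t)
	// packed=0x2a4 num=42 type=0100
}
```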
-
-package storage
-
-import (
- "bytes"
- "testing"
-)
-
-func TestMemStorage(t *testing.T) {
- m := NewMemStorage()
-
- l, err := m.Lock()
- if err != nil {
- t.Fatal("storage lock failed(1): ", err)
- }
- _, err = m.Lock()
- if err == nil {
- t.Fatal("expect error for second storage lock attempt")
- } else {
- t.Logf("storage lock got error: %s (expected)", err)
- }
- l.Release()
- _, err = m.Lock()
- if err != nil {
- t.Fatal("storage lock failed(2): ", err)
- }
-
- f := m.GetFile(1, TypeTable)
- if f.Num() != 1 && f.Type() != TypeTable {
- t.Fatal("invalid file number and type")
- }
- w, _ := f.Create()
- w.Write([]byte("abc"))
- w.Close()
- if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 {
- t.Fatal("invalid GetFiles len")
- }
- buf := new(bytes.Buffer)
- r, err := f.Open()
- if err != nil {
- t.Fatal("Open: got error: ", err)
- }
- buf.ReadFrom(r)
- r.Close()
- if got := buf.String(); got != "abc" {
- t.Fatalf("Read: invalid value, want=abc got=%s", got)
- }
- if _, err := f.Open(); err != nil {
- t.Fatal("Open: got error: ", err)
- }
- if _, err := m.GetFile(1, TypeTable).Open(); err == nil {
- t.Fatal("expecting error")
- }
- f.Remove()
- if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 {
- t.Fatal("invalid GetFiles len", len(ff))
- }
- if _, err := f.Open(); err == nil {
- t.Fatal("expecting error")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
deleted file mode 100644
index 0914bea89..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package storage provides storage abstraction for LevelDB.
-package storage
-
-import (
- "errors"
- "fmt"
- "io"
-
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type FileType uint32
-
-const (
- TypeManifest FileType = 1 << iota
- TypeJournal
- TypeTable
- TypeTemp
-
- TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
-)
-
-func (t FileType) String() string {
- switch t {
- case TypeManifest:
- return "manifest"
- case TypeJournal:
- return "journal"
- case TypeTable:
- return "table"
- case TypeTemp:
- return "temp"
- }
- return fmt.Sprintf("<unknown:%d>", t)
-}
-
-var (
- ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
- ErrLocked = errors.New("leveldb/storage: already locked")
- ErrClosed = errors.New("leveldb/storage: closed")
-)
-
-// Syncer is the interface that wraps basic Sync method.
-type Syncer interface {
- // Sync commits the current contents of the file to stable storage.
- Sync() error
-}
-
-// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
-// methods.
-type Reader interface {
- io.ReadSeeker
- io.ReaderAt
- io.Closer
-}
-
-// Writer is the interface that groups the basic Write, Sync and Close
-// methods.
-type Writer interface {
- io.WriteCloser
- Syncer
-}
-
-// File is the file. A file instance must be goroutine-safe.
-type File interface {
- // Open opens the file for read. Returns os.ErrNotExist error
- // if the file does not exist.
- // Returns ErrClosed if the underlying storage is closed.
- Open() (r Reader, err error)
-
- // Create creates the file for writing. Truncates the file if it
- // already exists.
- // Returns ErrClosed if the underlying storage is closed.
- Create() (w Writer, err error)
-
- // Replace replaces file with newfile.
- // Returns ErrClosed if the underlying storage is closed.
- Replace(newfile File) error
-
- // Type returns the file type.
- Type() FileType
-
- // Num returns the file number.
- Num() uint64
-
- // Remove removes the file.
- // Returns ErrClosed if the underlying storage is closed.
- Remove() error
-}
-
-// Storage is the storage. A storage instance must be goroutine-safe.
-type Storage interface {
- // Lock locks the storage. Any subsequent attempt to call Lock will fail
- // until the last lock is released.
- // After use the caller should call the Release method.
- Lock() (l util.Releaser, err error)
-
- // Log logs a string. This is used for logging. An implementation
- // may write to a file, stdout or simply do nothing.
- Log(str string)
-
- // GetFile returns a file for the given number and type. GetFile will never
- // return nil, even if the underlying storage is closed.
- GetFile(num uint64, t FileType) File
-
- // GetFiles returns a slice of files that match the given file types.
- // The file types may be OR'ed together.
- GetFiles(t FileType) ([]File, error)
-
- // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
- // file does not exist.
- GetManifest() (File, error)
-
- // SetManifest sets the given file as manifest file. The given file should
- // be a manifest file type or an error will be returned.
- SetManifest(f File) error
-
- // Close closes the storage. It is valid to call Close multiple times.
- // Other methods should not be called after the storage has been closed.
- Close() error
-}
-
-// FileInfo wraps basic file info.
-type FileInfo struct {
- Type FileType
- Num uint64
-}
-
-func (fi FileInfo) String() string {
- switch fi.Type {
- case TypeManifest:
- return fmt.Sprintf("MANIFEST-%06d", fi.Num)
- case TypeJournal:
- return fmt.Sprintf("%06d.log", fi.Num)
- case TypeTable:
- return fmt.Sprintf("%06d.ldb", fi.Num)
- case TypeTemp:
- return fmt.Sprintf("%06d.tmp", fi.Num)
- default:
- return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
- }
-}
-
-// NewFileInfo creates a new FileInfo from the given File. It will return nil
-// if File is nil.
-func NewFileInfo(f File) *FileInfo {
- if f == nil {
- return nil
- }
- return &FileInfo{f.Type(), f.Num()}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
deleted file mode 100644
index 482ae83ad..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
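Taken together, these interfaces keep the storage contract small: take the singleton Lock, address files by (number, type), write through a Writer, read back through a Reader. A hedged usage sketch against the in-memory implementation from earlier in this diff (same vendored import path; not how go-ipfs itself drives the API):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	stor := storage.NewMemStorage()

	// Only one Lock may be outstanding at a time; Release frees it.
	l, err := stor.Lock()
	if err != nil {
		panic(err)
	}
	defer l.Release()

	// (1, TypeJournal) corresponds to the file name "000001.log".
	f := stor.GetFile(1, storage.TypeJournal)

	w, err := f.Create()
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello"))
	w.Sync()
	w.Close()

	r, err := f.Open()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", b) // hello
}
```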
-
-package leveldb
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "os"
- "path/filepath"
- "sync"
- "testing"
-
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
-)
-
-const typeShift = 4
-
-var (
- tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
- tsErrFileOpen = errors.New("leveldb.testStorage: file still open")
-)
-
-var (
- tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
- tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
- tsKeepFS = tsFSEnv == "2"
- tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
- tsMU = &sync.Mutex{}
- tsNum = 0
-)
-
-type tsOp uint
-
-const (
- tsOpOpen tsOp = iota
- tsOpCreate
- tsOpRead
- tsOpReadAt
- tsOpWrite
- tsOpSync
-
- tsOpNum
-)
-
-type tsLock struct {
- ts *testStorage
- r util.Releaser
-}
-
-func (l tsLock) Release() {
- l.r.Release()
- l.ts.t.Log("I: storage lock released")
-}
-
-type tsReader struct {
- tf tsFile
- storage.Reader
-}
-
-func (tr tsReader) Read(b []byte) (n int, err error) {
- ts := tr.tf.ts
- ts.countRead(tr.tf.Type())
- if tr.tf.shouldErrLocked(tsOpRead) {
- return 0, errors.New("leveldb.testStorage: emulated read error")
- }
- n, err = tr.Reader.Read(b)
- if err != nil && err != io.EOF {
- ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
- }
- return
-}
-
-func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
- ts := tr.tf.ts
- ts.countRead(tr.tf.Type())
- if tr.tf.shouldErrLocked(tsOpReadAt) {
- return 0, errors.New("leveldb.testStorage: emulated readAt error")
- }
- n, err = tr.Reader.ReadAt(b, off)
- if err != nil && err != io.EOF {
- ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
- }
- return
-}
-
-func (tr tsReader) Close() (err error) {
- err = tr.Reader.Close()
- tr.tf.close("reader", err)
- return
-}
-
-type tsWriter struct {
- tf tsFile
- storage.Writer
-}
-
-func (tw tsWriter) Write(b []byte) (n int, err error) {
- if tw.tf.shouldErrLocked(tsOpWrite) {
- return 0, errors.New("leveldb.testStorage: emulated write error")
- }
- n, err = tw.Writer.Write(b)
- if err != nil {
- tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
- }
- return
-}
-
-func (tw tsWriter) Sync() (err error) {
- ts := tw.tf.ts
- ts.mu.Lock()
- for ts.emuDelaySync&tw.tf.Type() != 0 {
- ts.cond.Wait()
- }
- ts.mu.Unlock()
- if tw.tf.shouldErrLocked(tsOpSync) {
- return errors.New("leveldb.testStorage: emulated sync error")
- }
- err = tw.Writer.Sync()
- if err != nil {
- tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
- }
- return
-}
-
-func (tw tsWriter) Close() (err error) {
- err = tw.Writer.Close()
- tw.tf.close("writer", err)
- return
-}
-
-type tsFile struct {
- ts *testStorage
- storage.File
-}
-
-func (tf tsFile) x() uint64 {
- return tf.Num()<<typeShift | uint64(tf.Type())
-}
-
-func (ts *testStorage) CloseCheck() {
- ts.mu.Lock()
- if len(ts.opens) == 0 {
- ts.t.Log("I: all files are closed")
- } else {
- ts.t.Errorf("E: %d files still open", len(ts.opens))
- for x, writer := range ts.opens {
- num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
- ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
- }
- }
- ts.mu.Unlock()
-}
-
-func newTestStorage(t *testing.T) *testStorage {
- var stor storage.Storage
- var closeFn func() error
- if tsFS {
- for {
- tsMU.Lock()
- num := tsNum
- tsNum++
- tsMU.Unlock()
- tempdir := tsTempdir
- if tempdir == "" {
- tempdir = os.TempDir()
- }
- path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
- if _, err := os.Stat(path); err != nil {
- stor, err
= storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index 18dd9a665..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - file storage.File - seekLeft int32 - size uint64 - imin, imax iKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { - f := &tFile{ - file: file, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. 
Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.file.Num()) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.file.Num() < b.file.Num() - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. 
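The seek-allowance arithmetic in newTableFile above rewards a concrete check: at one allowed seek per 16 KiB with a floor of 100, every table up to about 1.6 MiB gets the same 100-seek budget. A minimal standalone sketch of the same formula, before the overlap search below:

```go
package main

import "fmt"

// Mirrors newTableFile above: one allowed seek per 16 KiB of table,
// floored at 100 so small tables are not compacted after a handful
// of misses.
func seekAllowance(size uint64) int32 {
	n := int32(size / 16384)
	if n < 100 {
		n = 100
	}
	return n
}

func main() {
	for _, size := range []uint64{4 << 10, 1 << 20, 2 << 20, 64 << 20} {
		fmt.Printf("%9d bytes -> %5d seeks before compaction\n", size, seekAllowance(size))
	}
	// 4096 and 1048576 both hit the floor of 100;
	// 2097152 -> 128, 67108864 -> 4096.
}
```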
-func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. 
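The open method whose implementation follows leans on the cache's get-or-create idiom: the loader runs only on a miss, and the returned handle pins the value until released. The sketch below imitates that pattern with a plain mutex-guarded map; lazyCache and handle are hypothetical stand-ins, not the real leveldb/cache types:

```go
package main

import (
	"fmt"
	"sync"
)

// A minimal get-or-create cache in the spirit of cache.Cache.Get:
// the loader runs only when the key is absent, and callers receive
// a handle they must Release (a no-op here, kept only for shape).
type handle struct{ value interface{} }

func (h *handle) Release() {} // real handles unpin the cache entry

type lazyCache struct {
	mu sync.Mutex
	m  map[uint64]interface{}
}

func (c *lazyCache) Get(key uint64, load func() (interface{}, error)) (*handle, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.m[key]; ok {
		return &handle{v}, nil
	}
	v, err := load()
	if err != nil {
		return nil, err
	}
	c.m[key] = v
	return &handle{v}, nil
}

func main() {
	c := &lazyCache{m: make(map[uint64]interface{})}
	loads := 0
	open := func() (interface{}, error) { loads++; return "table-reader", nil }

	h1, _ := c.Get(7, open)
	h2, _ := c.Get(7, open) // served from cache; loader not rerun
	defer h1.Release()
	defer h2.Release()
	fmt.Println(h1.value, loads) // table-reader 1
}
```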
-func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - num := f.file.Num() - ch = t.cache.Get(0, num, func() (size int, value cache.Value) { - var r storage.Reader - r, err = f.file.Open() - if err != nil { - return 0, nil - } - - var bcache *cache.CacheGetter - if t.bcache != nil { - bcache = &cache.CacheGetter{Cache: t.bcache, NS: num} - } - - var tr *table.Reader - tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. -func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).Find(key, true, ro) -} - -// Finds key that is greater than or equal to the given key. -func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).FindKey(key, true, ro) -} - -// Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { - ch, err := t.open(f) - if err != nil { - return - } - defer ch.Release() - offset_, err := ch.Value().(*table.Reader).OffsetOf(key) - return uint64(offset_), err -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - ch, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := ch.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(ch) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - num := f.file.Num() - t.cache.Delete(0, num, func() { - if err := f.file.Remove(); err != nil { - t.s.logf("table@remove removing @%d %q", num, err) - } else { - t.s.logf("table@remove removed @%d", num) - } - if t.bcache != nil { - t.bcache.EvictNS(num) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.bpool.Close() - t.cache.Close() - if t.bcache != nil { - t.bcache.Close() - } -} - -// Creates new initialized table ops instance. -func newTableOps(s *session) *tOps { - var ( - cacher cache.Cacher - bcache *cache.Cache - ) - if s.o.GetOpenFilesCacheCapacity() > 0 { - cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) - } - if !s.o.DisableBlockCache { - var bcacher cache.Cacher - if s.o.GetBlockCacheCapacity() > 0 { - bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity()) - } - bcache = cache.NewCache(bcacher) - } - return &tOps{ - s: s, - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), - } -} - -// tWriter wraps the table writer. It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - file storage.File - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. -func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) - } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. 
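tWriter, whose remaining methods follow, is a thin shim over table.Writer. A hedged end-to-end sketch of that writer API, assuming the vendored import paths used throughout this tree (keys must arrive in ascending order, as tOps.createFrom guarantees by draining a sorted iterator):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	var buf bytes.Buffer
	tw := table.NewWriter(&buf, &opt.Options{})

	// Keys must be appended in ascending order, exactly as
	// tOps.createFrom feeds them from a sorted iterator.
	for _, kv := range [][2]string{{"a", "1"}, {"b", "2"}, {"c", "3"}} {
		if err := tw.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
			panic(err)
		}
	}
	// Close writes the index block and footer, finalizing the table.
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("entries=%d bytes=%d\n", tw.EntriesLen(), buf.Len())
}
```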
-func (w *tWriter) empty() bool { - return w.first == nil -} - -// Closes the storage.Writer. -func (w *tWriter) close() { - if w.w != nil { - w.w.Close() - w.w = nil - } -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - defer w.close() - err = w.tw.Close() - if err != nil { - return - } - err = w.w.Sync() - if err != nil { - return - } - f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) - return -} - -// Drops the table. -func (w *tWriter) drop() { - w.close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.file = nil - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 8622228ce..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. 
- testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. - bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 152923d26..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1106 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
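The reader implementation that follows spends much of its time decoding prefix-compressed block entries: three uvarints (shared key length, unshared key length, value length) followed by the unshared key bytes and the value, with restart points storing a full key (shared = 0). A small encoder sketch of that layout, matching what block.entry below decodes:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func appendUvarint(dst []byte, x uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], x)
	return append(dst, tmp[:n]...)
}

// appendEntry encodes one prefix-compressed block entry:
// uvarint(shared), uvarint(len(key)-shared), uvarint(len(value)),
// then the unshared key bytes and the value.
func appendEntry(dst []byte, prevKey, key, value []byte) []byte {
	shared := 0
	for shared < len(prevKey) && shared < len(key) && prevKey[shared] == key[shared] {
		shared++
	}
	dst = appendUvarint(dst, uint64(shared))
	dst = appendUvarint(dst, uint64(len(key)-shared))
	dst = appendUvarint(dst, uint64(len(value)))
	dst = append(dst, key[shared:]...)
	dst = append(dst, value...)
	return dst
}

func main() {
	var b []byte
	b = appendEntry(b, nil, []byte("apple"), []byte("1"))
	b = appendEntry(b, []byte("apple"), []byte("apply"), []byte("2"))
	fmt.Printf("% x\n", b)
	// second entry shares "appl" (4 bytes): 04 01 01 'y' '2'
}
```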
- -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset += 1 // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. 
- i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. - n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri -= 1 - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. - if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i 
*indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fi *storage.FileInfo - reader io.ReaderAt - cache *cache.CacheGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{File: r.fi, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if 
fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be nil. 
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not goroutine-safe and should be released -// when not used. -// -// Also read Iterator documentation of the leveldb/iterator package. 
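From the caller's side, the contract just described looks like this; a sketch under the same vendored import-path assumption as the writer example earlier (nil fi, cache and bpool are allowed per the NewReader documentation further down):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// Write a tiny table into memory first.
	var buf bytes.Buffer
	tw := table.NewWriter(&buf, &opt.Options{})
	for _, k := range []string{"a", "b", "c", "d"} {
		tw.Append([]byte(k), []byte("v-"+k))
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	tr, err := table.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()),
		nil, nil, nil, &opt.Options{})
	if err != nil {
		panic(err)
	}
	defer tr.Release()

	// Half-open slice [b, d); a nil Start or Limit would mean
	// "from the first key" / "past the last key" respectively.
	it := tr.NewIterator(&util.Range{Start: []byte("b"), Limit: []byte("d")}, nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value()) // b=v-b, then c=v-c
	}
	if err := it.Error(); err != nil {
		panic(err)
	}
}
```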
-func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if !index.Seek(key) { - err = index.Error() - if err == nil { - err = ErrNotFound - } - return - } - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - err = ferr - return - } - } - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - defer data.Release() - if !data.Seek(key) { - err = data.Error() - if err == nil { - err = ErrNotFound - } - return - } - // Don't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Use block buffer, and since the buffer will be recycled, the buffer - // need to be copied. - value = append([]byte{}, data.Value()...) - } - } - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// Find finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. 
-// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. -func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fi: fi, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. - metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. 
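For orientation while reading NewReader, which continues below: the 48-byte footer parsed above is just two uvarint-encoded block handles, zero padding, and an 8-byte magic. A sketch of that layout using the helpers from table.go (decodeFooter itself is a hypothetical helper, not part of the package):

// decodeFooter splits a table footer into its metaindex and index
// block handles, mirroring the checks performed by NewReader.
func decodeFooter(footer []byte) (metaBH, indexBH blockHandle, ok bool) {
	if len(footer) != footerLen || string(footer[footerLen-len(magic):]) != magic {
		return // wrong size or bad magic number
	}
	var n, m int
	metaBH, n = decodeBlockHandle(footer)
	indexBH, m = decodeBlockHandle(footer[n:])
	return metaBH, indexBH, n != 0 && m != 0
}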
- metaIter := r.newBlockIter(metaBlock, nil, nil, true) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - r.filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - r.filter = f0 - break - } - } - } - if r.filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - r.filterBH = filterBH - // Update data end. - r.dataEnd = int64(filterBH.offset) - break - } - } - metaIter.Release() - metaBlock.Release() - - // Cache index and filter block locally, since we don't have global cache. - if cache == nil { - r.indexBlock, err = r.readBlock(r.indexBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - if r.filter != nil { - r.filterBlock, err = r.readFilterBlock(r.filterBH) - if err != nil { - if !errors.IsCorrupted(err) { - return nil, err - } - - // Don't use filter then. - r.filter = nil - } - } - } - - return r, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index beacdc1f0..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. -package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. 
-*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. -Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. - -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... | block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... | restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------------+------------------+ - | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
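The 5-byte block trailer described in the format comments can be reproduced with the standard library alone; the file-format constants continue below. A sketch, assuming "encoding/binary" and "hash/crc32" imports; note that the package's util.NewCRC additionally applies LevelDB-style masking to the raw CRC value, which is omitted here for clarity:

// appendTrailer appends a block trailer: one compression-type byte,
// then a little-endian CRC-32 (Castagnoli polynomial) computed over
// the block contents plus that type byte.
func appendTrailer(block []byte, compressionType byte) []byte {
	block = append(block, compressionType)
	sum := crc32.Checksum(block, crc32.MakeTable(crc32.Castagnoli))
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], sum)
	return append(block, buf[:]...)
}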
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index bd8a086e2..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 105f62b0f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. 
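Returning to the blockHandle helpers defined at the top of this hunk, before the test files: because both fields are uvarints, a handle's encoded size depends on the magnitudes involved. A quick round-trip sketch:

var buf [2 * binary.MaxVarintLen64]byte
n := encodeBlockHandle(buf[:], blockHandle{offset: 300, length: 1000})
// 300 and 1000 each need two varint bytes, so n == 4 here.
bh, m := decodeBlockHandle(buf[:n])
// bh == blockHandle{offset: 300, length: 1000} and m == n; a decoded
// m of 0 signals a truncated or corrupt handle.
_, _ = bh, m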
- tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index e15df55e9..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. - w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. 
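blockWriter.append below implements exactly the shared-prefix scheme from the deck/dock/duck example in table.go. A sketch reproducing entry two ("dock" following "deck") by hand, assuming "bytes" and "encoding/binary" imports:

shared := sharedPrefixLen([]byte("deck"), []byte("dock")) // 1: only 'd' is shared
var buf bytes.Buffer
var scratch [binary.MaxVarintLen64]byte
for _, v := range []uint64{uint64(shared), uint64(len("dock") - shared), uint64(len("v2"))} {
	n := binary.PutUvarint(scratch[:], v)
	buf.Write(scratch[:n])
}
buf.WriteString("dock"[shared:]) // the unshared suffix "ock"
buf.WriteString("v2")
// buf now holds 1 | 3 | 2 | "ock" | "v2", matching the format comment.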
- scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. - if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - var compressed []byte - compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) - if err != nil { - return - } - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. - w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. 
Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. - if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not goroutine-safe. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. - w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index 5a499fb91..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) 
- t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e81..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index 0e4a7f109..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
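Any key/value store can be driven by the DoDBTesting harness above; it only needs the three Test* methods. A minimal sketch with a hypothetical map-backed store (memDB and fixture are not part of this package; fixture is a pre-built KeyValue of expected pairs):

type memDB map[string][]byte

func (m memDB) TestPut(key, value []byte) error {
	m[string(key)] = append([]byte{}, value...)
	return nil
}

func (m memDB) TestDelete(key []byte) error {
	delete(m, string(key))
	return nil
}

func (m memDB) TestGet(key []byte) ([]byte, error) {
	v, ok := m[string(key)]
	if !ok {
		return nil, errors.ErrNotFound // the vendored goleveldb errors package
	}
	return v, nil
}

// DoDBTesting(&DBTesting{DB: memDB{}, Deleted: fixture.Clone()}) then
// exercises the store with random put/delete rounds, re-checking
// presence and absence after every action.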
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := 
BytesSeparator(key0, key1) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 15f7726b2..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
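IteratorTesting above keeps a cursor alongside the iterator it drives: Pos == -1 means start-of-iteration, Pos == Len() means end-of-iteration, and every action re-verifies the iterator against the expected KeyValue. A brief sketch, where kv is a KeyValue fixture and iter any iterator.Iterator over the same data (both assumed to come from the code under test):

t := IteratorTesting{
	KeyValue: kv.Clone(), // expected contents, in key order
	Iter:     iter,
}
t.First() // Pos -> 0, key/value asserted against kv
t.Next()  // Pos -> 1
t.SOI()   // Prev() until the cursor sits before the first entry again
iter.Release()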
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, 
value []byte)) { - ShuffledIndex(rnd, kv.Len(), 1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - 
if rnd == nil { - rnd = NewRand() - } - if maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index ad244a9d1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - } - }) - - It("Should return error if the key is not present", func() { - if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. 
- rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) - } - }) - - TestIter := func(r *util.Range, _kv KeyValue) { - if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() - } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 5695bda91..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
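To run a store against every fixture above, only the setup and teardown callbacks need to be supplied. A sketch in the style of the table tests earlier in this diff, with hypothetical myOpen/myClose helpers and the usual ginkgo dot-imports assumed:

var _ = testutil.Defer(func() {
	Describe("my store", func() {
		testutil.AllKeyValueTesting(nil, nil,
			func(kv testutil.KeyValue) testutil.DB { return myOpen(kv) }, // setup
			func(p testutil.DB) { myClose(p) },                           // teardown
		)
	})
})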
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<<typeCount | uint64(t) -} - -func unpackFile(x uint64) (uint64, storage.FileType) { - return x >> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF {
- r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - 
-type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close 
func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 918c86510..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
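Two packing tricks carry the storage harness above: the counter arrays are indexed by flattening a (StorageMode, FileType) pair into one slot via `flattenType`, and the `opens` map keys each open file by packing its number and type into a single `uint64` via `packFile`. Neither encoding is shown in this hunk, so the sketch below assumes an 8-bit type field; goleveldb's actual `packFile` layout may differ.

```go
// Sketch of packing a file number and type into one map key, as the
// testutil storage does with packFile/unpackFile. The 8-bit type width
// is an assumption for illustration.
package main

import "fmt"

type FileType uint8

const (
	TypeManifest FileType = 1 << iota
	TypeJournal
	TypeTable
	TypeTemp
)

// packFile encodes (num, t) into a single uint64: the type occupies the
// low 8 bits, the file number the remaining 56.
func packFile(num uint64, t FileType) uint64 {
	return num<<8 | uint64(t)
}

// unpackFile reverses packFile.
func unpackFile(x uint64) (uint64, FileType) {
	return x >> 8, FileType(x) // conversion keeps only the low 8 bits
}

func main() {
	opens := map[uint64]bool{} // true=writer, false=reader, as above
	opens[packFile(42, TypeTable)] = true

	for k, writer := range opens {
		num, t := unpackFile(k)
		fmt.Printf("num=%d type=%v writer=%v\n", num, t, writer)
	}
}
```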
- -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) - delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 930ac0165..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 974215244..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." 
+ str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type files []storage.File - -func (p files) Len() int { - return len(p) -} - -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() -} - -func (p files) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (p files) sort() { - sort.Sort(p) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de24255..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. 
-func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. 
-// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. 
To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2b8453d75..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. 
-func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. -func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d96739c..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. 
-func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. -func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected ; got 
%q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. 
-func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. -func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d610..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 54903660f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/binary" -) - -// Hash return hash of the given data. 
-func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) - - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w - h *= m - h ^= (h >> 16) - } - - rest := buf.Bytes() - switch len(rest) { - default: - panic("not reached") - case 3: - h += uint32(rest[2]) << 16 - fallthrough - case 2: - h += uint32(rest[1]) << 8 - fallthrough - case 1: - h += uint32(rest[0]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd41f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03be..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} - -// BytesPrefix returns key range that satisfy the given prefix. -// This only applicable for the standard 'bytes comparer'. 
-func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index f35976865..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already relesed") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always success - // and can be called multipe times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser already present or coresponding - // resource is already released. Releaser should be cleared first - // before assigned a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether Release method already called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index e3cc28d2d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
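The `BytesPrefix` helper above builds a half-open `Range` covering every key that starts with a given prefix: the exclusive `Limit` is the prefix with its last byte below 0xff incremented by one, and a prefix consisting entirely of 0xff bytes yields a nil (unbounded) limit. A quick standalone check of that limit computation, as a hypothetical demo rather than goleveldb's own test:

```go
// Standalone demonstration of the BytesPrefix limit computation shown
// above: the exclusive upper bound is the prefix with its last
// incrementable byte bumped by one.
package main

import "fmt"

func bytesPrefixLimit(prefix []byte) []byte {
	for i := len(prefix) - 1; i >= 0; i-- {
		if c := prefix[i]; c < 0xff {
			limit := make([]byte, i+1)
			copy(limit, prefix)
			limit[i] = c + 1 // first key past everything with this prefix
			return limit
		}
	}
	return nil // all bytes are 0xff: no finite upper bound exists
}

func main() {
	fmt.Printf("%q\n", bytesPrefixLimit([]byte("user:")))    // "user;" — ':' + 1
	fmt.Printf("%q\n", bytesPrefixLimit([]byte{'a', 0xff}))  // "b" — trailing 0xff dropped
	fmt.Println(bytesPrefixLimit([]byte{0xff, 0xff}))        // <nil>
}
```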
- -package leveldb - -import ( - "sync/atomic" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - tables []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - ref int - // Succeeding version. - next *version -} - -func newVersion(s *session) *version { - return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())} -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } - if v.ref < 0 { - panic("negative version ref") - } - - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { - for _, t := range tt { - num := t.file.Num() - tables[num] = true - } - } - - for _, tt := range v.tables { - for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { - v.s.tops.remove(t) - } - } - } - - v.next.releaseNB() - v.next = nil -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Walk tables level-by-level. - for level, tables := range v.tables { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. - zfound bool - zseq uint64 - zkt kType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hope across level, finding key/value - // in smaller level make later levels irrelevant. 
- v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { - if tset == nil { - tset = &tSet{level, t} - } else { - tseek = true - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - if level == 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case ktVal: - value = fval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case ktVal: - value = zval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - - return true - }) - - if tseek && tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - - return -} - -func (v *version) sampleSeek(ikey iKey) (tcomp bool) { - var tset *tSet - - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if tset == nil { - tset = &tSet{level, t} - return true - } else { - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false - } - }, nil) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue - } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) - } - - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} -} - -// Spawn a new version based on this version. -func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - return len(v.tables[level]) -} - -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. 
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { - return 0, err - } - n += nn - } - } - } - - return -} - -func (v *version) pickLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 - - for level, tables := range v.tables { - var score float64 - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). - score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - } - - v.cLevel = bestLevel - v.cScore = bestScore -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} -} - -type versionStaging struct { - base *version - tables []tablesScratch -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) - } - tm.deleted[r.num] = struct{}{} - } - - if tm.added != nil { - delete(tm.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } - - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt - } - - // Compute compaction score for new version. 
- nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go deleted file mode 100644 index 552a17bfb..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n == 0 { - return 0, 0, ErrCorrupt - } - if uint64(int(v)) != v { - return 0, 0, errors.New("snappy: decoded block is too large") - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s += 1 - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxUncompressedChunkLen), - buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), - } -} - -// Reader is an io.Reader than can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. 
-func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4]) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if !r.readFull(r.decoded[:n]) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)]) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - - } else { - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen]) { - return 0, r.err - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go deleted file mode 100644 index dda372422..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "io" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
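Reader.Read above drives the framing format: read a 4-byte chunk header, then dispatch on the chunk type. A hedged round-trip usage sketch; it assumes the maintained github.com/golang/snappy module, which kept the same NewWriter/NewReader API as this vendored fork:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy" // assumption: maintained upstream with the same API
)

func main() {
	var buf bytes.Buffer
	w := snappy.NewWriter(&buf) // emits the stream identifier, then framed chunks
	if _, err := w.Write(bytes.Repeat([]byte("hello snappy "), 1000)); err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out), "bytes round-tripped") // 13000 bytes round-tripped
}
```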
-func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) ([]byte, error) { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d], nil - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return.
- if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d], nil -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} - -// NewWriter returns a new Writer that compresses to w, using the framing -// format described at -// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - enc []byte - buf [checksumSize + chunkHeaderSize]byte - wroteHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - w.wroteHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (n int, errRet error) { - if w.err != nil { - return 0, w.err - } - if !w.wroteHeader { - copy(w.enc, magicChunk) - if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil { - w.err = err - return n, err - } - w.wroteHeader = true - } - for len(p) > 0 { - var uncompressed []byte - if len(p) > maxUncompressedChunkLen { - uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%.
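MaxEncodedLen's bound above is 32 + n + n/6. For the 64 KiB chunks the framing writer emits, that works out to 32 + 65536 + 10922 = 76490 bytes, which is exactly the size of the enc buffer that NewWriter allocates. A one-line check:

```go
package main

import "fmt"

// worstCase mirrors the bound derived in MaxEncodedLen above.
func worstCase(srcLen int) int { return 32 + srcLen + srcLen/6 }

func main() {
	// One maximal 64 KiB framing chunk: 32 + 65536 + 10922 bytes.
	fmt.Println(worstCase(65536)) // 76490
}
```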
- chunkType := uint8(chunkTypeCompressedData) - chunkBody, err := Encode(w.enc, uncompressed) - if err != nil { - w.err = err - return n, err - } - if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { - chunkType, chunkBody = chunkTypeUncompressedData, uncompressed - } - - chunkLen := 4 + len(chunkBody) - w.buf[0] = chunkType - w.buf[1] = uint8(chunkLen >> 0) - w.buf[2] = uint8(chunkLen >> 8) - w.buf[3] = uint8(chunkLen >> 16) - w.buf[4] = uint8(checksum >> 0) - w.buf[5] = uint8(checksum >> 8) - w.buf[6] = uint8(checksum >> 16) - w.buf[7] = uint8(checksum >> 24) - if _, err = w.w.Write(w.buf[:]); err != nil { - w.err = err - return n, err - } - if _, err = w.w.Write(chunkBody); err != nil { - w.err = err - return n, err - } - n += len(uncompressed) - } - return n, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go deleted file mode 100644 index 043bf3d81..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at http://code.google.com/p/snappy/ -package snappy - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - // https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 bytes". 
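Writer.Write above frames each chunk as one type byte plus the body length as a little-endian 24-bit integer, where the body is the 4-byte masked checksum followed by the payload. A small sketch of that header layout (chunkHeader is an illustrative helper, not part of the removed code):

```go
package main

import "fmt"

// chunkHeader mirrors the header assembly in Writer.Write above: one
// chunk-type byte followed by the body length as a little-endian
// 24-bit integer. The body itself is the 4-byte masked checksum plus
// the (possibly compressed) payload.
func chunkHeader(chunkType byte, bodyLen int) [4]byte {
	return [4]byte{
		chunkType,
		byte(bodyLen >> 0),
		byte(bodyLen >> 8),
		byte(bodyLen >> 16),
	}
}

func main() {
	// A compressed-data chunk (type 0x00) carrying a 10-byte payload.
	h := chunkHeader(0x00, 4+10)
	fmt.Printf("% x\n", h) // 00 0e 00 00
}
```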
- maxUncompressedChunkLen = 65536 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go deleted file mode 100644 index 78113f55b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdata = flag.String("testdata", "testdata", "Directory containing the test data") -) - -func roundtrip(b, ebuf, dbuf []byte) error { - e, err := Encode(ebuf, b) - if err != nil { - return fmt.Errorf("encoding error: %v", err) - } - d, err := Decode(dbuf, e) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(27354294)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func cmp(a, b []byte) error { - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxUncompressedChunkLen (64k). 
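The crc helper above implements the framing format's masked CRC-32C: rotate the checksum right by 15 bits, then add the constant 0xa282ead8 (the spec masks the CRC so that checksumming data which itself contains CRCs behaves well). A standalone equivalent using only hash/crc32:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC is equivalent to the crc helper above: CRC-32C rotated
// right by 15 bits, plus the framing format's 0xa282ead8 constant.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, castagnoli)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}
```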
- src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestReaderReset(t *testing.T) { - gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000) - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(gold); err != nil { - t.Fatalf("Write: %v", err) - } - encoded, invalid, partial := buf.String(), "invalid", "partial" - r := NewReader(nil) - for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} { - if s == partial { - r.Reset(strings.NewReader(encoded)) - if _, err := r.Read(make([]byte, 101)); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - continue - } - r.Reset(strings.NewReader(s)) - got, err := ioutil.ReadAll(r) - switch s { - case encoded: - if err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - if err := cmp(got, gold); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - case invalid: - if err == nil { - t.Errorf("#%d: got nil error, want non-nil", i) - continue - } - } - } -} - -func TestWriterReset(t *testing.T) { - gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000) - var gots, wants [][]byte - const n = 20 - w, failed := NewWriter(nil), false - for i := 0; i <= n; i++ { - buf := new(bytes.Buffer) - w.Reset(buf) - want := gold[:len(gold)*i/n] - if _, err := w.Write(want); err != nil { - t.Errorf("#%d: Write: %v", i, err) - failed = true - continue - } - got, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Errorf("#%d: ReadAll: %v", i, err) - failed = true - continue - } - gots = append(gots, got) - wants = append(wants, want) - } - if failed { - return - } - for i := range gots { - if err := cmp(gots[i], wants[i]); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded, err := Encode(nil, src) - if err != nil { - b.Fatal(err) - } - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b testing.TB, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Fatalf("failed reading %s: %s", filename, err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // NOTE: The file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. 
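benchDecode and benchEncode above call b.SetBytes with the uncompressed size, so `go test -bench` reports throughput of uncompressed data rather than iteration counts alone. The same pattern in isolation (a hypothetical benchmark, not from the removed file):

```go
package bench_test

import "testing"

// BenchmarkCopy shows the SetBytes pattern used by benchDecode and
// benchEncode above: report bandwidth in terms of uncompressed bytes.
func BenchmarkCopy(b *testing.B) {
	src := make([]byte, 1<<20)
	dst := make([]byte, len(src))
	b.SetBytes(int64(len(src))) // per-iteration byte count for MB/s output
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		copy(dst, src)
	}
}
```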
- data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "fireworks.jpeg"}, - {"jpg_200", "fireworks.jpeg"}, - {"pdf", "paper-100k.pdf"}, - {"html4", "html_x_4"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" - -func downloadTestdata(basename string) (errRet error) { - filename := filepath.Join(*testdata, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - return fmt.Errorf("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create testdata: %s", err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := baseURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - if err := downloadTestdata(testFiles[n].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go deleted file mode 100644 index d1be75341..000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go +++ /dev/null @@ -1,38 +0,0 @@ -package sysinfo - -import ( - "errors" -) - -var ErrPlatformNotSupported = errors.New("this operation is not supported on your platform") - -type DiskStats struct { - Free uint64 - Total uint64 - FsType string -} - -var diskUsageImpl func(string) (*DiskStats, error) - -func DiskUsage(path string) (*DiskStats, error) { - if diskUsageImpl == nil { - return nil, ErrPlatformNotSupported - } - - return diskUsageImpl(path) -} - -type MemStats struct { - Swap uint64 - Used uint64 -} - -var memInfoImpl func() (*MemStats, error) - -func MemoryInfo() (*MemStats, error) { - if memInfoImpl == nil { - return nil, ErrPlatformNotSupported - } - - return memInfoImpl() -} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go deleted file mode 100644 index c4e17fe44..000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go +++ /dev/null @@ -1,32 +0,0 @@ -package sysinfo - -import ( - "fmt" - "syscall" -) - -func init() { - diskUsageImpl = darwinDiskUsage - memInfoImpl = darwinMemInfo -} - -func darwinDiskUsage(path string) (*DiskStats, error) { - var stfst syscall.Statfs_t - err := syscall.Statfs(path, &stfst) - if err != nil { - return nil, err - } - - free := stfst.Bfree * uint64(stfst.Bsize) - total := stfst.Bavail * uint64(stfst.Bsize) - return &DiskStats{ - Free: free, - Total: total, - FsType: fmt.Sprint(stfst.Type), - }, nil -} - -func darwinMemInfo() (*MemStats, error) { - // TODO: use vm_stat on osx to gather memory information - return new(MemStats), nil -} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go 
b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go deleted file mode 100644 index b53426201..000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go +++ /dev/null @@ -1,70 +0,0 @@ -package sysinfo - -import ( - "bytes" - "fmt" - "io/ioutil" - "strings" - "syscall" - - humanize "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize" -) - -func init() { - diskUsageImpl = linuxDiskUsage - memInfoImpl = linuxMemInfo -} - -func linuxDiskUsage(path string) (*DiskStats, error) { - var stfst syscall.Statfs_t - err := syscall.Statfs(path, &stfst) - if err != nil { - return nil, err - } - - free := stfst.Bfree * uint64(stfst.Bsize) - total := stfst.Bavail * uint64(stfst.Bsize) - return &DiskStats{ - Free: free, - Total: total, - FsType: fmt.Sprint(stfst.Type), - }, nil -} - -func linuxMemInfo() (*MemStats, error) { - info, err := ioutil.ReadFile("/proc/self/status") - if err != nil { - return nil, err - } - - var stats MemStats - for _, e := range bytes.Split(info, []byte("\n")) { - if !bytes.HasPrefix(e, []byte("Vm")) { - continue - } - - parts := bytes.Split(e, []byte(":")) - if len(parts) != 2 { - return nil, fmt.Errorf("unexpected line in proc stats: %q", string(e)) - } - - val := strings.Trim(string(parts[1]), " \n\t") - switch string(parts[0]) { - case "VmSize": - vmsize, err := humanize.ParseBytes(val) - if err != nil { - return nil, err - } - - stats.Used = vmsize - case "VmSwap": - swapsize, err := humanize.ParseBytes(val) - if err != nil { - return nil, err - } - - stats.Swap = swapsize - } - } - return &stats, nil -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore deleted file mode 100644 index 4cd0cbaf4..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml deleted file mode 100644 index 67467e140..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -sudo: false -language: go - -go: - - 1.4.1 - -before_script: - - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi - -os: - - linux - - osx - -notifications: - email: false diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS deleted file mode 100644 index 4e0e8284e..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS +++ /dev/null @@ -1,34 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Adrien Bustany -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Dave Cheney -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Matt Layher -Nathan Youngman -Paul Hammond -Pieter Droogendijk -Pursuit92 -Rob Figueiredo -Soge Zhang -Tilak Sharma -Travis Cline -Tudor Golubenco -Yukang -bronze1man -debrando -henrikedwards diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md deleted file mode 100644 index ea9428a2a..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md +++ /dev/null @@ -1,263 +0,0 @@ -# Changelog - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. 
- -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 
2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 - diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md deleted file mode 100644 index 0f377f341..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, OS X and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE deleted file mode 100644 index f21e54080..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace deleted file mode 100644 index e69de29bb..000000000 diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md deleted file mode 100644 index 7a0b24736..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# File system notifications for Go - -[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1) - -Go 1.3+ required. - -Cross platform: Windows, Linux, BSD and OS X. - -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)| -|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)| - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information. - -## API stability - -Two major versions of fsnotify exist. - -**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1. - -```go -import "gopkg.in/fsnotify.v0" -``` - -\* Refer to the package as fsnotify (without the .v0 suffix). - -**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with: - -```go -import "gopkg.in/fsnotify.v1" -``` - -Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API. - -**Master** may have unreleased changes. 
Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible: - -```go -import "github.com/go-fsnotify/fsnotify" -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go). - - -[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml deleted file mode 100644 index 204217fb0..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml +++ /dev/null @@ -1,26 +0,0 @@ -## OS X build (CircleCI iOS beta) - -# Pretend like it's an Xcode project, at least to get it running. -machine: - environment: - XCODE_WORKSPACE: NotUsed.xcworkspace - XCODE_SCHEME: NotUsed - # This is where the go project is actually checked out to: - CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify - -dependencies: - pre: - - brew upgrade go - -test: - override: - - go test ./... - -# Idealized future config, eventually with cross-platform build matrix :-) - -# machine: -# go: -# version: 1.4 -# os: -# - osx -# - linux diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go deleted file mode 100644 index 306379660..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -package fsnotify_test - -import ( - "log" - - "github.com/go-fsnotify/fsnotify" -) - -func ExampleNewWatcher() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event := <-watcher.Events: - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err := <-watcher.Errors: - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go deleted file mode 100644 index c899ee008..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." 
-func (e Event) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if e.Op&Create == Create { - buffer.WriteString("|CREATE") - } - if e.Op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if e.Op&Write == Write { - buffer.WriteString("|WRITE") - } - if e.Op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if e.Op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - - // If buffer remains empty, return no event names - if buffer.Len() == 0 { - return fmt.Sprintf("%q: ", e.Name) - } - - // Return a list of event names, with leading pipe character stripped - return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go deleted file mode 100644 index d7759ec8c..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := syscall.InotifyInit() - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
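Op above is a bit set, which is why String() tests each flag with a mask and why callers, as in the ExampleNewWatcher shown earlier, check event.Op&fsnotify.Write == fsnotify.Write. A compact standalone illustration of the same mask test (local copies of the constants, not the package itself):

```go
package main

import "fmt"

type Op uint32

const (
	Create Op = 1 << iota
	Write
	Remove
	Rename
	Chmod
)

func main() {
	ev := Create | Write // one event can carry several operations
	fmt.Println(ev&Write == Write)   // true
	fmt.Println(ev&Remove == Remove) // false
}
```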
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | - syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | - syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - watchEntry, found := w.watches[name] - w.mu.Unlock() - if found { - watchEntry.flags |= flags - flags |= syscall.IN_MASK_ADD - } - wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - w.mu.Lock() - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - w.mu.Unlock() - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // That means we can safely delete it from our watches, whatever inotify_rm_watch does. - delete(w.watches, name) - success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer syscall.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = syscall.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == syscall.EINTR { - continue - } - - // syscall.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < syscall.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occured while reading. 
-				err = errno
-			} else {
-				// Read was too short.
-				err = errors.New("notify: short read in readEvents()")
-			}
-			select {
-			case w.Errors <- err:
-			case <-w.done:
-				return
-			}
-			continue
-		}
-
-		var offset uint32
-		// We don't know how many events we just read into the buffer.
-		// While the offset points to at least one whole event...
-		for offset <= uint32(n-syscall.SizeofInotifyEvent) {
-			// Point "raw" to the event in the buffer
-			raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-
-			mask := uint32(raw.Mask)
-			nameLen := uint32(raw.Len)
-			// If the event happened to the watched directory or the watched file, the kernel
-			// doesn't append the filename to the event, but we would like to always fill the
-			// "Name" field with a valid filename. We retrieve the path of the watch from
-			// the "paths" map.
-			w.mu.Lock()
-			name := w.paths[int(raw.Wd)]
-			w.mu.Unlock()
-			if nameLen > 0 {
-				// Point "bytes" at the first byte of the filename
-				bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
-				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
-				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
-			}
-
-			event := newEvent(name, mask)
-
-			// Send the events that are not ignored on the events channel
-			if !event.ignoreLinux(mask) {
-				select {
-				case w.Events <- event:
-				case <-w.done:
-					return
-				}
-			}
-
-			// Move to the next event in the buffer
-			offset += syscall.SizeofInotifyEvent + nameLen
-		}
-	}
-}
-
-// Certain types of events can be "ignored" and not sent over the Events
-// channel, such as events marked ignored by the kernel, or MODIFY events
-// against files that do not exist.
-func (e *Event) ignoreLinux(mask uint32) bool {
-	// Ignore anything the inotify API says to ignore
-	if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
-		return true
-	}
-
-	// If the event is not a DELETE or RENAME, the file must exist.
-	// Otherwise the event is ignored.
-	// *Note*: this was put in place because it was seen that a MODIFY
-	// event was sent after the DELETE. This ignores that MODIFY and
-	// assumes a DELETE will come or has come if the file doesn't exist.
-	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
-		_, statErr := os.Lstat(e.Name)
-		return os.IsNotExist(statErr)
-	}
-	return false
-}
-
-// newEvent returns a platform-independent Event based on an inotify mask.
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
-		e.Op |= Create
-	}
-	if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
-		e.Op |= Remove
-	}
-	if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
-		e.Op |= Write
-	}
-	if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
-		e.Op |= Rename
-	}
-	if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
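The hunk above removes the Linux implementation of the vendored watcher API (NewWatcher, Add, Remove, and the Events/Errors channels). For orientation, a minimal consumer sketch against that API — illustrative only, not part of this diff; the import path is the upstream package that was vendored here:

	package main

	import (
		"log"

		fsnotify "gopkg.in/fsnotify.v1"
	)

	func main() {
		w, err := fsnotify.NewWatcher()
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()

		// Add is non-recursive, per the doc comments above.
		if err := w.Add("/tmp"); err != nil {
			log.Fatal(err)
		}

		for {
			select {
			case ev := <-w.Events:
				// Op is a bitmask; test bits the same way Event.String does.
				if ev.Op&fsnotify.Write == fsnotify.Write {
					log.Printf("modified: %s", ev.Name)
				}
			case err := <-w.Errors:
				log.Printf("watch error: %v", err)
			}
		}
	}

diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
deleted file mode 100644
index 3b4178404..000000000
--- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.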
- -// +build linux - -package fsnotify - -import ( - "errors" - "syscall" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = syscall.EpollCreate(1) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := syscall.EpollEvent{ - Fd: int32(poller.fd), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = syscall.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]syscall.EpollEvent, 7) - for { - n, errno := syscall.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == syscall.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&syscall.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let syscall.Read pick up the error. - epollerr = true - } - if event.Events&syscall.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&syscall.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&syscall.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. 
-					err := poller.clearWake()
-					if err != nil {
-						return false, err
-					}
-				}
-			}
-		}
-
-		if epollhup || epollerr || epollin {
-			return true, nil
-		}
-		return false, nil
-	}
-}
-
-// Wake up the poller by writing a single byte to the pipe.
-func (poller *fdPoller) wake() error {
-	buf := make([]byte, 1)
-	n, errno := syscall.Write(poller.pipe[1], buf)
-	if n == -1 {
-		if errno == syscall.EAGAIN {
-			// Buffer is full, poller will wake.
-			return nil
-		}
-		return errno
-	}
-	return nil
-}
-
-func (poller *fdPoller) clearWake() error {
-	// You have to be woken up a LOT in order to get to 100!
-	buf := make([]byte, 100)
-	n, errno := syscall.Read(poller.pipe[0], buf)
-	if n == -1 {
-		if errno == syscall.EAGAIN {
-			// Buffer is empty, someone else cleared our wake.
-			return nil
-		}
-		return errno
-	}
-	return nil
-}
-
-// Close all poller file descriptors, but not the one passed to it.
-func (poller *fdPoller) close() {
-	if poller.pipe[1] != -1 {
-		syscall.Close(poller.pipe[1])
-	}
-	if poller.pipe[0] != -1 {
-		syscall.Close(poller.pipe[0])
-	}
-	if poller.epfd != -1 {
-		syscall.Close(poller.epfd)
-	}
-}
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
deleted file mode 100644
index af9f407f8..000000000
--- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
-	"syscall"
-	"testing"
-	"time"
-)
-
-type testFd [2]int
-
-func makeTestFd(t *testing.T) testFd {
-	var tfd testFd
-	errno := syscall.Pipe(tfd[:])
-	if errno != nil {
-		t.Fatalf("Failed to create pipe: %v", errno)
-	}
-	return tfd
-}
-
-func (tfd testFd) fd() int {
-	return tfd[0]
-}
-
-func (tfd testFd) closeWrite(t *testing.T) {
-	errno := syscall.Close(tfd[1])
-	if errno != nil {
-		t.Fatalf("Failed to close write end of pipe: %v", errno)
-	}
-}
-
-func (tfd testFd) put(t *testing.T) {
-	buf := make([]byte, 10)
-	_, errno := syscall.Write(tfd[1], buf)
-	if errno != nil {
-		t.Fatalf("Failed to write to pipe: %v", errno)
-	}
-}
-
-func (tfd testFd) get(t *testing.T) {
-	buf := make([]byte, 10)
-	_, errno := syscall.Read(tfd[0], buf)
-	if errno != nil {
-		t.Fatalf("Failed to read from pipe: %v", errno)
-	}
-}
-
-func (tfd testFd) close() {
-	syscall.Close(tfd[1])
-	syscall.Close(tfd[0])
-}
-
-func makePoller(t *testing.T) (testFd, *fdPoller) {
-	tfd := makeTestFd(t)
-	poller, err := newFdPoller(tfd.fd())
-	if err != nil {
-		t.Fatalf("Failed to create poller: %v", err)
-	}
-	return tfd, poller
-}
-
-func TestPollerWithBadFd(t *testing.T) {
-	_, err := newFdPoller(-1)
-	if err != syscall.EBADF {
-		t.Fatalf("Expected EBADF, got: %v", err)
-	}
-}
-
-func TestPollerWithData(t *testing.T) {
-	tfd, poller := makePoller(t)
-	defer tfd.close()
-	defer poller.close()
-
-	tfd.put(t)
-	ok, err := poller.wait()
-	if err != nil {
-		t.Fatalf("poller failed: %v", err)
-	}
-	if !ok {
-		t.Fatalf("expected poller to return true")
-	}
-	tfd.get(t)
-}
-
-func TestPollerWithWakeup(t *testing.T) {
-	tfd, poller := makePoller(t)
-	defer tfd.close()
-	defer poller.close()
-
-	err := poller.wake()
-	if err != nil {
-		t.Fatalf("wake failed: %v", err)
-	}
-	ok, err := poller.wait()
-	if err != nil {
-		t.Fatalf("poller failed: %v", err)
-	}
-	if ok {
-		t.Fatalf("expected poller to return false")
-	}
-}
-
-func TestPollerWithClose(t
*testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.closeWrite(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } -} - -func TestPollerWithWakeupAndData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - - // both data and wakeup - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - // data is still in the buffer, wakeup is cleared - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - tfd.get(t) - // data is gone, only wakeup now - err = poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerConcurrent(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - oks := make(chan bool) - live := make(chan bool) - defer close(live) - go func() { - defer close(oks) - for { - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - oks <- ok - if !<-live { - return - } - } - }() - - // Try a write - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.put(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) - live <- true - - // Try a wakeup - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - if <-oks { - t.Fatalf("expected false") - } - live <- true - - // Try a close - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.closeWrite(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go deleted file mode 100644 index 035ee8f95..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "os" - "path/filepath" - "syscall" - "testing" - "time" -) - -func TestInotifyCloseRightAway(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Close immediately; it won't even reach the first syscall.Read. - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLater(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. 
- <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - w.Add(testDir) - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseAfterRead(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add .") - } - - // Generate an event. - os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) - - // Wait for readEvents to read the event, then close the watcher. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func isWatcherReallyClosed(t *testing.T, w *Watcher) { - select { - case err, ok := <-w.Errors: - if ok { - t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) - } - default: - t.Fatalf("w.Errors would have blocked; readEvents is still alive!") - } - - select { - case _, ok := <-w.Events: - if ok { - t.Fatalf("w.Events is not closed; readEvents is still alive after closing") - } - default: - t.Fatalf("w.Events would have blocked; readEvents is still alive!") - } -} - -func TestInotifyCloseCreate(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - h, err := os.Create(filepath.Join(testDir, "testfile")) - if err != nil { - t.Fatalf("Failed to create file in testdir: %v", err) - } - h.Close() - select { - case _ = <-w.Events: - case err := <-w.Errors: - t.Fatalf("Error from watcher: %v", err) - case <-time.After(50 * time.Millisecond): - t.Fatalf("Took too long to wait for event") - } - - // At this point, we've received one event, so the goroutine is ready. - // It's also blocking on syscall.Read. - // Now we try to swap the file descriptor under its nose. 
-	w.Close()
-	w, err = NewWatcher()
-	if err != nil {
-		t.Fatalf("Failed to create second watcher: %v", err)
-	}
-	defer w.Close()
-
-	<-time.After(50 * time.Millisecond)
-	err = w.Add(testDir)
-	if err != nil {
-		t.Fatalf("Error adding testDir again: %v", err)
-	}
-}
-
-func TestInotifyStress(t *testing.T) {
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-	testFile := filepath.Join(testDir, "testfile")
-
-	w, err := NewWatcher()
-	if err != nil {
-		t.Fatalf("Failed to create watcher: %v", err)
-	}
-	defer w.Close()
-
-	killchan := make(chan struct{})
-	defer close(killchan)
-
-	err = w.Add(testDir)
-	if err != nil {
-		t.Fatalf("Failed to add testDir: %v", err)
-	}
-
-	proc, err := os.FindProcess(os.Getpid())
-	if err != nil {
-		t.Fatalf("Error finding process: %v", err)
-	}
-
-	go func() {
-		for {
-			select {
-			case <-time.After(5 * time.Millisecond):
-				err := proc.Signal(syscall.SIGUSR1)
-				if err != nil {
-					t.Fatalf("Signal failed: %v", err)
-				}
-			case <-killchan:
-				return
-			}
-		}
-	}()
-
-	go func() {
-		for {
-			select {
-			case <-time.After(11 * time.Millisecond):
-				err := w.poller.wake()
-				if err != nil {
-					t.Fatalf("Wake failed: %v", err)
-				}
-			case <-killchan:
-				return
-			}
-		}
-	}()
-
-	go func() {
-		for {
-			select {
-			case <-killchan:
-				return
-			default:
-				handle, err := os.Create(testFile)
-				if err != nil {
-					t.Fatalf("Create failed: %v", err)
-				}
-				handle.Close()
-				time.Sleep(time.Millisecond)
-				err = os.Remove(testFile)
-				if err != nil {
-					t.Fatalf("Remove failed: %v", err)
-				}
-			}
-		}
-	}()
-
-	creates := 0
-	removes := 0
-	after := time.After(5 * time.Second)
-	for {
-		select {
-		case <-after:
-			if creates-removes > 1 || creates-removes < -1 {
-				t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
-			}
-			if creates < 50 {
-				t.Fatalf("Expected at least 50 creates, got %d", creates)
-			}
-			return
-		case err := <-w.Errors:
-			t.Fatalf("Got an error from watcher: %v", err)
-		case evt := <-w.Events:
-			if evt.Name != testFile {
-				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
-			}
-			if evt.Op == Create {
-				creates++
-			}
-			if evt.Op == Remove {
-				removes++
-			}
-		}
-	}
-}
-
-func TestInotifyRemoveTwice(t *testing.T) {
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-	testFile := filepath.Join(testDir, "testfile")
-
-	handle, err := os.Create(testFile)
-	if err != nil {
-		t.Fatalf("Create failed: %v", err)
-	}
-	handle.Close()
-
-	w, err := NewWatcher()
-	if err != nil {
-		t.Fatalf("Failed to create watcher: %v", err)
-	}
-	defer w.Close()
-
-	err = w.Add(testFile)
-	if err != nil {
-		t.Fatalf("Failed to add testFile: %v", err)
-	}
-
-	err = os.Remove(testFile)
-	if err != nil {
-		t.Fatalf("Failed to remove testFile: %v", err)
-	}
-
-	err = w.Remove(testFile)
-	if err != syscall.EINVAL {
-		t.Fatalf("Expected EINVAL from Remove, got: %v", err)
-	}
-
-	err = w.Remove(testFile)
-	if err == syscall.EINVAL {
-		t.Fatalf("Got EINVAL again, watch was not removed")
-	}
-}
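TestInotifyRemoveTwice above pins down a quirk worth noting: once a watched file is deleted, the kernel invalidates the watch descriptor itself, so the first explicit Remove already reports EINVAL even though the watcher's bookkeeping has been cleaned up. A caller tearing down watches has to tolerate that — a minimal sketch (illustrative only; the watchutil package name and removeWatch helper are placeholders, not part of the vendored code):

	package watchutil

	import (
		"syscall"

		fsnotify "gopkg.in/fsnotify.v1"
	)

	// removeWatch tears down a watch, tolerating the EINVAL that Remove
	// reports once the kernel has already dropped the deleted file's watch.
	func removeWatch(w *fsnotify.Watcher, path string) error {
		if err := w.Remove(path); err != nil && err != syscall.EINVAL {
			return err
		}
		return nil
	}

diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
deleted file mode 100644
index 59169c6af..000000000
--- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
+++ /dev/null
@@ -1,1135 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.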
- -// +build !plan9,!solaris - -package fsnotify - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - "time" -) - -// An atomic counter -type counter struct { - val int32 -} - -func (c *counter) increment() { - atomic.AddInt32(&c.val, 1) -} - -func (c *counter) value() int32 { - return atomic.LoadInt32(&c.val) -} - -func (c *counter) reset() { - atomic.StoreInt32(&c.val, 0) -} - -// tempMkdir makes a temporary directory -func tempMkdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "fsnotify") - if err != nil { - t.Fatalf("failed to create test directory: %s", err) - } - return dir -} - -// newWatcher initializes an fsnotify Watcher instance. -func newWatcher(t *testing.T) *Watcher { - watcher, err := NewWatcher() - if err != nil { - t.Fatalf("NewWatcher() failed: %s", err) - } - return watcher -} - -// addWatch adds a watch for a directory -func addWatch(t *testing.T, watcher *Watcher, dir string) { - if err := watcher.Add(dir); err != nil { - t.Fatalf("watcher.Add(%q) failed: %s", dir, err) - } -} - -func TestFsnotifyMultipleOperations(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory that's not watched - testDirToMoveFiles := tempMkdir(t) - defer os.RemoveAll(testDirToMoveFiles) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived, renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Rename == Rename { - renameReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // Modify the file outside of the watched dir - f, err = os.Open(testFileRenamed) - if err != nil { - t.Fatalf("open test renamed file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file that was moved - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system 
time to sync write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - rReceived := renameReceived.value() - if dReceived+rReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyMultipleCreates(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync 
write change before delete
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 2 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
-	}
-	mReceived := modifyReceived.value()
-	if mReceived < 3 {
-		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs at least %d)", mReceived, 3)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 1 {
-		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 1)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyDirOnly(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create a file before watching directory
-	// This should NOT add any events to the fsnotify event queue
-	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
-	{
-		var f *os.File
-		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
-		if err != nil {
-			t.Fatalf("creating test file failed: %s", err)
-		}
-		f.Sync()
-		f.Close()
-	}
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived, modifyReceived, deleteReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
-				t.Logf("event received: %s", event)
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	os.Remove(testFile)
-	os.Remove(testFileAlreadyExists)
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 1 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
-	}
-	mReceived := modifyReceived.value()
-	if mReceived != 1 {
-		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 2 {
-		t.Fatalf("incorrect number of delete
events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDeleteWatchedDir(t *testing.T) { - watcher := newWatcher(t) - defer watcher.Close() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Add a watch for testFile - addWatch(t, watcher, testFileAlreadyExists) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var deleteReceived counter - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - }() - - os.RemoveAll(testDir) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - dReceived := deleteReceived.value() - if dReceived < 2 { - t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) - } -} - -func TestFsnotifySubDir(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") - testSubDir := filepath.Join(testDir, "sub") - testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { - t.Logf("event received: %s", event) - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - addWatch(t, watcher, testDir) - - // Create sub-directory - if err := os.Mkdir(testSubDir, 0777); err != nil { - t.Fatalf("failed to create test sub-directory: %s", err) - } - - // Create a file - var f *os.File - f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - // Create a file (Should not see 
this! we are not watching subdir)
-	var fs *os.File
-	fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	fs.Sync()
-	fs.Close()
-
-	time.Sleep(200 * time.Millisecond)
-
-	// Make sure we receive deletes for both the file and the sub-directory
-	os.RemoveAll(testSubDir)
-	os.Remove(testFile1)
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 2 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 2 {
-		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyRename(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
-	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var renameReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
-				if event.Op&Rename == Rename {
-					renameReceived.increment()
-				}
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	// Add a watch for testFile
-	addWatch(t, watcher, testFile)
-
-	if err := testRename(testFile, testFileRenamed); err != nil {
-		t.Fatalf("rename failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	if renameReceived.value() == 0 {
-		t.Fatal("fsnotify rename events have not been received after 500 ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-
-	os.Remove(testFileRenamed)
-}
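TestFsnotifyRename above, and TestFsnotifyRenameToCreate below, pin down how moves map onto the Op bits defined earlier: a move away from a watched path surfaces as Rename on the old name (IN_MOVED_FROM/NOTE_RENAME), while a move into a watched directory surfaces as Create on the new name (IN_MOVED_TO). A consumer that cares about moves has to handle both halves — a hedged sketch (illustrative only; the watchutil package and classify helper are not part of the vendored code):

	package watchutil

	import fsnotify "gopkg.in/fsnotify.v1"

	// classify coarsely labels an event, mirroring the mask translation in
	// newEvent above. Illustrative only.
	func classify(ev fsnotify.Event) string {
		switch {
		case ev.Op&fsnotify.Rename != 0:
			return "moved away: " + ev.Name // old path; a Create may follow elsewhere
		case ev.Op&fsnotify.Create != 0:
			return "appeared: " + ev.Name // plain create, or a move into the directory
		case ev.Op&fsnotify.Remove != 0:
			return "deleted: " + ev.Name
		default:
			return "changed: " + ev.Name
		}
	}

-
-func TestFsnotifyRenameToCreate(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create directory to get file
-	testDirFrom := tempMkdir(t)
-	defer os.RemoveAll(testDirFrom)
-
-	addWatch(t, watcher,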
testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Create == Create { - createReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if createReceived.value() == 0 { - t.Fatal("fsnotify create events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToOverwrite(t *testing.T) { - switch runtime.GOOS { - case "plan9", "windows": - t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Create a file - var fr *os.File - fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fr.Sync() - fr.Close() - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var eventReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testFileRenamed) { - eventReceived.increment() - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if 
err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if eventReceived.value() == 0 { - t.Fatal("fsnotify events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestRemovalOfWatch(t *testing.T) { - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - if err := watcher.Remove(testDir); err != nil { - t.Fatalf("Could not remove the watch: %v\n", err) - } - - go func() { - select { - case ev := <-watcher.Events: - t.Fatalf("We received event: %v\n", ev) - case <-time.After(500 * time.Millisecond): - t.Log("No event received, as expected.") - } - }() - - time.Sleep(200 * time.Millisecond) - // Modify the file outside of the watched dir - f, err := os.Open(testFileAlreadyExists) - if err != nil { - t.Fatalf("Open test file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - time.Sleep(400 * time.Millisecond) -} - -func TestFsnotifyAttrib(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("attributes don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - // The modifyReceived counter counts IsModify events that are not IsAttrib, - // and the attribReceived counts IsAttrib events (which are also IsModify as - // a consequence). 
-	var modifyReceived counter
-	var attribReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-				if event.Op&Chmod == Chmod {
-					attribReceived.increment()
-				}
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	// Add a watch for testFile
-	addWatch(t, watcher, testFile)
-
-	if err := os.Chmod(testFile, 0700); err != nil {
-		t.Fatalf("chmod failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	// Creating/writing a file also changes the mtime, so IsAttrib should be set to true here
-	time.Sleep(500 * time.Millisecond)
-	if modifyReceived.value() != 0 {
-		t.Fatal("received an unexpected modify event when creating a test file")
-	}
-	if attribReceived.value() == 0 {
-		t.Fatal("fsnotify attribute events have not been received after 500 ms")
-	}
-
-	// Modifying the contents of the file does not set the attrib flag (although e.g. the mtime
-	// might have been modified).
-	modifyReceived.reset()
-	attribReceived.reset()
-
-	f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
-	if err != nil {
-		t.Fatalf("reopening test file failed: %s", err)
-	}
-
-	f.WriteString("more data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(500 * time.Millisecond)
-
-	if modifyReceived.value() != 1 {
-		t.Fatal("didn't receive a modify event after changing test file contents")
-	}
-
-	if attribReceived.value() != 0 {
-		t.Fatal("received an unexpected attrib event after changing test file contents")
-	}
-
-	modifyReceived.reset()
-	attribReceived.reset()
-
-	// Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
-	// of the file are not changed though)
-	if err := os.Chmod(testFile, 0600); err != nil {
-		t.Fatalf("chmod failed: %s", err)
-	}
-
-	time.Sleep(500 * time.Millisecond)
-
-	if attribReceived.value() != 1 {
-		t.Fatal("didn't receive an attribute change after 500ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(1e9):
-		t.Fatal("event stream was not closed after 1 second")
-	}
-
-	os.Remove(testFile)
-}
-
-func TestFsnotifyClose(t *testing.T) {
-	watcher := newWatcher(t)
-	watcher.Close()
-
-	var done int32
-	go func() {
-		watcher.Close()
-		atomic.StoreInt32(&done, 1)
-	}()
-
-	time.Sleep(50e6) // 50 ms
-	if atomic.LoadInt32(&done) == 0 {
-		t.Fatal("double Close() test failed: second Close() call didn't return")
-	}
-
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	if err := watcher.Add(testDir); err == nil {
-		t.Fatal("expected error on Watch() after Close(), got nil")
-	}
-}
-
-func TestFsnotifyFakeSymlink(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("symlinks don't work on Windows.")
-	}
-
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	var
errorsReceived counter - // Receive errors on the error channel on a separate goroutine - go func() { - for errors := range watcher.Errors { - t.Logf("Received error: %s", errors) - errorsReceived.increment() - } - }() - - // Count the CREATE events received - var createEventsReceived, otherEventsReceived counter - go func() { - for ev := range watcher.Events { - t.Logf("event received: %s", ev) - if ev.Op&Create == Create { - createEventsReceived.increment() - } else { - otherEventsReceived.increment() - } - } - }() - - addWatch(t, watcher, testDir) - - if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { - t.Fatalf("Failed to create bogus symlink: %s", err) - } - t.Logf("Created bogus symlink") - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - // Should not be error, just no events for broken links (watching nothing) - if errorsReceived.value() > 0 { - t.Fatal("fsnotify errors have been received.") - } - if otherEventsReceived.value() > 0 { - t.Fatal("fsnotify other events received on the broken link") - } - - // Except for 1 create event (for the link itself) - if createEventsReceived.value() == 0 { - t.Fatal("fsnotify create events were not received after 500 ms") - } - if createEventsReceived.value() > 1 { - t.Fatal("fsnotify more create events received than expected") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() -} - -// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. -// See https://codereview.appspot.com/103300045/ -// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race -func TestConcurrentRemovalOfWatch(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip("regression test for race only present on darwin") - } - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - - // Test that RemoveWatch can be invoked concurrently, with no data races. 
- removed1 := make(chan struct{}) - go func() { - defer close(removed1) - watcher.Remove(testDir) - }() - removed2 := make(chan struct{}) - go func() { - close(removed2) - watcher.Remove(testDir) - }() - <-removed1 - <-removed2 -} - -func TestClose(t *testing.T) { - // Regression test for #59 bad file descriptor from Close - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - watcher := newWatcher(t) - if err := watcher.Add(testDir); err != nil { - t.Fatalf("Expected no error on Add, got %v", err) - } - err := watcher.Close() - if err != nil { - t.Fatalf("Expected no error on Close, got %v.", err) - } -} - -func testRename(file1, file2 string) error { - switch runtime.GOOS { - case "windows", "plan9": - return os.Rename(file1, file2) - default: - cmd := exec.Command("mv", file1, file2) - return cmd.Run() - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go deleted file mode 100644 index 265622d20..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "syscall" - "time" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan bool), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - w.mu.Unlock() - - w.mu.Lock() - ws := w.watches - w.mu.Unlock() - - var err error - for name := range ws { - if e := w.Remove(name); e != nil && err == nil { - err = e - } - } - - // Send "quit" message to the reader goroutine: - w.done <- true - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error {
-	w.mu.Lock()
-	w.externalWatches[name] = true
-	w.mu.Unlock()
-	return w.addWatch(name, noteAllEvents)
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-	w.mu.Lock()
-	watchfd, ok := w.watches[name]
-	w.mu.Unlock()
-	if !ok {
-		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
-	}
-
-	const registerRemove = syscall.EV_DELETE
-	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
-		return err
-	}
-
-	syscall.Close(watchfd)
-
-	w.mu.Lock()
-	isDir := w.paths[watchfd].isDir
-	delete(w.watches, name)
-	delete(w.paths, watchfd)
-	delete(w.dirFlags, name)
-	w.mu.Unlock()
-
-	// Find all watched paths that are in this directory that are not external.
-	if isDir {
-		var pathsToRemove []string
-		w.mu.Lock()
-		for _, path := range w.paths {
-			wdir, _ := filepath.Split(path.name)
-			if filepath.Clean(wdir) == name {
-				if !w.externalWatches[path.name] {
-					pathsToRemove = append(pathsToRemove, path.name)
-				}
-			}
-		}
-		w.mu.Unlock()
-		for _, name := range pathsToRemove {
-			// Since these are internal, not much sense in propagating error
-			// to the user, as that will just confuse them with an error about
-			// a path they did not explicitly watch themselves.
-			w.Remove(name)
-		}
-	}
-
-	return nil
-}
-
-// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
-const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
-
-// keventWaitTime is the time to block on each read from kevent.
-var keventWaitTime = durationToTimespec(100 * time.Millisecond)
-
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-func (w *Watcher) addWatch(name string, flags uint32) error {
-	var isDir bool
-	// Make ./name and name equivalent
-	name = filepath.Clean(name)
-
-	w.mu.Lock()
-	if w.isClosed {
-		w.mu.Unlock()
-		return errors.New("kevent instance already closed")
-	}
-	watchfd, alreadyWatching := w.watches[name]
-	// We already have a watch, but we can still override flags.
-	if alreadyWatching {
-		isDir = w.paths[watchfd].isDir
-	}
-	w.mu.Unlock()
-
-	if !alreadyWatching {
-		fi, err := os.Lstat(name)
-		if err != nil {
-			return err
-		}
-
-		// Don't watch sockets.
-		if fi.Mode()&os.ModeSocket == os.ModeSocket {
-			return nil
-		}
-
-		// Follow Symlinks
-		// Unfortunately, Linux can add bogus symlinks to watch list without
-		// issue, and Windows can't do symlinks period (AFAIK). To maintain
-		// consistency, we will act like everything is fine. There will simply
-		// be no file events for broken symlinks.
-		// Hence the returns of nil on errors.
-		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
-			name, err = filepath.EvalSymlinks(name)
-			if err != nil {
-				return nil
-			}
-
-			fi, err = os.Lstat(name)
-			if err != nil {
-				return nil
-			}
-		}
-
-		watchfd, err = syscall.Open(name, openMode, 0700)
-		if watchfd == -1 {
-			return err
-		}
-
-		isDir = fi.IsDir()
-	}
-
-	const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
-	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
-		syscall.Close(watchfd)
-		return err
-	}
-
-	if !alreadyWatching {
-		w.mu.Lock()
-		w.watches[name] = watchfd
-		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
-		w.mu.Unlock()
-	}
-
-	if isDir {
-		// Watch the directory if it has not been watched before,
-		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
-		w.mu.Lock()
-		watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
-			(!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
-		// Store flags so this watch can be updated later
-		w.dirFlags[name] = flags
-		w.mu.Unlock()
-
-		if watchDir {
-			if err := w.watchDirectoryFiles(name); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// readEvents reads from kqueue and converts the received kevents into
-// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
-	eventBuffer := make([]syscall.Kevent_t, 10)
-
-	for {
-		// See if there is a message on the "done" channel
-		select {
-		case <-w.done:
-			err := syscall.Close(w.kq)
-			if err != nil {
-				w.Errors <- err
-			}
-			close(w.Events)
-			close(w.Errors)
-			return
-		default:
-		}
-
-		// Get new events
-		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
-		// EINTR is okay, the syscall was interrupted before timeout expired.
-		if err != nil && err != syscall.EINTR {
-			w.Errors <- err
-			continue
-		}
-
-		// Flush the events we received to the Events channel
-		for len(kevents) > 0 {
-			kevent := &kevents[0]
-			watchfd := int(kevent.Ident)
-			mask := uint32(kevent.Fflags)
-			w.mu.Lock()
-			path := w.paths[watchfd]
-			w.mu.Unlock()
-			event := newEvent(path.name, mask)
-
-			if path.isDir && !(event.Op&Remove == Remove) {
-				// Double check to make sure the directory exists. This can happen when
-				// we do an rm -fr on a recursively watched folder and we receive a
-				// modification event first but the folder has been deleted and later
-				// receive the delete event.
-				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
-					// mark it as a delete event
-					event.Op |= Remove
-				}
-			}
-
-			if event.Op&Rename == Rename || event.Op&Remove == Remove {
-				w.Remove(event.Name)
-				w.mu.Lock()
-				delete(w.fileExists, event.Name)
-				w.mu.Unlock()
-			}
-
-			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
-				w.sendDirectoryChangeEvents(event.Name)
-			} else {
-				// Send the event on the Events channel
-				w.Events <- event
-			}
-
-			if event.Op&Remove == Remove {
-				// Look for a file that may have overwritten this.
-				// For example, mv f1 f2 will delete f2, then create f2.
-				fileDir, _ := filepath.Split(event.Name)
-				fileDir = filepath.Clean(fileDir)
-				w.mu.Lock()
-				_, found := w.watches[fileDir]
-				w.mu.Unlock()
-				if found {
-					// Make sure the directory exists before we watch for changes. When we
-					// do a recursive watch and perform rm -fr, the parent directory might
-					// have gone missing; ignore the missing directory and let the
-					// upcoming delete event remove the watch from the parent directory.
- if _, err := os.Lstat(fileDir); os.IsExist(err) { - w.sendDirectoryChangeEvents(fileDir) - // FIXME: should this be for events on files or just isDir? - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE { - e.Op |= Remove - } - if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE { - e.Op |= Write - } - if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME { - e.Op |= Rename - } - if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - w.Errors <- err - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - w.Events <- newCreateEvent(filePath) - } - - // like watchDirectoryFiles (but without doing another ReadDir) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= syscall.NOTE_DELETE - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = syscall.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]syscall.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := syscall.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. 
-func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) { - n, err := syscall.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) syscall.Timespec { - return syscall.NsecToTimespec(d.Nanoseconds()) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go deleted file mode 100644 index c57ccb427..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "syscall" - -const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go deleted file mode 100644 index 2bec296c5..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin - -package fsnotify - -import "syscall" - -// note: This constant is not defined on BSD -const openMode = syscall.O_EVTONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go deleted file mode 100644 index 811585227..000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sys_FS_ALL_EVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sys_FS_ONESHOT = 0x80000000 - sys_FS_ONLYDIR = 0x1000000 - - // Events - sys_FS_ACCESS = 0x1 - sys_FS_ALL_EVENTS = 0xfff - sys_FS_ATTRIB = 0x4 - sys_FS_CLOSE = 0x18 - sys_FS_CREATE = 0x100 - sys_FS_DELETE = 0x200 - sys_FS_DELETE_SELF = 0x400 - sys_FS_MODIFY = 0x2 - sys_FS_MOVE = 0xc0 - sys_FS_MOVED_FROM = 0x40 - sys_FS_MOVED_TO = 0x80 - sys_FS_MOVE_SELF = 0x800 - - // Special events - sys_FS_IGNORED = 0x8000 - sys_FS_Q_OVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO { - e.Op |= Create - } - if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF { - e.Op |= Remove - } - if mask&sys_FS_MODIFY == sys_FS_MODIFY { - e.Op |= Write - } - if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM { - e.Op |= Rename - } - if mask&sys_FS_ATTRIB == sys_FS_ATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, 
os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sys_FS_ONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) { - if watch.mask&sys_FS_ONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sys_FS_Q_OVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := watch.path + "\\" + name - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sys_FS_DELETE_SELF - case syscall.FILE_ACTION_MODIFIED: - mask = sys_FS_MODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sys_FS_MOVE_SELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sys_FS_ONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sys_FS_ONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = watch.path + "\\" + watch.rename - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sys_FS_ACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sys_FS_MODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sys_FS_ATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sys_FS_CREATE - case syscall.FILE_ACTION_REMOVED: - return sys_FS_DELETE - case syscall.FILE_ACTION_MODIFIED: - return sys_FS_MODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sys_FS_MOVED_FROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sys_FS_MOVED_TO - } - return 0 -} diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..af41f1ebe --- /dev/null +++ b/ISSUE_TEMPLATE.md @@ -0,0 +1,18 @@ +Version/Platform/Processor information (from `ipfs version --all`): + + + +Type (bug, feature, meta, test failure, question): +Area (api, commands, daemon, fuse, etc): +Priority (from P0: functioning, to P4: operations on fire): + +Description: + + + + +--------------------------------------------------- +This is for you! Please read, and then delete this text before posting it. +The go-ipfs issues are only for bug reports and directly actionable features. +Check https://github.com/ipfs/community/blob/master/contributing.md#reporting-issues if that doesn't fit. +Check https://github.com/ipfs/go-ipfs/blob/master/docs/github-issue-guide.md if you are not sure how to fill this issue. 
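The deleted files above are the kqueue (BSD/macOS) and Windows completion-port backends of the vendored `gopkg.in/fsnotify.v1` package, which this change removes from `Godeps/_workspace`. For orientation, here is a minimal sketch of how a consumer drives the `Watcher` API those backends implement (`NewWatcher`, `Add`, `Close`, and the `Events`/`Errors` channels). It is illustrative only, not part of this diff, and assumes the upstream `gopkg.in/fsnotify.v1` import path; the watched path `/tmp` is just an example:

```go
package main

import (
	"log"

	"gopkg.in/fsnotify.v1" // assumed upstream import path; not part of this diff
)

func main() {
	// NewWatcher starts the platform-specific readEvents loop shown above.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Add watches a file or directory non-recursively, per the doc comments above.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			// Op is a bitmask; on BSD the Write/Remove bits come from the
			// NOTE_WRITE/NOTE_DELETE translation in newEvent above.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}
```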
diff --git a/LICENSE b/LICENSE index c7386b3c9..9ce974446 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Juan Batiz-Benet +Copyright (c) 2016 Juan Batiz-Benet Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index 81a33e372..7346302cd 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,23 @@ # Minimum version numbers for software required to build IPFS -IPFS_MIN_GO_VERSION = 1.5.2 +IPFS_MIN_GO_VERSION = 1.7 IPFS_MIN_GX_VERSION = 0.6 IPFS_MIN_GX_GO_VERSION = 1.1 ifeq ($(TEST_NO_FUSE),1) - go_test=go test -tags nofuse + go_test=IPFS_REUSEPORT=false go test -tags nofuse else - go_test=go test + go_test=IPFS_REUSEPORT=false go test endif +ifeq ($(OS),Windows_NT) + GOPATH_DELIMITER = ; +else + GOPATH_DELIMITER = : +endif -dist_root=/ipfs/QmXZQzBAFuoELw3NtjQZHkWSdA332PyQUj6pQjuhEukvg8 -gx_bin=bin/gx-v0.7.0 -gx-go_bin=bin/gx-go-v1.2.0 +dist_root=/ipfs/QmNZL8wNsvAGdVYr8uGeUE9aGfHjFpHegAWywQFEdSaJbp +gx_bin=bin/gx-v0.9.0 +gx-go_bin=bin/gx-go-v1.3.0 # use things in our bin before any other system binaries export PATH := bin:$(PATH) @@ -41,7 +46,7 @@ bin/gx-go-v%: gx_check: ${gx_bin} ${gx-go_bin} path_check: - @bin/check_go_path $(realpath $(shell pwd)) $(realpath $(GOPATH)/src/github.com/ipfs/go-ipfs) + @bin/check_go_path $(realpath $(shell pwd)) $(realpath $(addsuffix /src/github.com/ipfs/go-ipfs,$(subst $(GOPATH_DELIMITER), ,$(GOPATH)))) deps: go_check gx_check path_check ${gx_bin} --verbose install --global @@ -92,10 +97,10 @@ test_go_race: $(go_test) ./... -race test_sharness_short: - cd test/sharness/ && make + make -C test/sharness/ test_sharness_expensive: - cd test/sharness/ && TEST_EXPENSIVE=1 make + TEST_EXPENSIVE=1 make -C test/sharness/ test_all_commits: @echo "testing all commits between origin/master..HEAD" diff --git a/README.md b/README.md index 224bff40d..7e2ffd924 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,15 @@ -# IPFS implementation in Go -[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs) [![Build Status](https://travis-ci.org/ipfs/go-ipfs.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs) +# go-ipfs + +![banner](https://ipfs.io/ipfs/QmVk7srrwahXLNmcDYvyUEJptyoxpndnRa57YJ11L4jV26/ipfs.go.png) + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs) +[![Build Status](https://travis-ci.org/ipfs/go-ipfs.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs) + +> IPFS implementation in Go IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single bittorrent swarm, @@ -10,7 +20,7 @@ For more info see: https://github.com/ipfs/ipfs. Please put all issues regarding IPFS _design_ in the [ipfs repo issues](https://github.com/ipfs/ipfs/issues). 
-Please put all issues regarding Go IPFS _implementation_ in [this repo](https://github.com/ipfs/go-ipfs/issues). +Please put all issues regarding the Go IPFS _implementation_ in [this repo](https://github.com/ipfs/go-ipfs/issues). ## Table of Contents @@ -18,18 +28,19 @@ Please put all issues regarding Go IPFS _implementation_ in [this repo](https:// - [Install](#install) - [Install prebuilt packages](#install-prebuilt-packages) - [Build from Source](#build-from-source) - - [Prerequisite: Install Go](#prerequisite-install-go) - - [Download + Compile IPFS](#download--compile-ipfs) + - [Install Go](#install-go) + - [Download and Compile IPFS](#download-and-compile-ipfs) + - [Troubleshooting](#troubleshooting) - [Development Dependencies](#development-dependencies) -- [Updating](#updating) + - [Updating](#updating) - [Usage](#usage) - [Getting Started](#getting-started) - [Some things to try](#some-things-to-try) - [Docker usage](#docker-usage) - - [Docker usage with VirtualBox/boot2docker (OSX and Windows)](#docker-usage-with-virtualboxboot2docker-osx-and-windows) -- [Troubleshooting](#troubleshooting) -- [Contributing](#contributing) + - [Troubleshooting](#troubleshooting-1) - [Todo](#todo) +- [Contributing](#contributing) + - [Want to hack on IPFS?](#want-to-hack-on-ipfs) - [License](#license) ## Security Issues @@ -57,7 +68,8 @@ From there: #### Install Go -First, you'll need Go. If you don't have it: [Download Go 1.5.2+](https://golang.org/dl/). **Go 1.6 is not yet supported.** +The build process for ipfs requires Go 1.7 or higher. If you don't have it: [Download Go 1.7+](https://golang.org/dl/). + You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`: @@ -68,7 +80,7 @@ export PATH=$PATH:$GOPATH/bin (If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)). -#### Download + Compile IPFS +#### Download and Compile IPFS go-ipfs differs from the vanilla `go get` flow: it uses [gx](https://github.com/whyrusleeping/gx)/[gx-go](https://github.com/whyrusleeping/gx-go) @@ -93,7 +105,7 @@ $ make install * Separate [instructions are available for building on Windows](docs/windows.md). * `git` is required in order for `go get` to fetch all dependencies. * Package managers often contain out-of-date `golang` packages. - Ensure that `go version` reports at least 1.5.2. See above for how to install go. + Ensure that `go version` reports at least 1.7. See above for how to install go. * If you are interested in development, please install the development dependencies as well. * *WARNING: Older versions of OSX FUSE (for Mac OS X) can cause kernel panics when mounting!* @@ -107,11 +119,11 @@ dependencies as well. If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf). -## Updating +### Updating + IPFS has an updating tool that can be accessed through `ipfs update`. The tool is not installed alongside IPFS in order to keep that logic independent of the main -codebase. To install `ipfs update`, either [download it here](https://gobuilder.me/github.com/ipfs/ipfs-update) -or install it from source with `go get -u github.com/ipfs/ipfs-update`. +codebase. To install `ipfs update`, [download it here](https://ipfs.io/ipns/dist.ipfs.io/#ipfs-update). 
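Since the hunks above raise the minimum toolchain from Go 1.5.2 to Go 1.7 (`IPFS_MIN_GO_VERSION = 1.7`, enforced by the Makefile's `go_check` target), a quick self-check can save a confusing build failure. The sketch below is illustrative only, not part of this diff; it assumes a release toolchain whose `runtime.Version()` reports a string like `go1.7` or `go1.7.1`:

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"
)

func main() {
	// e.g. "go1.7" or "go1.7.1"; development builds report "devel ..."
	// and fail the parse below, which is acceptable for a sketch.
	v := strings.TrimPrefix(runtime.Version(), "go")
	parts := strings.SplitN(v, ".", 3)
	if len(parts) < 2 {
		fmt.Fprintln(os.Stderr, "cannot parse toolchain version:", runtime.Version())
		os.Exit(1)
	}
	major, errMaj := strconv.Atoi(parts[0])
	minor, errMin := strconv.Atoi(parts[1])
	if errMaj != nil || errMin != nil || major < 1 || (major == 1 && minor < 7) {
		fmt.Fprintf(os.Stderr, "go-ipfs requires Go 1.7+, found %s\n", runtime.Version())
		os.Exit(1)
	}
	fmt.Println("Go toolchain OK:", runtime.Version())
}
```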
## Usage @@ -233,14 +245,6 @@ Stop the running container: docker stop ipfs_host -#### Docker usage with VirtualBox/boot2docker (OSX and Windows) - -Since docker is running in the boot2docker VM, you need to forward -relevant ports from the VM to your host for IPFS to act normally. This is -accomplished with the following command: - - boot2docker ssh -L 5001:localhost:5001 -L 4001:localhost:4001 -L 8080:localhost:8080 -fN - ### Troubleshooting If you have previously installed IPFS before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere @@ -252,14 +256,20 @@ For any other problems, check the [issues list](https://github.com/ipfs/go-ipfs/ and if you dont see your problem there, either come talk to us on irc (freenode #ipfs) or file an issue of your own! -## Contributing - -Please see [Contribute.md](contribute.md)! - ## Todo An IPFS alpha version has been released in February 2015. Things left to be done are all marked as [issues](https://github.com/ipfs/go-ipfs/issues). +## Contributing + +Please see [Contribute.md](contribute.md)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + ## License MIT diff --git a/bin/check_go_path b/bin/check_go_path index b34660e3c..229887043 100755 --- a/bin/check_go_path +++ b/bin/check_go_path @@ -1,7 +1,6 @@ #!/bin/sh PWD=$1 -EXPECTED=$2 if [ -z "$PWD" ]; then echo "must pass in your current working directory" @@ -13,8 +12,13 @@ if [ -z "$GOPATH" ]; then exit 1 fi -if [ "$PWD" != "$EXPECTED" ]; then - echo "go-ipfs must be built from within your \$GOPATH directory." - echo "expected '$EXPECTED' but got '$PWD'" - exit 1 -fi +while [ ${#} -gt 1 ]; do + if [ "$PWD" = "$2" ]; then + exit 0 + fi + shift +done + +echo "go-ipfs must be built from within your \$GOPATH directory." +echo "expected within '$GOPATH' but got '$PWD'" +exit 1 diff --git a/bin/container_daemon b/bin/container_daemon index 91641e85d..fd939338f 100644 --- a/bin/container_daemon +++ b/bin/container_daemon @@ -19,4 +19,4 @@ else ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080 fi -exec ipfs daemon --enable-gc +exec ipfs daemon "$@" diff --git a/bin/dist_get b/bin/dist_get index bc25928b2..0f0cdb206 100755 --- a/bin/dist_get +++ b/bin/dist_get @@ -129,6 +129,9 @@ case $goenv in freebsd-*) archive="tar.gz" ;; + openbsd-*) + archive="tar.gz" + ;; *) echo "unrecognized system environment: $goenv" >&2 die "currently only linux, darwin, windows and freebsd are supported by this script" diff --git a/blocks/blocks.go b/blocks/blocks.go index 777ab4c90..d5f4df700 100644 --- a/blocks/blocks.go +++ b/blocks/blocks.go @@ -20,47 +20,47 @@ type Block interface { } // Block is a singular block of data in ipfs -type RawBlock struct { +type BasicBlock struct { multihash mh.Multihash data []byte } // NewBlock creates a Block object from opaque data. It will hash the data. 
-func NewBlock(data []byte) *RawBlock {
-	return &RawBlock{data: data, multihash: u.Hash(data)}
+func NewBlock(data []byte) *BasicBlock {
+	return &BasicBlock{data: data, multihash: u.Hash(data)}
 }
 
 // NewBlockWithHash creates a new block when the hash of the data
 // is already known, this is used to save time in situations where
 // we are able to be confident that the data is correct
-func NewBlockWithHash(data []byte, h mh.Multihash) (*RawBlock, error) {
+func NewBlockWithHash(data []byte, h mh.Multihash) (*BasicBlock, error) {
 	if u.Debug {
 		chk := u.Hash(data)
 		if string(chk) != string(h) {
 			return nil, errors.New("Data did not match given hash!")
 		}
 	}
-	return &RawBlock{data: data, multihash: h}, nil
+	return &BasicBlock{data: data, multihash: h}, nil
 }
 
-func (b *RawBlock) Multihash() mh.Multihash {
+func (b *BasicBlock) Multihash() mh.Multihash {
 	return b.multihash
 }
 
-func (b *RawBlock) Data() []byte {
+func (b *BasicBlock) Data() []byte {
 	return b.data
 }
 
 // Key returns the block's Multihash as a Key value.
-func (b *RawBlock) Key() key.Key {
+func (b *BasicBlock) Key() key.Key {
 	return key.Key(b.multihash)
 }
 
-func (b *RawBlock) String() string {
+func (b *BasicBlock) String() string {
 	return fmt.Sprintf("[Block %s]", b.Key())
 }
 
-func (b *RawBlock) Loggable() map[string]interface{} {
+func (b *BasicBlock) Loggable() map[string]interface{} {
 	return map[string]interface{}{
 		"block": b.Key().String(),
 	}
diff --git a/blocks/blockstore/arc_cache.go b/blocks/blockstore/arc_cache.go
new file mode 100644
index 000000000..63253ef9c
--- /dev/null
+++ b/blocks/blockstore/arc_cache.go
@@ -0,0 +1,126 @@
+package blockstore
+
+import (
+	"github.com/ipfs/go-ipfs/blocks"
+	key "github.com/ipfs/go-ipfs/blocks/key"
+	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
+	lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
+	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
+)
+
+type arccache struct {
+	arc        *lru.ARCCache
+	blockstore Blockstore
+}
+
+func arcCached(bs Blockstore, lruSize int) (*arccache, error) {
+	arc, err := lru.NewARC(lruSize)
+	if err != nil {
+		return nil, err
+	}
+
+	return &arccache{arc: arc, blockstore: bs}, nil
+}
+
+func (b *arccache) DeleteBlock(k key.Key) error {
+	if has, ok := b.hasCached(k); ok && !has {
+		return ErrNotFound
+	}
+
+	b.arc.Remove(k) // Invalidate cache before deleting.
+	err := b.blockstore.DeleteBlock(k)
+	switch err {
+	case nil, ds.ErrNotFound, ErrNotFound:
+		b.arc.Add(k, false)
+		return nil
+	default:
+		return err
+	}
+}
+
+// If ok == false, the has result is inconclusive.
+// If ok == true, has answers the question: is the block contained?
+func (b *arccache) hasCached(k key.Key) (has bool, ok bool) {
+	if k == "" {
+		// Return an inconclusive result so the call falls through to the
+		// blockstore, which produces the correct error for an invalid key.
+		return false, false
+	}
+
+	h, ok := b.arc.Get(k)
+	if ok {
+		return h.(bool), true
+	}
+	return false, false
+}
+
+func (b *arccache) Has(k key.Key) (bool, error) {
+	if has, ok := b.hasCached(k); ok {
+		return has, nil
+	}
+
+	res, err := b.blockstore.Has(k)
+	if err == nil {
+		b.arc.Add(k, res)
+	}
+	return res, err
+}
+
+func (b *arccache) Get(k key.Key) (blocks.Block, error) {
+	if has, ok := b.hasCached(k); ok && !has {
+		return nil, ErrNotFound
+	}
+
+	bl, err := b.blockstore.Get(k)
+	if bl == nil && err == ErrNotFound {
+		b.arc.Add(k, false)
+	} else if bl != nil {
+		b.arc.Add(k, true)
+	}
+	return bl, err
+}
+
+func (b *arccache) Put(bl blocks.Block) error {
+	if has, ok := b.hasCached(bl.Key()); ok && has {
+		return nil
+	}
+
+	err := b.blockstore.Put(bl)
+	if err == nil {
+		b.arc.Add(bl.Key(), true)
+	}
+	return err
+}
+
+func (b *arccache) PutMany(bs []blocks.Block) error {
+	var good []blocks.Block
+	for _, block := range bs {
+		if has, ok := b.hasCached(block.Key()); !ok || (ok && !has) {
+			good = append(good, block)
+		}
+	}
+	err := b.blockstore.PutMany(good)
+	if err != nil {
+		return err
+	}
+	for _, block := range bs {
+		b.arc.Add(block.Key(), true)
+	}
+	return nil
+}
+
+func (b *arccache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
+	return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *arccache) GCLock() Unlocker {
+	return b.blockstore.(GCBlockstore).GCLock()
+}
+
+func (b *arccache) PinLock() Unlocker {
+	return b.blockstore.(GCBlockstore).PinLock()
+}
+
+func (b *arccache) GCRequested() bool {
+	return b.blockstore.(GCBlockstore).GCRequested()
+}
diff --git a/blocks/blockstore/arc_cache_test.go b/blocks/blockstore/arc_cache_test.go
new file mode 100644
index 000000000..505f7e1ea
--- /dev/null
+++ b/blocks/blockstore/arc_cache_test.go
@@ -0,0 +1,67 @@
+package blockstore
+
+import (
+	"github.com/ipfs/go-ipfs/blocks"
+	"testing"
+
+	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
+	syncds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync"
+	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
+)
+
+func testArcCached(bs GCBlockstore, ctx context.Context) (*arccache, error) {
+	if ctx == nil {
+		ctx = context.TODO()
+	}
+	opts := DefaultCacheOpts()
+	opts.HasBloomFilterSize = 0
+	opts.HasBloomFilterHashes = 0
+	bbs, err := CachedBlockstore(bs, ctx, opts)
+	if err == nil {
+		return bbs.(*arccache), nil
+	} else {
+		return nil, err
+	}
+}
+
+func TestRemoveCacheEntryOnDelete(t *testing.T) {
+	b := blocks.NewBlock([]byte("foo"))
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+	cachedbs, err := testArcCached(bs, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cachedbs.Put(b)
+
+	cd.Lock()
+	writeHitTheDatastore := false
+	cd.Unlock()
+
+	cd.SetFunc(func() {
+		writeHitTheDatastore = true
+	})
+
+	cachedbs.DeleteBlock(b.Key())
+	cachedbs.Put(b)
+	if !writeHitTheDatastore {
+		t.Fail()
+	}
+}
+
+func TestElideDuplicateWrite(t *testing.T) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+	cachedbs, err := testArcCached(bs, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	b1 := blocks.NewBlock([]byte("foo"))
+
+	cachedbs.Put(b1)
+	cd.SetFunc(func() {
+		t.Fatal("write hit the datastore")
+	})
+	cachedbs.Put(b1)
+}
diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go
index d3a9b1aa1..380e0b640 100644
--- a/blocks/blockstore/blockstore.go
+++ b/blocks/blockstore/blockstore.go
@@ -7,14 +7,14 @@ import (
 	"sync"
 	"sync/atomic"
 
-	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace"
-	dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
 	blocks "github.com/ipfs/go-ipfs/blocks"
 	key "github.com/ipfs/go-ipfs/blocks/key"
+	logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log"
+	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
+	dsns "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/namespace"
+	dsq "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/query"
 	mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
 	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
-	logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log"
 )
 
 var log = logging.Logger("blockstore")
@@ -22,7 +22,8 @@ var log = logging.Logger("blockstore")
 // BlockPrefix namespaces blockstore datastores
 var BlockPrefix = ds.NewKey("blocks")
 
-var ValueTypeMismatch = errors.New("The retrieved value is not a Block")
+var ValueTypeMismatch = errors.New("the retrieved value is not a Block")
+var ErrHashMismatch = errors.New("block in storage has different hash than requested")
 
 var ErrNotFound = errors.New("blockstore: block not found")
 
@@ -71,6 +72,12 @@ type blockstore struct {
 	lk      sync.RWMutex
 	gcreq   int32
 	gcreqlk sync.Mutex
+
+	rehash bool
+}
+
+func (bs *blockstore) RuntimeHashing(enabled bool) {
+	bs.rehash = enabled
 }
 
 func (bs *blockstore) Get(k key.Key) (blocks.Block, error) {
@@ -90,7 +97,16 @@ func (bs *blockstore) Get(k key.Key) (blocks.Block, error) {
 		return nil, ValueTypeMismatch
 	}
 
-	return blocks.NewBlockWithHash(bdata, mh.Multihash(k))
+	if bs.rehash {
+		rb := blocks.NewBlock(bdata)
+		if rb.Key() != k {
+			return nil, ErrHashMismatch
+		} else {
+			return rb, nil
+		}
+	} else {
+		return blocks.NewBlockWithHash(bdata, mh.Multihash(k))
+	}
 }
 
 func (bs *blockstore) Put(block blocks.Block) error {
@@ -148,26 +164,31 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
 	}
 
 	// this function is here to compartmentalize
-	get := func() (k key.Key, ok bool) {
+	get := func() (key.Key, bool) {
 		select {
 		case <-ctx.Done():
-			return k, false
+			return "", false
 		case e, more := <-res.Next():
 			if !more {
-				return k, false
+				return "", false
 			}
 			if e.Error != nil {
 				log.Debug("blockstore.AllKeysChan got err:", e.Error)
-				return k, false
+				return "", false
 			}
 
 			// need to convert to key.Key using key.KeyFromDsKey.
-			k = key.KeyFromDsKey(ds.NewKey(e.Key))
+			k, err := key.KeyFromDsKey(ds.NewKey(e.Key))
+			if err != nil {
+				log.Warningf("error parsing key from DsKey: %s", err)
+				return "", true
+			}
 			log.Debug("blockstore: query got key", k)
 
 			// key must be a multihash. else ignore it.
-			_, err := mh.Cast([]byte(k))
+			_, err = mh.Cast([]byte(k))
 			if err != nil {
+				log.Warningf("key from datastore was not a multihash: %s", err)
 				return "", true
 			}
 
@@ -175,7 +196,7 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
 		}
 	}
 
-	output := make(chan key.Key)
+	output := make(chan key.Key, dsq.KeysOnlyBufSize)
 	go func() {
 		defer func() {
 			res.Process().Close() // ensure exit (signals early exit, too)
diff --git a/blocks/blockstore/blockstore_test.go b/blocks/blockstore/blockstore_test.go
index 8b0609f1f..2f0269141 100644
--- a/blocks/blockstore/blockstore_test.go
+++ b/blocks/blockstore/blockstore_test.go
@@ -5,9 +5,9 @@ import (
 	"fmt"
 	"testing"
 
-	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
-	dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
-	ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
+	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
+	dsq "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/query"
+	ds_sync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync"
 	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
 
 	blocks "github.com/ipfs/go-ipfs/blocks"
@@ -53,6 +53,22 @@ func TestPutThenGetBlock(t *testing.T) {
 	}
 }
 
+func TestRuntimeHashing(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	bl := blocks.NewBlock([]byte("some data"))
+	blBad, err := blocks.NewBlockWithHash([]byte("some other data"), bl.Key().ToMultihash())
+	if err != nil {
+		t.Fatal("Debug is enabled")
+	}
+
+	bs.Put(blBad)
+	bs.RuntimeHashing(true)
+
+	if _, err := bs.Get(bl.Key()); err != ErrHashMismatch {
+		t.Fatalf("Expected '%v' got '%v'\n", ErrHashMismatch, err)
+	}
+}
+
 func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []key.Key) {
 	if d == nil {
 		d = ds.NewMapDatastore()
@@ -117,97 +133,42 @@ func TestAllKeysRespectsContext(t *testing.T) {
 		errors <- nil // a nil one to signal break
 	}
 
-	// Once without context, to make sure it all works
-	{
-		var results dsq.Results
-		var resultsmu = make(chan struct{})
-		resultChan := make(chan dsq.Result)
-		d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
-			results = dsq.ResultsWithChan(q, resultChan)
-			resultsmu <- struct{}{}
-			return results, nil
-		})
+	var results dsq.Results
+	var resultsmu = make(chan struct{})
+	resultChan := make(chan dsq.Result)
+	d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
+		results = dsq.ResultsWithChan(q, resultChan)
+		resultsmu <- struct{}{}
+		return results, nil
+	})
 
-		go getKeys(context.Background())
+	go getKeys(context.Background())
 
-		// make sure it's waiting.
+ <-started + <-resultsmu + select { + case <-done: + t.Fatal("sync is wrong") + case <-results.Process().Closing(): + t.Fatal("should not be closing") + case <-results.Process().Closed(): + t.Fatal("should not be closed") + default: } - // Once with - { - var results dsq.Results - var resultsmu = make(chan struct{}) - resultChan := make(chan dsq.Result) - d.SetFunc(func(q dsq.Query) (dsq.Results, error) { - results = dsq.ResultsWithChan(q, resultChan) - resultsmu <- struct{}{} - return results, nil - }) + e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()} + resultChan <- dsq.Result{Entry: e} // let it go. + close(resultChan) + <-done // should be done now. + <-results.Process().Closed() // should be closed now - ctx, cancel := context.WithCancel(context.Background()) - go getKeys(ctx) - - // make sure it's waiting. - <-started - <-resultsmu - select { - case <-done: - t.Fatal("sync is wrong") - case <-results.Process().Closing(): - t.Fatal("should not be closing") - case <-results.Process().Closed(): - t.Fatal("should not be closed") - default: - } - - cancel() // let it go. - - select { - case <-done: - t.Fatal("sync is wrong") - case <-results.Process().Closed(): - t.Fatal("should not be closed") // should not be closed yet. - case <-results.Process().Closing(): - // should be closing now! - t.Log("closing correctly at this point.") - } - - close(resultChan) - <-done // should be done now. - <-results.Process().Closed() // should be closed now - - // print any errors - for err := range errors { - if err == nil { - break - } - t.Error(err) + // print any errors + for err := range errors { + if err == nil { + break } + t.Error(err) } } diff --git a/blocks/blockstore/bloom_cache.go b/blocks/blockstore/bloom_cache.go new file mode 100644 index 000000000..e10dacfaf --- /dev/null +++ b/blocks/blockstore/bloom_cache.go @@ -0,0 +1,158 @@ +package blockstore + +import ( + "github.com/ipfs/go-ipfs/blocks" + key "github.com/ipfs/go-ipfs/blocks/key" + bloom "gx/ipfs/QmWQ2SJisXwcCLsUXLwYCKSfyExXjFRW2WbBH5sqCUnwX5/bbloom" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + + "sync/atomic" +) + +// bloomCached returns Blockstore that caches Has requests using Bloom filter +// Size is size of bloom filter in bytes +func bloomCached(bs Blockstore, ctx context.Context, bloomSize, hashCount int) (*bloomcache, error) { + bl, err := bloom.New(float64(bloomSize), float64(hashCount)) + if err != nil { + return nil, err + } + bc := &bloomcache{blockstore: bs, bloom: bl} + bc.Invalidate() + go bc.Rebuild(ctx) + + return bc, nil +} + +type bloomcache struct { + bloom *bloom.Bloom + active int32 + + // This chan is only used for testing to wait for bloom to enable + rebuildChan chan struct{} + blockstore Blockstore + + // Statistics + hits uint64 + misses uint64 +} + +func (b *bloomcache) Invalidate() { + b.rebuildChan = make(chan struct{}) + atomic.StoreInt32(&b.active, 0) +} + +func (b *bloomcache) BloomActive() bool { + return atomic.LoadInt32(&b.active) != 0 +} + +func (b *bloomcache) Rebuild(ctx context.Context) { + evt := log.EventBegin(ctx, "bloomcache.Rebuild") + defer evt.Done() + + ch, err := b.blockstore.AllKeysChan(ctx) + if err != nil { + log.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err) + return + } + finish := false + for !finish { + select { + case key, ok := <-ch: + if ok { + b.bloom.AddTS([]byte(key)) // Use binary key, the more compact the better + } else { + finish = true + } + case <-ctx.Done(): + log.Warning("Cache rebuild closed 
by context finishing.")
+			return
+		}
+	}
+	close(b.rebuildChan)
+	atomic.StoreInt32(&b.active, 1)
+}
+
+func (b *bloomcache) DeleteBlock(k key.Key) error {
+	if has, ok := b.hasCached(k); ok && !has {
+		return ErrNotFound
+	}
+
+	return b.blockstore.DeleteBlock(k)
+}
+
+// If ok == false, the has result is inconclusive.
+// If ok == true, has answers the question: is the block contained?
+func (b *bloomcache) hasCached(k key.Key) (has bool, ok bool) {
+	if k == "" {
+		// Return an inconclusive result so the call for an invalid
+		// key is forwarded to the underlying blockstore.
+		return false, false
+	}
+	if b.BloomActive() {
+		blr := b.bloom.HasTS([]byte(k))
+		if !blr { // "not contained" is the only conclusive answer a bloom filter gives
+			return false, true
+		}
+	}
+	return false, false
+}
+
+func (b *bloomcache) Has(k key.Key) (bool, error) {
+	if has, ok := b.hasCached(k); ok {
+		return has, nil
+	}
+
+	return b.blockstore.Has(k)
+}
+
+func (b *bloomcache) Get(k key.Key) (blocks.Block, error) {
+	if has, ok := b.hasCached(k); ok && !has {
+		return nil, ErrNotFound
+	}
+
+	return b.blockstore.Get(k)
+}
+
+func (b *bloomcache) Put(bl blocks.Block) error {
+	if has, ok := b.hasCached(bl.Key()); ok && has {
+		return nil
+	}
+
+	err := b.blockstore.Put(bl)
+	if err == nil {
+		b.bloom.AddTS([]byte(bl.Key()))
+	}
+	return err
+}
+
+func (b *bloomcache) PutMany(bs []blocks.Block) error {
+	var good []blocks.Block
+	for _, block := range bs {
+		if has, ok := b.hasCached(block.Key()); !ok || (ok && !has) {
+			good = append(good, block)
+		}
+	}
+	err := b.blockstore.PutMany(good)
+	if err == nil {
+		for _, block := range bs {
+			b.bloom.AddTS([]byte(block.Key()))
+		}
+	}
+	return err
+}
+
+func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
+	return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *bloomcache) GCLock() Unlocker {
+	return b.blockstore.(GCBlockstore).GCLock()
+}
+
+func (b *bloomcache) PinLock() Unlocker {
+	return b.blockstore.(GCBlockstore).PinLock()
+}
+
+func (b *bloomcache) GCRequested() bool {
+	return b.blockstore.(GCBlockstore).GCRequested()
+}
diff --git a/blocks/blockstore/bloom_cache_test.go b/blocks/blockstore/bloom_cache_test.go
new file mode 100644
index 000000000..fbffd42f5
--- /dev/null
+++ b/blocks/blockstore/bloom_cache_test.go
@@ -0,0 +1,116 @@
+package blockstore
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-ipfs/blocks"
+
+	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
+	dsq "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/query"
+	syncds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync"
+	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
+)
+
+func testBloomCached(bs GCBlockstore, ctx context.Context) (*bloomcache, error) {
+	if ctx == nil {
+		ctx = context.TODO()
+	}
+	opts := DefaultCacheOpts()
+	opts.HasARCCacheSize = 0
+	bbs, err := CachedBlockstore(bs, ctx, opts)
+	if err == nil {
+		return bbs.(*bloomcache), nil
+	} else {
+		return nil, err
+	}
+}
+
+func TestReturnsErrorWhenSizeNegative(t *testing.T) {
+	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+	_, err := bloomCached(bs, context.TODO(), -1, 1)
+	if err == nil {
+		t.Fail()
+	}
+}
+func TestHasIsBloomCached(t *testing.T) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+
+	for i := 0; i < 1000; i++ {
+		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
+	}
+	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
+	cachedbs, err := testBloomCached(bs, ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-cachedbs.rebuildChan:
+	case <-ctx.Done():
+		t.Fatalf("Timeout waiting for rebuild: %d", cachedbs.bloom.ElementsAdded())
+	}
+
+	cacheFails := 0
+	cd.SetFunc(func() {
+		cacheFails++
+	})
+
+	for i := 0; i < 1000; i++ {
+		cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Key())
+	}
+
+	if float64(cacheFails)/float64(1000) > float64(0.05) {
+		t.Fatal("Bloom filter has cache miss rate of more than 5%")
+	}
+}
+
+type callbackDatastore struct {
+	sync.Mutex
+	f  func()
+	ds ds.Datastore
+}
+
+func (c *callbackDatastore) SetFunc(f func()) {
+	c.Lock()
+	defer c.Unlock()
+	c.f = f
+}
+
+func (c *callbackDatastore) CallF() {
+	c.Lock()
+	defer c.Unlock()
+	c.f()
+}
+
+func (c *callbackDatastore) Put(key ds.Key, value interface{}) (err error) {
+	c.CallF()
+	return c.ds.Put(key, value)
+}
+
+func (c *callbackDatastore) Get(key ds.Key) (value interface{}, err error) {
+	c.CallF()
+	return c.ds.Get(key)
+}
+
+func (c *callbackDatastore) Has(key ds.Key) (exists bool, err error) {
+	c.CallF()
+	return c.ds.Has(key)
+}
+
+func (c *callbackDatastore) Delete(key ds.Key) (err error) {
+	c.CallF()
+	return c.ds.Delete(key)
+}
+
+func (c *callbackDatastore) Query(q dsq.Query) (dsq.Results, error) {
+	c.CallF()
+	return c.ds.Query(q)
+}
+
+func (c *callbackDatastore) Batch() (ds.Batch, error) {
+	return ds.NewBasicBatch(c), nil
+}
diff --git a/blocks/blockstore/caching.go b/blocks/blockstore/caching.go
new file mode 100644
index 000000000..bc78134e0
--- /dev/null
+++ b/blocks/blockstore/caching.go
@@ -0,0 +1,45 @@
+package blockstore
+
+import (
+	"errors"
+
+	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
+)
+
+// Next to each option is its approximate memory usage per unit
+type CacheOpts struct {
+	HasBloomFilterSize   int // 1 byte
+	HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
+	HasARCCacheSize      int // 32 bytes
+}
+
+func DefaultCacheOpts() CacheOpts {
+	return CacheOpts{
+		HasBloomFilterSize:   512 << 10,
+		HasBloomFilterHashes: 7,
+		HasARCCacheSize:      64 << 10,
+	}
+}
+
+func CachedBlockstore(bs GCBlockstore,
+	ctx context.Context, opts CacheOpts) (cbs GCBlockstore, err error) {
+	cbs = bs
+
+	if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
+		opts.HasARCCacheSize < 0 {
+		return nil, errors.New("all options for cache need to be non-negative")
+	}
+
+	if opts.HasBloomFilterSize != 0 && opts.HasBloomFilterHashes == 0 {
+		return nil, errors.New("bloom filter hash count can't be 0 when a size is set")
+	}
+	if opts.HasBloomFilterSize != 0 {
+		// *8 because of bytes to bits conversion
+		cbs, err = bloomCached(cbs, ctx, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
+	}
+	if opts.HasARCCacheSize > 0 {
+		cbs, err = arcCached(cbs, opts.HasARCCacheSize)
+	}
+
+	return cbs, err
+}
diff --git a/blocks/blockstore/write_cache.go b/blocks/blockstore/write_cache.go
deleted file mode 100644
index f7c2caf45..000000000
--- a/blocks/blockstore/write_cache.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package blockstore
-
-import (
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru"
-	"github.com/ipfs/go-ipfs/blocks"
-	key "github.com/ipfs/go-ipfs/blocks/key"
-	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
-)
-
-// WriteCached returns a blockstore that caches up to |size| unique writes (bs.Put).
-func WriteCached(bs Blockstore, size int) (*writecache, error) { - c, err := lru.New(size) - if err != nil { - return nil, err - } - return &writecache{blockstore: bs, cache: c}, nil -} - -type writecache struct { - cache *lru.Cache // pointer b/c Cache contains a Mutex as value (complicates copying) - blockstore Blockstore -} - -func (w *writecache) DeleteBlock(k key.Key) error { - defer log.EventBegin(context.TODO(), "writecache.BlockRemoved", &k).Done() - w.cache.Remove(k) - return w.blockstore.DeleteBlock(k) -} - -func (w *writecache) Has(k key.Key) (bool, error) { - if _, ok := w.cache.Get(k); ok { - return true, nil - } - return w.blockstore.Has(k) -} - -func (w *writecache) Get(k key.Key) (blocks.Block, error) { - return w.blockstore.Get(k) -} - -func (w *writecache) Put(b blocks.Block) error { - k := b.Key() - if _, ok := w.cache.Get(k); ok { - return nil - } - defer log.EventBegin(context.TODO(), "writecache.BlockAdded", &k).Done() - - w.cache.Add(b.Key(), struct{}{}) - return w.blockstore.Put(b) -} - -func (w *writecache) PutMany(bs []blocks.Block) error { - var good []blocks.Block - for _, b := range bs { - if _, ok := w.cache.Get(b.Key()); !ok { - good = append(good, b) - k := b.Key() - defer log.EventBegin(context.TODO(), "writecache.BlockAdded", &k).Done() - } - } - return w.blockstore.PutMany(good) -} - -func (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) { - return w.blockstore.AllKeysChan(ctx) -} - -func (w *writecache) GCLock() Unlocker { - return w.blockstore.(GCBlockstore).GCLock() -} - -func (w *writecache) PinLock() Unlocker { - return w.blockstore.(GCBlockstore).PinLock() -} - -func (w *writecache) GCRequested() bool { - return w.blockstore.(GCBlockstore).GCRequested() -} diff --git a/blocks/blockstore/write_cache_test.go b/blocks/blockstore/write_cache_test.go deleted file mode 100644 index 97bf86b12..000000000 --- a/blocks/blockstore/write_cache_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package blockstore - -import ( - "testing" - - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" - syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" - "github.com/ipfs/go-ipfs/blocks" -) - -func TestReturnsErrorWhenSizeNegative(t *testing.T) { - bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore())) - _, err := WriteCached(bs, -1) - if err != nil { - return - } - t.Fail() -} - -func TestRemoveCacheEntryOnDelete(t *testing.T) { - b := blocks.NewBlock([]byte("foo")) - cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()} - bs := NewBlockstore(syncds.MutexWrap(cd)) - cachedbs, err := WriteCached(bs, 1) - if err != nil { - t.Fatal(err) - } - cachedbs.Put(b) - - writeHitTheDatastore := false - cd.SetFunc(func() { - writeHitTheDatastore = true - }) - - cachedbs.DeleteBlock(b.Key()) - cachedbs.Put(b) - if !writeHitTheDatastore { - t.Fail() - } -} - -func TestElideDuplicateWrite(t *testing.T) { - cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()} - bs := NewBlockstore(syncds.MutexWrap(cd)) - cachedbs, err := WriteCached(bs, 1) - if err != nil { - t.Fatal(err) - } - - b1 := blocks.NewBlock([]byte("foo")) - - cachedbs.Put(b1) - cd.SetFunc(func() { - t.Fatal("write hit the datastore") - }) - cachedbs.Put(b1) -} - -type callbackDatastore struct { - f func() - ds ds.Datastore -} - -func (c *callbackDatastore) SetFunc(f func()) { c.f = f } - -func (c *callbackDatastore) Put(key ds.Key, 
value interface{}) (err error) { - c.f() - return c.ds.Put(key, value) -} - -func (c *callbackDatastore) Get(key ds.Key) (value interface{}, err error) { - c.f() - return c.ds.Get(key) -} - -func (c *callbackDatastore) Has(key ds.Key) (exists bool, err error) { - c.f() - return c.ds.Has(key) -} - -func (c *callbackDatastore) Delete(key ds.Key) (err error) { - c.f() - return c.ds.Delete(key) -} - -func (c *callbackDatastore) Query(q dsq.Query) (dsq.Results, error) { - c.f() - return c.ds.Query(q) -} - -func (c *callbackDatastore) Batch() (ds.Batch, error) { - return ds.NewBasicBatch(c), nil -} diff --git a/blocks/bloom/filter.go b/blocks/bloom/filter.go index 64a2db042..6b1b74854 100644 --- a/blocks/bloom/filter.go +++ b/blocks/bloom/filter.go @@ -6,7 +6,7 @@ import ( "errors" // Non crypto hash, because speed "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mtchavez/jenkins" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/steakknife/hamming" + "gx/ipfs/QmeWQMDa5dSdP4n8WDeoY5z8L2EKVqF4ZvK4VEHsLqXsGu/hamming" "hash" ) diff --git a/blocks/key/key.go b/blocks/key/key.go index 525ac1af2..71ef20df6 100644 --- a/blocks/key/key.go +++ b/blocks/key/key.go @@ -4,9 +4,10 @@ import ( "encoding/json" "fmt" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58" + ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" + base32 "gx/ipfs/Qmb1DA2A9LS2wR4FFweB4uEDomFsdmnw1VLawLE1yQzudj/base32" ) // Key is a string representation of multihash for use with maps. @@ -38,7 +39,7 @@ func B58KeyEncode(k Key) string { // DsKey returns a Datastore key func (k Key) DsKey() ds.Key { - return ds.NewKey(string(k)) + return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(k))) } // UnmarshalJSON returns a JSON-encoded Key (string) @@ -68,36 +69,13 @@ func (k *Key) Loggable() map[string]interface{} { } // KeyFromDsKey returns a Datastore key -func KeyFromDsKey(dsk ds.Key) Key { - return Key(dsk.String()[1:]) -} - -// B58KeyConverter -- for KeyTransform datastores -// (static as only one obj needed) -var B58KeyConverter = b58KeyConverter{} - -type b58KeyConverter struct{} - -// ConvertKey returns a B58 encoded Datastore key -// TODO: this is hacky because it encodes every path component. some -// path components may be proper strings already... -func (b58KeyConverter) ConvertKey(dsk ds.Key) ds.Key { - k := ds.NewKey("/") - for _, n := range dsk.Namespaces() { - k = k.ChildString(b58.Encode([]byte(n))) +func KeyFromDsKey(dsk ds.Key) (Key, error) { + dec, err := base32.RawStdEncoding.DecodeString(dsk.String()[1:]) + if err != nil { + return "", err } - return k -} -// InvertKey returns a b58 decoded Datastore key -// TODO: this is hacky because it encodes every path component. some -// path components may be proper strings already... 
-func (b58KeyConverter) InvertKey(dsk ds.Key) ds.Key { - k := ds.NewKey("/") - for _, n := range dsk.Namespaces() { - k = k.ChildString(string(b58.Decode(n))) - } - return k + return Key(dec), nil } // KeySlice is used for sorting Keys diff --git a/blocks/set/set.go b/blocks/set/set.go index 65843d6eb..8c85393cc 100644 --- a/blocks/set/set.go +++ b/blocks/set/set.go @@ -4,7 +4,7 @@ package set import ( "github.com/ipfs/go-ipfs/blocks/bloom" key "github.com/ipfs/go-ipfs/blocks/key" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" ) var log = logging.Logger("blockset") diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 945f60ae6..710580614 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) var log = logging.Logger("blockservice") @@ -86,7 +86,7 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e if err == blockstore.ErrNotFound && s.Exchange != nil { // TODO be careful checking ErrNotFound. If the underlying // implementation changes, this will break. - log.Debug("Blockservice: Searching bitswap.") + log.Debug("Blockservice: Searching bitswap") blk, err := s.Exchange.GetBlock(ctx, k) if err != nil { if err == blockstore.ErrNotFound { @@ -97,7 +97,7 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e return blk, nil } - log.Debug("Blockservice GetBlock: Not found.") + log.Debug("Blockservice GetBlock: Not found") if err == blockstore.ErrNotFound { return nil, ErrNotFound } @@ -119,7 +119,7 @@ func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan block misses = append(misses, k) continue } - log.Debug("Blockservice: Got data in datastore.") + log.Debug("Blockservice: Got data in datastore") select { case out <- hit: case <-ctx.Done(): diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go index ed61dad59..b7df8721c 100644 --- a/blockservice/test/blocks_test.go +++ b/blockservice/test/blocks_test.go @@ -5,14 +5,14 @@ import ( "testing" "time" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" . "github.com/ipfs/go-ipfs/blockservice" offline "github.com/ipfs/go-ipfs/exchange/offline" + ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" + dssync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/circle.yml b/circle.yml index a9ff761b9..8be17eb8e 100644 --- a/circle.yml +++ b/circle.yml @@ -10,8 +10,8 @@ machine: post: - sudo rm -rf /usr/local/go - - if [ ! 
-e go1.5.2.linux-amd64.tar.gz ]; then curl -o go1.5.2.linux-amd64.tar.gz https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz; fi - - sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz + - if [ ! -e go1.7.linux-amd64.tar.gz ]; then curl -o go1.7.linux-amd64.tar.gz https://storage.googleapis.com/golang/go1.7.linux-amd64.tar.gz; fi + - sudo tar -C /usr/local -xzf go1.7.linux-amd64.tar.gz services: - docker @@ -28,7 +28,7 @@ dependencies: - cd "$HOME/.go_workspace/src/$IMPORT_PATH" && make deps cache_directories: - - ~/go1.5.2.linux-amd64.tar.gz + - ~/go1.7.linux-amd64.tar.gz - "$HOME/.go_workspace/src/gx/ipfs" test: diff --git a/cmd/ipfs/README.md b/cmd/ipfs/README.md deleted file mode 100644 index 2433d9eb2..000000000 --- a/cmd/ipfs/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# [go-ipfs/cmd/ipfs](http://github.com/ipfs/go-ipfs/tree/master/cmd/ipfs) - -![](https://raw.githubusercontent.com/ipfs/logo/master/ipfs-logo-text-256-ice.png) - -This is the ipfs commandline tool. For now, it's the main entry point to using IPFS. - - -## Install from source - -``` -go install -ipfs -``` - -## Install from [gobuilder.me](https://gobuilder.me/) - -If you're viewing this in [gobuilder](https://gobuilder.me/github.com/ipfs/go-ipfs/cmd/ipfs), choose the binary that suits you best at the end of this page. Download it, unzip it, and move the binary into place. - -## Usage - -```sh -# initialize an ipfs node -ipfs init - -# list some commands -ipfs - -# get help -ipfs --help -``` diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index 825d3fa49..2f33f53a9 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -1,6 +1,7 @@ package main import ( + "errors" _ "expvar" "fmt" "net" @@ -8,11 +9,10 @@ import ( _ "net/http/pprof" "os" "sort" - "strings" "sync" - _ "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics/runtime" - "gx/ipfs/QmUBa4w6CbHJUMeGJPDiMEDWsM93xToK1fTnFXnrC8Hksw/go-multiaddr-net" + "gx/ipfs/QmPpRcbNUXauP3zWZ1NJMLWpe4QnmEHrd2ba2D3yqWznw7/go-multiaddr-net" + _ "gx/ipfs/QmV3NSS3A1kX5s28r7yLczhDsXzkgo65cqRgKFXYunWZmD/metrics/runtime" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" @@ -24,24 +24,27 @@ import ( "github.com/ipfs/go-ipfs/core/corerouting" nodeMount "github.com/ipfs/go-ipfs/fuse/node" fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" - conn "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net/conn" + migrate "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" + pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore" + conn "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net/conn" util "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" prometheus "gx/ipfs/QmdhsRK1EK2fvAz2i2SH5DEfkL6seDuyMYEsxKa9Braim3/client_golang/prometheus" ) const ( + adjustFDLimitKwd = "manage-fdlimit" + enableGCKwd = "enable-gc" initOptionKwd = "init" - routingOptionKwd = "routing" - routingOptionSupernodeKwd = "supernode" - mountKwd = "mount" - writableKwd = "writable" ipfsMountKwd = "mount-ipfs" ipnsMountKwd = "mount-ipns" - unrestrictedApiAccessKwd = "unrestricted-api" + migrateKwd = "migrate" + mountKwd = "mount" + offlineKwd = "offline" + routingOptionKwd = "routing" + routingOptionSupernodeKwd = "supernode" unencryptTransportKwd = "disable-transport-encryption" - enableGCKwd = "enable-gc" - adjustFDLimitKwd = "manage-fdlimit" + unrestrictedApiAccessKwd = "unrestricted-api" + writableKwd 
= "writable" // apiAddrKwd = "address-api" // swarmAddrKwd = "address-swarm" ) @@ -109,8 +112,9 @@ second signal. IPFS_PATH environment variable -ipfs uses a repository in the local file system. By default, the repo is located -at ~/.ipfs. To change the repo location, set the $IPFS_PATH environment variable: +ipfs uses a repository in the local file system. By default, the repo is +located at ~/.ipfs. To change the repo location, set the $IPFS_PATH +environment variable: export IPFS_PATH=/path/to/ipfsrepo @@ -120,8 +124,9 @@ Previously, IPFS used an environment variable as seen below: export API_ORIGIN="http://localhost:8888/" -This is deprecated. It is still honored in this version, but will be removed in a -future version, along with this notice. Please move to setting the HTTP Headers. +This is deprecated. It is still honored in this version, but will be removed +in a future version, along with this notice. Please move to setting the HTTP +Headers. `, }, @@ -135,7 +140,9 @@ future version, along with this notice. Please move to setting the HTTP Headers. cmds.BoolOption(unrestrictedApiAccessKwd, "Allow API access to unlisted hashes").Default(false), cmds.BoolOption(unencryptTransportKwd, "Disable transport encryption (for debugging protocols)").Default(false), cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection").Default(false), - cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").Default(false), + cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").Default(true), + cmds.BoolOption(offlineKwd, "Run offline. Do not connect to the rest of the network but provide local API.").Default(false), + cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."), // TODO: add way to override addresses. tricky part: updating the config if also --init. // cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"), @@ -165,7 +172,7 @@ func daemonFunc(req cmds.Request, res cmds.Response) { managefd, _, _ := req.Option(adjustFDLimitKwd).Bool() if managefd { if err := fileDescriptorCheck(); err != nil { - log.Error("setting file descriptor limit: %s", err) + log.Errorf("setting file descriptor limit: %s", err) } } @@ -213,9 +220,42 @@ func daemonFunc(req cmds.Request, res cmds.Response) { // acquire the repo lock _before_ constructing a node. we need to make // sure we are permitted to access the resources (datastore, etc.) repo, err := fsrepo.Open(req.InvocContext().ConfigRoot) - if err != nil { + switch err { + default: res.SetError(err, cmds.ErrNormal) return + case fsrepo.ErrNeedMigration: + domigrate, found, _ := req.Option(migrateKwd).Bool() + fmt.Println("Found outdated fs-repo, migrations need to be run.") + + if !found { + domigrate = YesNoPrompt("Run migrations now? 
[y/N]") + } + + if !domigrate { + fmt.Println("Not running migrations of fs-repo now.") + fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io") + res.SetError(fmt.Errorf("fs-repo requires migration"), cmds.ErrNormal) + return + } + + err = migrate.RunMigration(fsrepo.RepoVersion) + if err != nil { + fmt.Println("The migrations of fs-repo failed:") + fmt.Printf(" %s\n", err) + fmt.Println("If you think this is a bug, please file an issue and include this whole log output.") + fmt.Println(" https://github.com/ipfs/fs-repo-migrations") + res.SetError(err, cmds.ErrNormal) + return + } + + repo, err = fsrepo.Open(req.InvocContext().ConfigRoot) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + case nil: + break } cfg, err := ctx.GetConfig() @@ -226,9 +266,12 @@ func daemonFunc(req cmds.Request, res cmds.Response) { // Start assembling node config ncfg := &core.BuildCfg{ - Online: true, - Repo: repo, + Repo: repo, + Permament: true, // It is a temporary way to signify that the node is permanent + //TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral } + offline, _, _ := req.Option(offlineKwd).Bool() + ncfg.Online = !offline routingOption, _, err := req.Option(routingOptionKwd).String() if err != nil { @@ -242,9 +285,9 @@ func daemonFunc(req cmds.Request, res cmds.Response) { repo.Close() // because ownership hasn't been transferred to the node return } - var infos []peer.PeerInfo + var infos []pstore.PeerInfo for _, addr := range servers { - infos = append(infos, peer.PeerInfo{ + infos = append(infos, pstore.PeerInfo{ ID: addr.ID(), Addrs: []ma.Multiaddr{addr.Transport()}, }) @@ -302,6 +345,11 @@ func daemonFunc(req cmds.Request, res cmds.Response) { res.SetError(err, cmds.ErrNormal) return } + if mount && offline { + res.SetError(errors.New("mount is not currently supported in offline mode"), + cmds.ErrClient) + return + } if mount { if err := mountFuse(req); err != nil { res.SetError(err, cmds.ErrNormal) return } @@ -359,33 +407,24 @@ func serveHTTPApi(req cmds.Request) (error, <-chan error) { apiMaddr = apiLis.Multiaddr() fmt.Printf("API server listening on %s\n", apiMaddr) + // by default, we don't let you load arbitrary ipfs objects through the api, + // because this would open up the api to scripting vulnerabilities. + // only the webui objects are allowed. + // if you know what you're doing, go ahead and pass --unrestricted-api. unrestricted, _, err := req.Option(unrestrictedApiAccessKwd).Bool() if err != nil { return fmt.Errorf("serveHTTPApi: Option(%s) failed: %s", unrestrictedApiAccessKwd, err), nil } + gatewayOpt := corehttp.GatewayOption(corehttp.WebUIPaths...)
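Condensed, the fs-repo handling that daemonFunc gains above amounts to the following sketch. openOrMigrate is a hypothetical helper name, the --migrate option, YesNoPrompt confirmation, and user-facing error output are elided, and the repo.Repo return type of fsrepo.Open is assumed:

```go
import (
	"github.com/ipfs/go-ipfs/repo"
	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
	migrate "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

// openOrMigrate opens the fs-repo, running migrations once if the
// on-disk repo is older than the version this binary expects.
func openOrMigrate(configRoot string) (repo.Repo, error) {
	r, err := fsrepo.Open(configRoot)
	if err != fsrepo.ErrNeedMigration {
		return r, err // success, or an unrelated error
	}
	// Outdated fs-repo found: migrate up to the current version...
	if err := migrate.RunMigration(fsrepo.RepoVersion); err != nil {
		return nil, err
	}
	// ...and retry the open exactly once.
	return fsrepo.Open(configRoot)
}
```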
+ if unrestricted { + gatewayOpt = corehttp.GatewayOption("/ipfs", "/ipns") + } - apiGw := corehttp.NewGateway(corehttp.GatewayConfig{ - Writable: true, - BlockList: &corehttp.BlockList{ - Decider: func(s string) bool { - if unrestricted { - return true - } - // for now, only allow paths in the WebUI path - for _, webuipath := range corehttp.WebUIPaths { - if strings.HasPrefix(s, webuipath) { - return true - } - } - return false - }, - }, - }) var opts = []corehttp.ServeOption{ corehttp.MetricsCollectionOption("api"), corehttp.CommandsOption(*req.InvocContext()), corehttp.WebUIOption, - apiGw.ServeOption(), + gatewayOpt, corehttp.VersionOption(), defaultMux("/debug/vars"), defaultMux("/debug/pprof/"), @@ -416,6 +455,10 @@ func serveHTTPApi(req cmds.Request) (error, <-chan error) { // printSwarmAddrs prints the addresses of the host func printSwarmAddrs(node *core.IpfsNode) { + if !node.OnlineMode() { + fmt.Println("Swarm not listening, running in offline mode.") + return + } var addrs []string for _, addr := range node.PeerHost.Addrs() { addrs = append(addrs, addr.String()) @@ -443,8 +486,8 @@ func serveHTTPGateway(req cmds.Request) (error, <-chan error) { if err != nil { return fmt.Errorf("serveHTTPGateway: req.Option(%s) failed: %s", writableKwd, err), nil } - if !writableOptionFound { - writable = cfg.Gateway.Writable + if writableOptionFound { + cfg.Gateway.Writable = writable } gwLis, err := manet.Listen(gatewayMaddr) @@ -465,7 +508,7 @@ func serveHTTPGateway(req cmds.Request) (error, <-chan error) { corehttp.CommandsROOption(*req.InvocContext()), corehttp.VersionOption(), corehttp.IPNSHostnameOption(), - corehttp.GatewayOption(writable, cfg.Gateway.PathPrefixes), + corehttp.GatewayOption("/ipfs", "/ipns"), } if len(cfg.Gateway.RootRedirect) > 0 { @@ -568,3 +611,22 @@ func merge(cs ...<-chan error) <-chan error { }() return out } + +func YesNoPrompt(prompt string) bool { + var s string + for i := 0; i < 3; i++ { + fmt.Printf("%s ", prompt) + fmt.Scanf("%s", &s) + switch s { + case "y", "Y": + return true + case "n", "N": + return false + case "": + return false + } + fmt.Println("Please press either 'y' or 'n'") + } + + return false +} diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go index c3a15b97d..648834d7c 100644 --- a/cmd/ipfs/init.go +++ b/cmd/ipfs/init.go @@ -27,8 +27,9 @@ var initCmd = &cmds.Command{ ShortDescription: ` Initializes IPFS configuration files and generates a new keypair. -ipfs uses a repository in the local file system. By default, the repo is located -at ~/.ipfs. To change the repo location, set the $IPFS_PATH environment variable: +ipfs uses a repository in the local file system. By default, the repo is +located at ~/.ipfs. To change the repo location, set the $IPFS_PATH +environment variable: export IPFS_PATH=/path/to/ipfsrepo `, @@ -53,7 +54,7 @@ at ~/.ipfs. To change the repo location, set the $IPFS_PATH environment variable log.Info("checking if daemon is running...") if daemonLocked { - log.Debug("Ipfs daemon is running.") + log.Debug("ipfs daemon is running") e := "ipfs daemon is running. 
please stop it to run this command" return cmds.ClientError(e) } diff --git a/cmd/ipfs/ipfs.go b/cmd/ipfs/ipfs.go index f8c32512b..f8c903346 100644 --- a/cmd/ipfs/ipfs.go +++ b/cmd/ipfs/ipfs.go @@ -105,6 +105,7 @@ var cmdDetailsMap = map[*cmds.Command]cmdDetails{ commands.CommandsDaemonCmd: {doesNotUseRepo: true}, commands.VersionCmd: {doesNotUseConfigAsInput: true, doesNotUseRepo: true}, // must be permitted to run before init commands.LogCmd: {cannotRunOnClient: true}, + commands.ActiveReqsCmd: {cannotRunOnClient: true}, commands.RepoFsckCmd: {cannotRunOnDaemon: true}, commands.ConfigCmd.Subcommand("edit"): {cannotRunOnDaemon: true, doesNotUseRepo: true}, } diff --git a/cmd/ipfs/main.go b/cmd/ipfs/main.go index a6a46b684..f5b035678 100644 --- a/cmd/ipfs/main.go +++ b/cmd/ipfs/main.go @@ -17,12 +17,12 @@ import ( "syscall" "time" - manet "gx/ipfs/QmUBa4w6CbHJUMeGJPDiMEDWsM93xToK1fTnFXnrC8Hksw/go-multiaddr-net" + manet "gx/ipfs/QmPpRcbNUXauP3zWZ1NJMLWpe4QnmEHrd2ba2D3yqWznw7/go-multiaddr-net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" cmds "github.com/ipfs/go-ipfs/commands" cmdsCli "github.com/ipfs/go-ipfs/commands/cli" @@ -103,9 +103,13 @@ func main() { } // Handle `ipfs help' - if len(os.Args) == 2 && os.Args[1] == "help" { - printHelp(false, os.Stdout) - os.Exit(0) + if len(os.Args) == 2 { + if os.Args[1] == "help" { + printHelp(false, os.Stdout) + os.Exit(0) + } else if os.Args[1] == "--version" { + os.Args[1] = "version" + } } // parse the commandline into a command invocation @@ -288,7 +292,7 @@ func (i *cmdInvocation) requestedHelp() (short bool, long bool, err error) { func callPreCommandHooks(ctx context.Context, details cmdDetails, req cmds.Request, root *cmds.Command) error { log.Event(ctx, "callPreCommandHooks", &details) - log.Debug("Calling pre-command hooks...") + log.Debug("calling pre-command hooks...") return nil } @@ -325,7 +329,7 @@ func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd } if client != nil && !cmd.External { - log.Debug("Executing command via API") + log.Debug("executing command via API") res, err = client.Send(req) if err != nil { if isConnRefused(err) { @@ -335,7 +339,7 @@ func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd } } else { - log.Debug("Executing command locally") + log.Debug("executing command locally") err := req.SetRootContext(ctx) if err != nil { diff --git a/cmd/ipfs/ulimit_unix.go b/cmd/ipfs/ulimit_unix.go index 1ad630f74..943c83188 100644 --- a/cmd/ipfs/ulimit_unix.go +++ b/cmd/ipfs/ulimit_unix.go @@ -9,13 +9,13 @@ import ( "syscall" ) -var ipfsFileDescNum = uint64(2048) +var ipfsFileDescNum = uint64(1024) func init() { if val := os.Getenv("IPFS_FD_MAX"); val != "" { n, err := strconv.Atoi(val) if err != nil { - log.Error("bad value for IPFS_FD_MAX: %s", err) + log.Errorf("bad value for IPFS_FD_MAX: %s", err) } else { ipfsFileDescNum = uint64(n) } @@ -30,12 +30,15 @@ func checkAndSetUlimit() error { return fmt.Errorf("error getting rlimit: %s", err) } + var setting bool if rLimit.Cur < ipfsFileDescNum { if rLimit.Max < ipfsFileDescNum { + log.Error("adjusting max") rLimit.Max = ipfsFileDescNum } - fmt.Printf("Adjusting current ulimit to %d.\n", 
ipfsFileDescNum) + fmt.Printf("Adjusting current ulimit to %d...\n", ipfsFileDescNum) rLimit.Cur = ipfsFileDescNum + setting = true } err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit) @@ -43,5 +46,9 @@ func checkAndSetUlimit() error { return fmt.Errorf("error setting ulimit: %s", err) } + if setting { + fmt.Printf("Successfully raised file descriptor limit to %d.\n", ipfsFileDescNum) + } + return nil } diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go index bf173bbba..4c69a35b8 100644 --- a/cmd/ipfswatch/main.go +++ b/cmd/ipfswatch/main.go @@ -8,7 +8,6 @@ import ( "path/filepath" homedir "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir" - fsnotify "github.com/ipfs/go-ipfs/Godeps/_workspace/src/gopkg.in/fsnotify.v1" commands "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" corehttp "github.com/ipfs/go-ipfs/core/corehttp" @@ -17,6 +16,7 @@ import ( fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + fsnotify "gx/ipfs/QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv/fsnotify.v1" ) var http = flag.Bool("http", false, "expose IPFS HTTP API") @@ -81,10 +81,16 @@ func run(ipfsPath, watchPath string) error { } defer node.Close() + cfg, err := node.Repo.Config() + if err != nil { + return err + } + cfg.Gateway.Writable = true + if *http { addr := "/ip4/127.0.0.1/tcp/5001" var opts = []corehttp.ServeOption{ - corehttp.GatewayOption(true, nil), + corehttp.GatewayOption("/ipfs", "/ipns"), corehttp.WebUIOption, corehttp.CommandsOption(cmdCtx(node, ipfsPath)), } diff --git a/cmd/seccat/seccat.go b/cmd/seccat/seccat.go index 9f60961cf..ef83d7582 100644 --- a/cmd/seccat/seccat.go +++ b/cmd/seccat/seccat.go @@ -18,11 +18,12 @@ import ( "os/signal" "syscall" - ci "gx/ipfs/QmUEUu1CM8bxBJxc3ZLojAi8evhTr4byQogWstABet79oY/go-libp2p-crypto" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" + pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + ci "gx/ipfs/QmUWER4r4qMvaCnX5zREcfyiWN7cXN9g3a7fkRqNz8qWPP/go-libp2p-crypto" + secio "gx/ipfs/QmVjz1uf6U3sVQ5DbWWj7ktTtDd4GgsptYc7FBp33nWE53/go-libp2p-secio" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - secio "gx/ipfs/QmbuSYB51KtX8izgab1fSyBgsyMH2hTuCfBtXm77PeXWca/go-libp2p-secio" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var verbose = false @@ -110,7 +111,7 @@ func main() { } } -func setupPeer(a args) (peer.ID, peer.Peerstore, error) { +func setupPeer(a args) (peer.ID, pstore.Peerstore, error) { if a.keybits < 1024 { return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.") } @@ -126,7 +127,7 @@ func setupPeer(a args) (peer.ID, peer.Peerstore, error) { return "", nil, err } - ps := peer.NewPeerstore() + ps := pstore.NewPeerstore() ps.AddPrivKey(p, sk) ps.AddPubKey(p, pk) diff --git a/cmd/seccat/util.go b/cmd/seccat/util.go index 8b9802de5..3c244ac30 100644 --- a/cmd/seccat/util.go +++ b/cmd/seccat/util.go @@ -5,7 +5,7 @@ import ( "io" "os" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" ) var log = logging.Logger("seccat") diff --git a/commands/argument.go 
b/commands/argument.go index 2bfe1cdfa..afc06e573 100644 --- a/commands/argument.go +++ b/commands/argument.go @@ -48,7 +48,7 @@ func (a Argument) EnableStdin() Argument { func (a Argument) EnableRecursive() Argument { if a.Type != ArgFile { - panic("Only ArgFile arguments can enable recursive") + panic("Only FileArgs can enable recursive") } a.Recursive = true diff --git a/commands/cli/helptext.go b/commands/cli/helptext.go index cceb3f90f..5a36c15f6 100644 --- a/commands/cli/helptext.go +++ b/commands/cli/helptext.go @@ -78,7 +78,6 @@ const longHelpFormat = `USAGE {{.Indent}}{{template "usage" .}} {{if .Synopsis}}SYNOPSIS - {{.Synopsis}} {{end}}{{if .Arguments}}ARGUMENTS @@ -163,6 +162,9 @@ func LongHelp(rootName string, root *cmds.Command, path []string, out io.Writer) if len(fields.Subcommands) == 0 { fields.Subcommands = strings.Join(subcommandText(cmd, rootName, path), "\n") } + if len(fields.Synopsis) == 0 { + fields.Synopsis = generateSynopsis(cmd, pathStr) + } // trim the extra newlines (see TrimNewlines doc) fields.TrimNewlines() @@ -206,6 +208,9 @@ func ShortHelp(rootName string, root *cmds.Command, path []string, out io.Writer if len(fields.Subcommands) == 0 { fields.Subcommands = strings.Join(subcommandText(cmd, rootName, path), "\n") } + if len(fields.Synopsis) == 0 { + fields.Synopsis = generateSynopsis(cmd, pathStr) + } // trim the extra newlines (see TrimNewlines doc) fields.TrimNewlines() @@ -216,6 +221,54 @@ func ShortHelp(rootName string, root *cmds.Command, path []string, out io.Writer return shortHelpTemplate.Execute(out, fields) } +func generateSynopsis(cmd *cmds.Command, path string) string { + res := path + for _, opt := range cmd.Options { + valopt, ok := cmd.Helptext.SynopsisOptionsValues[opt.Names()[0]] + if !ok { + valopt = opt.Names()[0] + } + sopt := "" + for i, n := range opt.Names() { + pre := "-" + if len(n) > 1 { + pre = "--" + } + if opt.Type() == cmds.Bool && opt.DefaultVal() == true { + pre = "--" + sopt = fmt.Sprintf("%s%s=false", pre, n) + break + } else { + if i == 0 { + if opt.Type() == cmds.Bool { + sopt = fmt.Sprintf("%s%s", pre, n) + } else { + sopt = fmt.Sprintf("%s%s=<%s>", pre, n, valopt) + } + } else { + sopt = fmt.Sprintf("%s | %s%s", sopt, pre, n) + } + } + } + res = fmt.Sprintf("%s [%s]", res, sopt) + } + if len(cmd.Arguments) > 0 { + res = fmt.Sprintf("%s [--]", res) + } + for _, arg := range cmd.Arguments { + sarg := fmt.Sprintf("<%s>", arg.Name) + if arg.Variadic { + sarg = sarg + "..." + } + + if !arg.Required { + sarg = fmt.Sprintf("[%s]", sarg) + } + res = fmt.Sprintf("%s %s", res, sarg) + } + return strings.Trim(res, " ") +} + func argumentText(cmd *cmds.Command) []string { lines := make([]string, len(cmd.Arguments)) @@ -298,18 +351,25 @@ func subcommandText(cmd *cmds.Command, rootName string, path []string) []string if len(path) > 0 { prefix += " " } + + // Sorting fixes changing order bug #2981. 
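+ // (Go randomizes map iteration order, so ranging over cmd.Subcommands directly listed subcommands in a different order on every run; sorting the names first makes help output deterministic.)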
+ sortedNames := make([]string, 0) + for name := range cmd.Subcommands { + sortedNames = append(sortedNames, name) + } + sort.Strings(sortedNames) + subcmds := make([]*cmds.Command, len(cmd.Subcommands)) lines := make([]string, len(cmd.Subcommands)) - i := 0 - for name, sub := range cmd.Subcommands { + for i, name := range sortedNames { + sub := cmd.Subcommands[name] usage := usageText(sub) if len(usage) > 0 { usage = " " + usage } lines[i] = prefix + name + usage subcmds[i] = sub - i++ } lines = align(lines) diff --git a/commands/cli/helptext_test.go b/commands/cli/helptext_test.go new file mode 100644 index 000000000..aa8361f99 --- /dev/null +++ b/commands/cli/helptext_test.go @@ -0,0 +1,45 @@ +package cli + +import ( + "strings" + "testing" + + cmds "github.com/ipfs/go-ipfs/commands" +) + +func TestSynopsisGenerator(t *testing.T) { + command := &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("required", true, false, ""), + cmds.StringArg("variadic", false, true, ""), + }, + Options: []cmds.Option{ + cmds.StringOption("opt", "o", "Option"), + }, + Helptext: cmds.HelpText{ + SynopsisOptionsValues: map[string]string{ + "opt": "OPTION", + }, + }, + } + syn := generateSynopsis(command, "cmd") + t.Logf("Synopsis is: %s", syn) + if !strings.HasPrefix(syn, "cmd ") { + t.Fatal("Synopsis should start with command name") + } + if !strings.Contains(syn, "[--opt=