Merge pull request #2838 from ipfs/feature/datastore-to-gx

Migrate go-datastore to gx
This commit is contained in:
Jeromy Johnson 2016-06-12 19:56:40 -07:00 committed by GitHub
commit 79e9bd842c
122 changed files with 105 additions and 7165 deletions
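
For orientation, the practical effect of moving go-datastore from Godeps to gx is an import-path change in go-ipfs. The sketch below is not part of this commit; the multihash is a placeholder for whatever hash gx pins in package.json.

```go
package example

// Before this commit, go-ipfs imported the Godeps-vendored copy:
//
//	import ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
//
// Afterwards the same package is reached through a gx-rewritten path.
// The hash below is a placeholder, not the hash pinned by this commit.
import ds "gx/ipfs/QmPlaceholderHash/go-datastore"

var _ = ds.NewMapDatastore // reference the import so the sketch compiles
```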

4
Godeps/Godeps.json generated
View File

@@ -29,10 +29,6 @@
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116"
},
{
"ImportPath": "github.com/ipfs/go-datastore",
"Rev": "e63957b6da369d986ef3e7a3f249779ba3f56c7e"
},
{
"ImportPath": "github.com/jbenet/go-detect-race",
"Rev": "3463798d9574bd0b7eca275dccc530804ff5216f"

View File

@@ -1,9 +0,0 @@
language: go
go:
- 1.3.3
notifications:
# See http://about.travis-ci.org/docs/user/build-configuration/ to learn more
# about configuring notification recipients and more.
email:
recipients:
- coda.hale@gmail.com

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,15 +0,0 @@
hdrhistogram
============
[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram)
A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram).
> A Histogram that supports recording and analyzing sampled data value counts
> across a configurable integer value range with configurable value precision
> within the range. Value precision is expressed as the number of significant
> digits in the value recording, and provides control over value quantization
> behavior across the value range and the subsequent value resolution at any
> given level.
For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram).
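
As a quick orientation before the implementation below: a minimal usage sketch of the recording/query API (New, RecordValue, ValueAtQuantile). It is not part of this commit and assumes the upstream import path.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track values from 1 to 1,000,000 (e.g. microseconds of latency)
	// with 3 significant figures of precision.
	h := hdrhistogram.New(1, 1000000, 3)

	for _, v := range []int64{10, 25, 40, 120, 5000} {
		if err := h.RecordValue(v); err != nil {
			fmt.Println("out of range:", err) // values above the max would land here
		}
	}

	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("max:", h.Max())
}
```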

View File

@@ -1,513 +0,0 @@
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
// data structure. The HDR Histogram allows for fast and accurate analysis of
// the extreme ranges of data with non-normal distributions, like latency.
package hdrhistogram
import (
"fmt"
"math"
)
// A Bracket is a part of a cumulative distribution.
type Bracket struct {
Quantile float64
Count, ValueAt int64
}
// A Snapshot is an exported view of a Histogram, useful for serializing it.
// A Histogram can be constructed from it by passing it to Import.
type Snapshot struct {
LowestTrackableValue int64
HighestTrackableValue int64
SignificantFigures int64
Counts []int64
}
// A Histogram is a lossy data structure used to record the distribution of
// non-normally distributed data (like latency) with a high degree of accuracy
// and a bounded degree of precision.
type Histogram struct {
lowestTrackableValue int64
highestTrackableValue int64
unitMagnitude int64
significantFigures int64
subBucketHalfCountMagnitude int32
subBucketHalfCount int32
subBucketMask int64
subBucketCount int32
bucketCount int32
countsLen int32
totalCount int64
counts []int64
}
// New returns a new Histogram instance capable of tracking values in the given
// range and with the given amount of precision.
func New(minValue, maxValue int64, sigfigs int) *Histogram {
if sigfigs < 1 || 5 < sigfigs {
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
}
largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs)
subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution))))
subBucketHalfCountMagnitude := subBucketCountMagnitude
if subBucketHalfCountMagnitude < 1 {
subBucketHalfCountMagnitude = 1
}
subBucketHalfCountMagnitude--
unitMagnitude := int32(math.Floor(math.Log2(float64(minValue))))
if unitMagnitude < 0 {
unitMagnitude = 0
}
subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))
subBucketHalfCount := subBucketCount / 2
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)
// determine exponent range needed to support the trackable value with no
// overflow:
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
bucketsNeeded := int32(1)
for smallestUntrackableValue < maxValue {
smallestUntrackableValue <<= 1
bucketsNeeded++
}
bucketCount := bucketsNeeded
countsLen := (bucketCount + 1) * (subBucketCount / 2)
return &Histogram{
lowestTrackableValue: minValue,
highestTrackableValue: maxValue,
unitMagnitude: int64(unitMagnitude),
significantFigures: int64(sigfigs),
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
subBucketHalfCount: subBucketHalfCount,
subBucketMask: subBucketMask,
subBucketCount: subBucketCount,
bucketCount: bucketCount,
countsLen: countsLen,
totalCount: 0,
counts: make([]int64, countsLen),
}
}
// ByteSize returns an estimate of the amount of memory allocated to the
// histogram in bytes.
//
// N.B.: This does not take into account the overhead for slices, which are
// small, constant, and specific to the compiler version.
func (h *Histogram) ByteSize() int {
return 6*8 + 5*4 + len(h.counts)*8
}
// Merge merges the data stored in the given histogram with the receiver,
// returning the number of recorded values which had to be dropped.
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
i := from.rIterator()
for i.next() {
v := i.valueFromIdx
c := i.countAtIdx
if h.RecordValues(v, c) != nil {
dropped += c
}
}
return
}
// TotalCount returns the total number of values recorded.
func (h *Histogram) TotalCount() int64 {
return h.totalCount
}
// Max returns the approximate maximum recorded value.
func (h *Histogram) Max() int64 {
var max int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
max = i.highestEquivalentValue
}
}
return h.lowestEquivalentValue(max)
}
// Min returns the approximate minimum recorded value.
func (h *Histogram) Min() int64 {
var min int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 && min == 0 {
min = i.highestEquivalentValue
break
}
}
return h.lowestEquivalentValue(min)
}
// Mean returns the approximate arithmetic mean of the recorded values.
func (h *Histogram) Mean() float64 {
var total int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
}
}
return float64(total) / float64(h.totalCount)
}
// StdDev returns the approximate standard deviation of the recorded values.
func (h *Histogram) StdDev() float64 {
mean := h.Mean()
geometricDevTotal := 0.0
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
}
}
return math.Sqrt(geometricDevTotal / float64(h.totalCount))
}
// Reset deletes all recorded values and restores the histogram to its original
// state.
func (h *Histogram) Reset() {
h.totalCount = 0
for i := range h.counts {
h.counts[i] = 0
}
}
// RecordValue records the given value, returning an error if the value is out
// of range.
func (h *Histogram) RecordValue(v int64) error {
return h.RecordValues(v, 1)
}
// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
// recording ad-hoc values (e.g., latency for incoming requests) can't take
// advantage of this.
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
if err := h.RecordValue(v); err != nil {
return err
}
if expectedInterval <= 0 || v <= expectedInterval {
return nil
}
missingValue := v - expectedInterval
for missingValue >= expectedInterval {
if err := h.RecordValue(missingValue); err != nil {
return err
}
missingValue -= expectedInterval
}
return nil
}
// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordValues(v, n int64) error {
idx := h.countsIndexFor(v)
if idx < 0 || int(h.countsLen) <= idx {
return fmt.Errorf("value %d is too large to be recorded", v)
}
h.counts[idx] += n
h.totalCount += n
return nil
}
// ValueAtQuantile returns the recorded value at the given quantile (0..100).
func (h *Histogram) ValueAtQuantile(q float64) int64 {
if q > 100 {
q = 100
}
total := int64(0)
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)
i := h.iterator()
for i.next() {
total += i.countAtIdx
if total >= countAtPercentile {
return h.highestEquivalentValue(i.valueFromIdx)
}
}
return 0
}
// CumulativeDistribution returns an ordered list of brackets of the
// distribution of recorded values.
func (h *Histogram) CumulativeDistribution() []Bracket {
var result []Bracket
i := h.pIterator(1)
for i.next() {
result = append(result, Bracket{
Quantile: i.percentile,
Count: i.countToIdx,
ValueAt: i.highestEquivalentValue,
})
}
return result
}
// Equals returns true if the two Histograms are equivalent, false if not.
func (h *Histogram) Equals(other *Histogram) bool {
switch {
case
h.lowestTrackableValue != other.lowestTrackableValue,
h.highestTrackableValue != other.highestTrackableValue,
h.unitMagnitude != other.unitMagnitude,
h.significantFigures != other.significantFigures,
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
h.subBucketHalfCount != other.subBucketHalfCount,
h.subBucketMask != other.subBucketMask,
h.subBucketCount != other.subBucketCount,
h.bucketCount != other.bucketCount,
h.countsLen != other.countsLen,
h.totalCount != other.totalCount:
return false
default:
for i, c := range h.counts {
if c != other.counts[i] {
return false
}
}
}
return true
}
// Export returns a snapshot view of the Histogram. This can be later passed to
// Import to construct a new Histogram with the same state.
func (h *Histogram) Export() *Snapshot {
return &Snapshot{
LowestTrackableValue: h.lowestTrackableValue,
HighestTrackableValue: h.highestTrackableValue,
SignificantFigures: h.significantFigures,
Counts: h.counts,
}
}
// Import returns a new Histogram populated from the Snapshot data.
func Import(s *Snapshot) *Histogram {
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
h.counts = s.Counts
totalCount := int64(0)
for i := int32(0); i < h.countsLen; i++ {
countAtIndex := h.counts[i]
if countAtIndex > 0 {
totalCount += countAtIndex
}
}
h.totalCount = totalCount
return h
}
func (h *Histogram) iterator() *iterator {
return &iterator{
h: h,
subBucketIdx: -1,
}
}
func (h *Histogram) rIterator() *rIterator {
return &rIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
}
}
func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
return &pIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
ticksPerHalfDistance: ticksPerHalfDistance,
}
}
func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
adjustedBucket := bucketIdx
if subBucketIdx >= h.subBucketCount {
adjustedBucket++
}
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
}
func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
}
func (h *Histogram) lowestEquivalentValue(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return h.valueFromIndex(bucketIdx, subBucketIdx)
}
func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
}
func (h *Histogram) highestEquivalentValue(v int64) int64 {
return h.nextNonEquivalentValue(v) - 1
}
func (h *Histogram) medianEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
}
func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
}
func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
offsetInBucket := subBucketIdx - h.subBucketHalfCount
return bucketBaseIdx + offsetInBucket
}
func (h *Histogram) getBucketIndex(v int64) int32 {
pow2Ceiling := bitLen(v | h.subBucketMask)
return int32(pow2Ceiling - int64(h.unitMagnitude) -
int64(h.subBucketHalfCountMagnitude+1))
}
func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
}
func (h *Histogram) countsIndexFor(v int64) int {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return int(h.countsIndex(bucketIdx, subBucketIdx))
}
type iterator struct {
h *Histogram
bucketIdx, subBucketIdx int32
countAtIdx, countToIdx, valueFromIdx int64
highestEquivalentValue int64
}
func (i *iterator) next() bool {
if i.countToIdx >= i.h.totalCount {
return false
}
// increment bucket
i.subBucketIdx++
if i.subBucketIdx >= i.h.subBucketCount {
i.subBucketIdx = i.h.subBucketHalfCount
i.bucketIdx++
}
if i.bucketIdx >= i.h.bucketCount {
return false
}
i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
i.countToIdx += i.countAtIdx
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)
return true
}
type rIterator struct {
iterator
countAddedThisStep int64
}
func (r *rIterator) next() bool {
for r.iterator.next() {
if r.countAtIdx != 0 {
r.countAddedThisStep = r.countAtIdx
return true
}
}
return false
}
type pIterator struct {
iterator
seenLastValue bool
ticksPerHalfDistance int32
percentileToIteratorTo float64
percentile float64
}
func (p *pIterator) next() bool {
if !(p.countToIdx < p.h.totalCount) {
if p.seenLastValue {
return false
}
p.seenLastValue = true
p.percentile = 100
return true
}
if p.subBucketIdx == -1 && !p.iterator.next() {
return false
}
var done = false
for !done {
currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
p.percentile = p.percentileToIteratorTo
halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1))
percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
p.percentileToIteratorTo += 100.0 / percentileReportingTicks
return true
}
done = !p.iterator.next()
}
return true
}
func bitLen(x int64) (n int64) {
for ; x >= 0x8000; x >>= 16 {
n += 16
}
if x >= 0x80 {
x >>= 8
n += 8
}
if x >= 0x8 {
x >>= 4
n += 4
}
if x >= 0x2 {
x >>= 2
n += 2
}
if x >= 0x1 {
n++
}
return
}
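
One subtlety of the Export/Import pair above is worth spelling out: Export does not copy the counts slice, it aliases it. A hedged round-trip sketch, not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	h := hdrhistogram.New(1, 1000000, 3)
	for i := int64(1); i <= 1000; i++ {
		h.RecordValue(i)
	}

	// Export's Snapshot aliases the histogram's counts slice rather than
	// copying it, so serialize the snapshot before mutating h further.
	s := h.Export()

	h2 := hdrhistogram.Import(s)
	fmt.Println(h2.Equals(h)) // true
}
```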

View File

@@ -1,333 +0,0 @@
package hdrhistogram_test
import (
"reflect"
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
func TestHighSigFig(t *testing.T) {
input := []int64{
459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317,
3964974, 12718782,
}
hist := hdrhistogram.New(459876, 12718782, 5)
for _, sample := range input {
hist.RecordValue(sample)
}
if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
func TestValueAtQuantile(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
data := []struct {
q float64
v int64
}{
{q: 50, v: 500223},
{q: 75, v: 750079},
{q: 90, v: 900095},
{q: 95, v: 950271},
{q: 99, v: 990207},
{q: 99.9, v: 999423},
{q: 99.99, v: 999935},
}
for _, d := range data {
if v := h.ValueAtQuantile(d.q); v != d.v {
t.Errorf("P%v was %v, but expected %v", d.q, v, d.v)
}
}
}
func TestMean(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Mean(), 500000.013312; v != want {
t.Errorf("Mean was %v, but expected %v", v, want)
}
}
func TestStdDev(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.StdDev(), 288675.1403682715; v != want {
t.Errorf("StdDev was %v, but expected %v", v, want)
}
}
func TestTotalCount(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
if v, want := h.TotalCount(), int64(i+1); v != want {
t.Errorf("TotalCount was %v, but expected %v", v, want)
}
}
}
func TestMax(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Max(), int64(999936); v != want {
t.Errorf("Max was %v, but expected %v", v, want)
}
}
func TestReset(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
h.Reset()
if v, want := h.Max(), int64(0); v != want {
t.Errorf("Max was %v, but expected %v", v, want)
}
}
func TestMerge(t *testing.T) {
h1 := hdrhistogram.New(1, 1000, 3)
h2 := hdrhistogram.New(1, 1000, 3)
for i := 0; i < 100; i++ {
if err := h1.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
for i := 100; i < 200; i++ {
if err := h2.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
h1.Merge(h2)
if v, want := h1.ValueAtQuantile(50), int64(99); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
func TestMin(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Min(), int64(0); v != want {
t.Errorf("Min was %v, but expected %v", v, want)
}
}
func TestByteSize(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if v, want := h.ByteSize(), 65604; v != want {
t.Errorf("ByteSize was %v, but expected %d", v, want)
}
}
func TestRecordCorrectedValue(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if err := h.RecordCorrectedValue(10, 100); err != nil {
t.Fatal(err)
}
if v, want := h.ValueAtQuantile(75), int64(10); v != want {
t.Errorf("Corrected value was %v, but expected %v", v, want)
}
}
func TestRecordCorrectedValueStall(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if err := h.RecordCorrectedValue(1000, 100); err != nil {
t.Fatal(err)
}
if v, want := h.ValueAtQuantile(75), int64(800); v != want {
t.Errorf("Corrected value was %v, but expected %v", v, want)
}
}
func TestCumulativeDistribution(t *testing.T) {
h := hdrhistogram.New(1, 100000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
actual := h.CumulativeDistribution()
expected := []hdrhistogram.Bracket{
hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0},
hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223},
hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079},
hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007},
hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983},
hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215},
hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575},
hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255},
hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 996351},
hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399},
hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423},
hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447},
hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447},
}
if !reflect.DeepEqual(actual, expected) {
t.Errorf("CF was %#v, but expected %#v", actual, expected)
}
}
func BenchmarkHistogramRecordValue(b *testing.B) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
h.RecordValue(100)
}
}
func BenchmarkNew(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min
}
}
func TestUnitMagnitudeOverflow(t *testing.T) {
h := hdrhistogram.New(0, 200, 4)
if err := h.RecordValue(11); err != nil {
t.Fatal(err)
}
}
func TestSubBucketMaskOverflow(t *testing.T) {
hist := hdrhistogram.New(2e7, 1e8, 5)
for _, sample := range [...]int64{1e8, 2e7, 3e7} {
hist.RecordValue(sample)
}
for q, want := range map[float64]int64{
50: 33554431,
83.33: 33554431,
83.34: 100663295,
99: 100663295,
} {
if got := hist.ValueAtQuantile(q); got != want {
t.Errorf("got %d for %fth percentile. want: %d", got, q, want)
}
}
}
func TestExportImport(t *testing.T) {
min := int64(1)
max := int64(10000000)
sigfigs := 3
h := hdrhistogram.New(min, max, sigfigs)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
s := h.Export()
if v := s.LowestTrackableValue; v != min {
t.Errorf("LowestTrackableValue was %v, but expected %v", v, min)
}
if v := s.HighestTrackableValue; v != max {
t.Errorf("HighestTrackableValue was %v, but expected %v", v, max)
}
if v := int(s.SignificantFigures); v != sigfigs {
t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs)
}
if imported := hdrhistogram.Import(s); !imported.Equals(h) {
t.Error("Expected Histograms to be equivalent")
}
}
func TestEquals(t *testing.T) {
h1 := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h1.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
h2 := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 10000; i++ {
if err := h2.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if h1.Equals(h2) {
t.Error("Expected Histograms to not be equivalent")
}
h1.Reset()
h2.Reset()
if !h1.Equals(h2) {
t.Error("Expected Histograms to be equivalent")
}
}

View File

@@ -1,45 +0,0 @@
package hdrhistogram
// A WindowedHistogram combines histograms to provide windowed statistics.
type WindowedHistogram struct {
idx int
h []Histogram
m *Histogram
Current *Histogram
}
// NewWindowed creates a new WindowedHistogram with N underlying histograms with
// the given parameters.
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
w := WindowedHistogram{
idx: -1,
h: make([]Histogram, n),
m: New(minValue, maxValue, sigfigs),
}
for i := range w.h {
w.h[i] = *New(minValue, maxValue, sigfigs)
}
w.Rotate()
return &w
}
// Merge returns a histogram which includes the recorded values from all the
// sections of the window.
func (w *WindowedHistogram) Merge() *Histogram {
w.m.Reset()
for _, h := range w.h {
w.m.Merge(&h)
}
return w.m
}
// Rotate resets the oldest histogram and rotates it to be used as the current
// histogram.
func (w *WindowedHistogram) Rotate() {
w.idx++
w.Current = &w.h[w.idx%len(w.h)]
w.Current.Reset()
}
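
A brief sketch of how the windowed API above is meant to be driven (not part of this commit): record into Current, Rotate on a fixed cadence, and Merge for a combined read-only view.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Three windows: after three Rotates, the oldest data has aged out.
	w := hdrhistogram.NewWindowed(3, 1, 1000000, 3)

	w.Current.RecordValue(100)
	w.Rotate() // a fresh histogram becomes Current
	w.Current.RecordValue(200)

	// Merge combines every window into one view; both values are still visible.
	fmt.Println(w.Merge().Max()) // 200
}
```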

View File

@@ -1,64 +0,0 @@
package hdrhistogram_test
import (
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
func TestWindowedHistogram(t *testing.T) {
w := hdrhistogram.NewWindowed(2, 1, 1000, 3)
for i := 0; i < 100; i++ {
w.Current.RecordValue(int64(i))
}
w.Rotate()
for i := 100; i < 200; i++ {
w.Current.RecordValue(int64(i))
}
w.Rotate()
for i := 200; i < 300; i++ {
w.Current.RecordValue(int64(i))
}
if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) {
w := hdrhistogram.NewWindowed(3, 1, 10000000, 3)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := w.Current.RecordValue(100); err != nil {
b.Fatal(err)
}
if i%100000 == 1 {
w.Rotate()
}
}
}
func BenchmarkWindowedHistogramMerge(b *testing.B) {
w := hdrhistogram.NewWindowed(3, 1, 10000000, 3)
for i := 0; i < 10000000; i++ {
if err := w.Current.RecordValue(100); err != nil {
b.Fatal(err)
}
if i%100000 == 1 {
w.Rotate()
}
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
w.Merge()
}
}

View File

@@ -1,9 +0,0 @@
language: go
go:
- 1.3.3
notifications:
# See http://about.travis-ci.org/docs/user/build-configuration/ to learn more
# about configuring notification recipients and more.
email:
recipients:
- coda.hale@gmail.com

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,8 +0,0 @@
metrics
=======
[![Build Status](https://travis-ci.org/codahale/metrics.png?branch=master)](https://travis-ci.org/codahale/metrics)
A Go library which provides light-weight instrumentation for your application.
For documentation, check [godoc](http://godoc.org/github.com/codahale/metrics).

View File

@@ -1,329 +0,0 @@
// Package metrics provides minimalist instrumentation for your applications in
// the form of counters and gauges.
//
// Counters
//
// A counter is a monotonically-increasing, unsigned, 64-bit integer used to
// represent the number of times an event has occurred. By tracking the deltas
// between measurements of a counter over intervals of time, an aggregation
// layer can derive rates, acceleration, etc.
//
// Gauges
//
// A gauge returns instantaneous measurements of something using signed, 64-bit
// integers. This value does not need to be monotonic.
//
// Histograms
//
// A histogram tracks the distribution of a stream of values (e.g. the number of
// milliseconds it takes to handle requests), adding gauges for the values at
// meaningful quantiles: 50th, 75th, 90th, 95th, 99th, 99.9th.
//
// Reporting
//
// Measurements from counters and gauges are available as expvars. Your service
// should return its expvars from an HTTP endpoint (i.e., /debug/vars) as a JSON
// object.
package metrics
import (
"expvar"
"sync"
"time"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
// A Counter is a monotonically increasing unsigned integer.
//
// Use a counter to derive rates (e.g., record total number of requests, derive
// requests per second).
type Counter string
// Add increments the counter by one.
func (c Counter) Add() {
c.AddN(1)
}
// AddN increments the counter by N.
func (c Counter) AddN(delta uint64) {
cm.Lock()
counters[string(c)] += delta
cm.Unlock()
}
// SetFunc sets the counter's value to the lazily-called return value of the
// given function.
func (c Counter) SetFunc(f func() uint64) {
cm.Lock()
defer cm.Unlock()
counterFuncs[string(c)] = f
}
// SetBatchFunc sets the counter's value to the lazily-called return value of
// the given function, with an additional initializer function for a related
// batch of counters, all of which are keyed by an arbitrary value.
func (c Counter) SetBatchFunc(key interface{}, init func(), f func() uint64) {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
counterFuncs[string(c)] = f
if _, ok := inits[key]; !ok {
inits[key] = init
}
}
// Remove removes the given counter.
func (c Counter) Remove() {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
delete(counters, string(c))
delete(counterFuncs, string(c))
delete(inits, string(c))
}
// A Gauge is an instantaneous measurement of a value.
//
// Use a gauge to track metrics which increase and decrease (e.g., amount of
// free memory).
type Gauge string
// Set the gauge's value to the given value.
func (g Gauge) Set(value int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = func() int64 {
return value
}
}
// SetFunc sets the gauge's value to the lazily-called return value of the given
// function.
func (g Gauge) SetFunc(f func() int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = f
}
// SetBatchFunc sets the gauge's value to the lazily-called return value of the
// given function, with an additional initializer function for a related batch
// of gauges, all of which are keyed by an arbitrary value.
func (g Gauge) SetBatchFunc(key interface{}, init func(), f func() int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = f
if _, ok := inits[key]; !ok {
inits[key] = init
}
}
// Remove removes the given gauge.
func (g Gauge) Remove() {
gm.Lock()
defer gm.Unlock()
delete(gauges, string(g))
delete(inits, string(g))
}
// Reset removes all existing counters and gauges.
func Reset() {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
hm.Lock()
defer hm.Unlock()
counters = make(map[string]uint64)
counterFuncs = make(map[string]func() uint64)
gauges = make(map[string]func() int64)
histograms = make(map[string]*Histogram)
inits = make(map[interface{}]func())
}
// Snapshot returns a copy of the values of all registered counters and gauges.
func Snapshot() (c map[string]uint64, g map[string]int64) {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
hm.Lock()
defer hm.Unlock()
for _, init := range inits {
init()
}
c = make(map[string]uint64, len(counters)+len(counterFuncs))
for n, v := range counters {
c[n] = v
}
for n, f := range counterFuncs {
c[n] = f()
}
g = make(map[string]int64, len(gauges))
for n, f := range gauges {
g[n] = f()
}
return
}
// NewHistogram returns a windowed HDR histogram which drops data older than
// five minutes. The returned histogram is safe to use from multiple goroutines.
//
// Use a histogram to track the distribution of a stream of values (e.g., the
// latency associated with HTTP requests).
func NewHistogram(name string, minValue, maxValue int64, sigfigs int) *Histogram {
hm.Lock()
defer hm.Unlock()
if _, ok := histograms[name]; ok {
panic(name + " already exists")
}
hist := &Histogram{
name: name,
hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs),
}
histograms[name] = hist
Gauge(name+".P50").SetBatchFunc(hname(name), hist.merge, hist.valueAt(50))
Gauge(name+".P75").SetBatchFunc(hname(name), hist.merge, hist.valueAt(75))
Gauge(name+".P90").SetBatchFunc(hname(name), hist.merge, hist.valueAt(90))
Gauge(name+".P95").SetBatchFunc(hname(name), hist.merge, hist.valueAt(95))
Gauge(name+".P99").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99))
Gauge(name+".P999").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99.9))
return hist
}
// Remove removes the given histogram.
func (h *Histogram) Remove() {
hm.Lock()
defer hm.Unlock()
Gauge(h.name + ".P50").Remove()
Gauge(h.name + ".P75").Remove()
Gauge(h.name + ".P90").Remove()
Gauge(h.name + ".P95").Remove()
Gauge(h.name + ".P99").Remove()
Gauge(h.name + ".P999").Remove()
delete(histograms, h.name)
}
type hname string // unexported to prevent collisions
// A Histogram measures the distribution of a stream of values.
type Histogram struct {
name string
hist *hdrhistogram.WindowedHistogram
m *hdrhistogram.Histogram
rw sync.RWMutex
}
// Name returns the name of the histogram.
func (h *Histogram) Name() string {
return h.name
}
// RecordValue records the given value, or returns an error if the value is out
// of range.
// Returned error values are of type Error.
func (h *Histogram) RecordValue(v int64) error {
h.rw.Lock()
defer h.rw.Unlock()
err := h.hist.Current.RecordValue(v)
if err != nil {
return Error{h.name, err}
}
return nil
}
func (h *Histogram) rotate() {
h.rw.Lock()
defer h.rw.Unlock()
h.hist.Rotate()
}
func (h *Histogram) merge() {
h.rw.Lock()
defer h.rw.Unlock()
h.m = h.hist.Merge()
}
func (h *Histogram) valueAt(q float64) func() int64 {
return func() int64 {
h.rw.RLock()
defer h.rw.RUnlock()
if h.m == nil {
return 0
}
return h.m.ValueAtQuantile(q)
}
}
// Error describes an error and the name of the metric where it occurred.
type Error struct {
Metric string
Err error
}
func (e Error) Error() string {
return e.Metric + ": " + e.Err.Error()
}
var (
counters = make(map[string]uint64)
counterFuncs = make(map[string]func() uint64)
gauges = make(map[string]func() int64)
inits = make(map[interface{}]func())
histograms = make(map[string]*Histogram)
cm, gm, hm sync.Mutex
)
func init() {
expvar.Publish("metrics", expvar.Func(func() interface{} {
counters, gauges := Snapshot()
return map[string]interface{}{
"Counters": counters,
"Gauges": gauges,
}
}))
go func() {
for range time.NewTicker(1 * time.Minute).C {
hm.Lock()
for _, h := range histograms {
h.rotate()
}
hm.Unlock()
}
}()
}
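
Tying the pieces above together: NewHistogram registers quantile gauges, the package's init publishes all counters and gauges under the "metrics" expvar, and importing expvar (transitively) wires /debug/vars into the default mux. A hedged sketch, not part of this commit:

```go
package main

import (
	"log"
	"net/http"

	"github.com/codahale/metrics"
)

func main() {
	// Registers latency.ms.P50 ... latency.ms.P999 gauges; the package's
	// init publishes all counters and gauges under the "metrics" expvar.
	h := metrics.NewHistogram("latency.ms", 1, 60000, 3)
	if err := h.RecordValue(12); err != nil {
		log.Fatal(err)
	}

	// expvar (imported transitively) serves /debug/vars on the default mux.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```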

View File

@@ -1,217 +0,0 @@
package metrics_test
import (
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func TestCounter(t *testing.T) {
metrics.Reset()
metrics.Counter("whee").Add()
metrics.Counter("whee").AddN(10)
counters, _ := metrics.Snapshot()
if v, want := counters["whee"], uint64(11); v != want {
t.Errorf("Counter was %v, but expected %v", v, want)
}
}
func TestCounterFunc(t *testing.T) {
metrics.Reset()
metrics.Counter("whee").SetFunc(func() uint64 {
return 100
})
counters, _ := metrics.Snapshot()
if v, want := counters["whee"], uint64(100); v != want {
t.Errorf("Counter was %v, but expected %v", v, want)
}
}
func TestCounterBatchFunc(t *testing.T) {
metrics.Reset()
var a, b uint64
metrics.Counter("whee").SetBatchFunc(
"yay",
func() {
a, b = 1, 2
},
func() uint64 {
return a
},
)
metrics.Counter("woo").SetBatchFunc(
"yay",
func() {
a, b = 1, 2
},
func() uint64 {
return b
},
)
counters, _ := metrics.Snapshot()
if v, want := counters["whee"], uint64(1); v != want {
t.Errorf("Counter was %v, but expected %v", v, want)
}
if v, want := counters["woo"], uint64(2); v != want {
t.Errorf("Counter was %v, but expected %v", v, want)
}
}
func TestCounterRemove(t *testing.T) {
metrics.Reset()
metrics.Counter("whee").Add()
metrics.Counter("whee").Remove()
counters, _ := metrics.Snapshot()
if v, ok := counters["whee"]; ok {
t.Errorf("Counter was %v, but expected nothing", v)
}
}
func TestGaugeValue(t *testing.T) {
metrics.Reset()
metrics.Gauge("whee").Set(-100)
_, gauges := metrics.Snapshot()
if v, want := gauges["whee"], int64(-100); v != want {
t.Errorf("Gauge was %v, but expected %v", v, want)
}
}
func TestGaugeFunc(t *testing.T) {
metrics.Reset()
metrics.Gauge("whee").SetFunc(func() int64 {
return -100
})
_, gauges := metrics.Snapshot()
if v, want := gauges["whee"], int64(-100); v != want {
t.Errorf("Gauge was %v, but expected %v", v, want)
}
}
func TestGaugeRemove(t *testing.T) {
metrics.Reset()
metrics.Gauge("whee").Set(1)
metrics.Gauge("whee").Remove()
_, gauges := metrics.Snapshot()
if v, ok := gauges["whee"]; ok {
t.Errorf("Gauge was %v, but expected nothing", v)
}
}
func TestHistogram(t *testing.T) {
metrics.Reset()
h := metrics.NewHistogram("heyo", 1, 1000, 3)
for i := 100; i > 0; i-- {
for j := 0; j < i; j++ {
h.RecordValue(int64(i))
}
}
_, gauges := metrics.Snapshot()
if v, want := gauges["heyo.P50"], int64(71); v != want {
t.Errorf("P50 was %v, but expected %v", v, want)
}
if v, want := gauges["heyo.P75"], int64(87); v != want {
t.Errorf("P75 was %v, but expected %v", v, want)
}
if v, want := gauges["heyo.P90"], int64(95); v != want {
t.Errorf("P90 was %v, but expected %v", v, want)
}
if v, want := gauges["heyo.P95"], int64(98); v != want {
t.Errorf("P95 was %v, but expected %v", v, want)
}
if v, want := gauges["heyo.P99"], int64(100); v != want {
t.Errorf("P99 was %v, but expected %v", v, want)
}
if v, want := gauges["heyo.P999"], int64(100); v != want {
t.Errorf("P999 was %v, but expected %v", v, want)
}
}
func TestHistogramRemove(t *testing.T) {
metrics.Reset()
h := metrics.NewHistogram("heyo", 1, 1000, 3)
h.Remove()
_, gauges := metrics.Snapshot()
if v, ok := gauges["heyo.P50"]; ok {
t.Errorf("Gauge was %v, but expected nothing", v)
}
}
func BenchmarkCounterAdd(b *testing.B) {
metrics.Reset()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
metrics.Counter("test1").Add()
}
})
}
func BenchmarkCounterAddN(b *testing.B) {
metrics.Reset()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
metrics.Counter("test2").AddN(100)
}
})
}
func BenchmarkGaugeSet(b *testing.B) {
metrics.Reset()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
metrics.Gauge("test2").Set(100)
}
})
}
func BenchmarkHistogramRecordValue(b *testing.B) {
metrics.Reset()
h := metrics.NewHistogram("hist", 1, 1000, 3)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
h.RecordValue(100)
}
})
}

View File

@@ -1,18 +0,0 @@
// Package runtime registers gauges and counters for various operationally
// important aspects of the Go runtime.
//
// To use, import this package:
//
// import _ "github.com/codahale/metrics/runtime"
//
// This registers the following gauges:
//
// FileDescriptors.Max
// FileDescriptors.Used
// Mem.NumGC
// Mem.PauseTotalNs
// Mem.LastGC
// Mem.Alloc
// Mem.HeapObjects
// Goroutines.Num
package runtime
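
Usage is exactly the blank import described above; a short sketch, not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/codahale/metrics"
	_ "github.com/codahale/metrics/runtime" // side effect: registers the gauges above
)

func main() {
	_, gauges := metrics.Snapshot()
	fmt.Println(gauges["Goroutines.Num"], gauges["Mem.Alloc"])
}
```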

View File

@@ -1,46 +0,0 @@
// +build !windows
package runtime
import (
"io/ioutil"
"syscall"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func getFDLimit() (uint64, error) {
var rlimit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
return 0, err
}
// rlimit.Cur's type is platform-dependent, so here we widen it as far as Go
// will allow by converting it to a uint64.
return uint64(rlimit.Cur), nil
}
func getFDUsage() (uint64, error) {
fds, err := ioutil.ReadDir("/proc/self/fd")
if err != nil {
return 0, err
}
return uint64(len(fds)), nil
}
func init() {
metrics.Gauge("FileDescriptors.Max").SetFunc(func() int64 {
v, err := getFDLimit()
if err != nil {
return 0
}
return int64(v)
})
metrics.Gauge("FileDescriptors.Used").SetFunc(func() int64 {
v, err := getFDUsage()
if err != nil {
return 0
}
return int64(v)
})
}

View File

@@ -1,24 +0,0 @@
// +build !windows
package runtime
import (
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func TestFdStats(t *testing.T) {
_, gauges := metrics.Snapshot()
expected := []string{
"FileDescriptors.Max",
"FileDescriptors.Used",
}
for _, name := range expected {
if _, ok := gauges[name]; !ok {
t.Errorf("Missing gauge %q", name)
}
}
}

View File

@@ -1,4 +0,0 @@
package runtime
func init() {
}

View File

@@ -1,13 +0,0 @@
package runtime
import (
"runtime"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func init() {
metrics.Gauge("Goroutines.Num").SetFunc(func() int64 {
return int64(runtime.NumGoroutine())
})
}

View File

@@ -1,21 +0,0 @@
package runtime
import (
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func TestGoroutinesStats(t *testing.T) {
_, gauges := metrics.Snapshot()
expected := []string{
"Goroutines.Num",
}
for _, name := range expected {
if _, ok := gauges[name]; !ok {
t.Errorf("Missing gauge %q", name)
}
}
}

View File

@@ -1,48 +0,0 @@
package runtime
import (
"runtime"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func init() {
msg := &memStatGauges{}
metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC)
metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause)
metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, msg.init, msg.lastPause)
metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc)
metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects)
}
type key struct{} // unexported to prevent collision
type memStatGauges struct {
stats runtime.MemStats
}
func (msg *memStatGauges) init() {
runtime.ReadMemStats(&msg.stats)
}
func (msg *memStatGauges) numGC() uint64 {
return uint64(msg.stats.NumGC)
}
func (msg *memStatGauges) totalPause() uint64 {
return msg.stats.PauseTotalNs
}
func (msg *memStatGauges) lastPause() int64 {
return int64(msg.stats.LastGC)
}
func (msg *memStatGauges) alloc() int64 {
return int64(msg.stats.Alloc)
}
func (msg *memStatGauges) objects() int64 {
return int64(msg.stats.HeapObjects)
}

View File

@@ -1,34 +0,0 @@
package runtime
import (
"testing"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
)
func TestMemStats(t *testing.T) {
counters, gauges := metrics.Snapshot()
expectedCounters := []string{
"Mem.NumGC",
"Mem.PauseTotalNs",
}
expectedGauges := []string{
"Mem.LastGC",
"Mem.Alloc",
"Mem.HeapObjects",
}
for _, name := range expectedCounters {
if _, ok := counters[name]; !ok {
t.Errorf("Missing counters %q", name)
}
}
for _, name := range expectedGauges {
if _, ok := gauges[name]; !ok {
t.Errorf("Missing gauge %q", name)
}
}
}

View File

@@ -1,11 +0,0 @@
language: go
go:
- 1.3
- release
- tip
script:
- make test
env: TEST_NO_FUSE=1 TEST_VERBOSE=1

View File

@@ -1,76 +0,0 @@
{
"ImportPath": "github.com/jbenet/go-datastore",
"GoVersion": "go1.5",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.8.3-37-g418b41d",
"Rev": "418b41d23a1bf978c06faea5313ba194650ac088"
},
{
"ImportPath": "github.com/codahale/blake2",
"Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4"
},
{
"ImportPath": "github.com/codahale/hdrhistogram",
"Rev": "5fd85ec0b4e2dd5d4158d257d943f2e586d86b62"
},
{
"ImportPath": "github.com/codahale/metrics",
"Rev": "7d3beb1b480077e77c08a6f6c65ea969f6e91420"
},
{
"ImportPath": "github.com/dustin/randbo",
"Rev": "7f1b564ca7242d22bcc6e2128beb90d9fa38b9f0"
},
{
"ImportPath": "github.com/fzzy/radix/redis",
"Comment": "v0.5.1",
"Rev": "27a863cdffdb0998d13e1e11992b18489aeeaa25"
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370"
},
{
"ImportPath": "github.com/ipfs/go-log",
"Rev": "ee5cb9834b33bcf29689183e0323e328c8b8de29"
},
{
"ImportPath": "github.com/jbenet/go-os-rename",
"Rev": "2d93ae970ba96c41f717036a5bf5494faf1f38c0"
},
{
"ImportPath": "github.com/jbenet/goprocess",
"Rev": "5b02f8d275a2dd882fb06f8bbdf74347795ff3b1"
},
{
"ImportPath": "github.com/mattbaird/elastigo/api",
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
},
{
"ImportPath": "github.com/mattbaird/elastigo/core",
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "dfcbca9c45aeabb8971affa4f76b2d40f6f72328"
},
{
"ImportPath": "gopkg.in/check.v1",
"Rev": "91ae5f88a67b14891cfd43895b01164f6c120420"
},
{
"ImportPath": "launchpad.net/gocheck",
"Comment": "87",
"Rev": "gustavo@niemeyer.net-20140225173054-xu9zlkf9kxhvow02"
}
]
}

View File

@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

View File

@@ -1,21 +0,0 @@
The MIT License
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,24 +0,0 @@
build:
go build
test: build
go test -race -cpu=5 -v ./...
# saves/vendors third-party dependencies to Godeps/_workspace
# -r flag rewrites import paths to use the vendored path
# ./... performs operation on all packages in tree
vendor: godep
godep save -r ./...
deps:
go get ./...
watch:
-make
@echo "[watching *.go; for recompilation]"
# for portability, use watchmedo -- pip install watchmedo
@watchmedo shell-command --patterns="*.go;" --recursive \
--command='make' .
godep:
go get github.com/tools/godep

View File

@@ -1,15 +0,0 @@
# datastore interface
datastore is a generic layer of abstraction for data store and database access. It is a simple API that aims to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. One can thus leverage different datastores with different strengths without committing the application to a single datastore for its lifetime.
In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding).
Based on [datastore.py](https://github.com/datastore/datastore).
### Documentation
https://godoc.org/github.com/jbenet/go-datastore
### License
MIT
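
For orientation, a minimal Put/Get sketch against this package (not part of this commit; NewKey lives in the package's key.go, which this diff does not show, and the import path is the upstream one from the godoc link above):

```go
package main

import (
	"fmt"

	ds "github.com/jbenet/go-datastore"
)

func main() {
	d := ds.NewMapDatastore()
	k := ds.NewKey("/greeting")

	if err := d.Put(k, "hello"); err != nil {
		panic(err)
	}
	v, err := d.Get(k)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // hello
}
```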

View File

@@ -1,189 +0,0 @@
package datastore
import (
"io"
"log"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// Here are some basic datastore implementations.
type keyMap map[Key]interface{}
// MapDatastore uses a standard Go map for internal storage.
type MapDatastore struct {
values keyMap
}
// NewMapDatastore constructs a MapDatastore
func NewMapDatastore() (d *MapDatastore) {
return &MapDatastore{
values: keyMap{},
}
}
// Put implements Datastore.Put
func (d *MapDatastore) Put(key Key, value interface{}) (err error) {
d.values[key] = value
return nil
}
// Get implements Datastore.Get
func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
val, found := d.values[key]
if !found {
return nil, ErrNotFound
}
return val, nil
}
// Has implements Datastore.Has
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
_, found := d.values[key]
return found, nil
}
// Delete implements Datastore.Delete
func (d *MapDatastore) Delete(key Key) (err error) {
if _, found := d.values[key]; !found {
return ErrNotFound
}
delete(d.values, key)
return nil
}
// Query implements Datastore.Query
func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) {
re := make([]dsq.Entry, 0, len(d.values))
for k, v := range d.values {
re = append(re, dsq.Entry{Key: k.String(), Value: v})
}
r := dsq.ResultsWithEntries(q, re)
r = dsq.NaiveQueryApply(q, r)
return r, nil
}
func (d *MapDatastore) Batch() (Batch, error) {
return NewBasicBatch(d), nil
}
func (d *MapDatastore) Close() error {
return nil
}
// NullDatastore stores nothing, but conforms to the API.
// Useful to test with.
type NullDatastore struct {
}
// NewNullDatastore constructs a null datastore
func NewNullDatastore() *NullDatastore {
return &NullDatastore{}
}
// Put implements Datastore.Put
func (d *NullDatastore) Put(key Key, value interface{}) (err error) {
return nil
}
// Get implements Datastore.Get
func (d *NullDatastore) Get(key Key) (value interface{}, err error) {
return nil, nil
}
// Has implements Datastore.Has
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
return false, nil
}
// Delete implements Datastore.Delete
func (d *NullDatastore) Delete(key Key) (err error) {
return nil
}
// Query implements Datastore.Query
func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) {
return dsq.ResultsWithEntries(q, nil), nil
}
func (d *NullDatastore) Batch() (Batch, error) {
return NewBasicBatch(d), nil
}
func (d *NullDatastore) Close() error {
return nil
}
// LogDatastore logs all accesses through the datastore.
type LogDatastore struct {
Name string
child Datastore
}
// Shim is a datastore which has a child.
type Shim interface {
Datastore
Children() []Datastore
}
// NewLogDatastore constructs a log datastore.
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
if len(name) < 1 {
name = "LogDatastore"
}
return &LogDatastore{Name: name, child: ds}
}
// Children implements Shim
func (d *LogDatastore) Children() []Datastore {
return []Datastore{d.child}
}
// Put implements Datastore.Put
func (d *LogDatastore) Put(key Key, value interface{}) (err error) {
log.Printf("%s: Put %s\n", d.Name, key)
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
return d.child.Put(key, value)
}
// Get implements Datastore.Get
func (d *LogDatastore) Get(key Key) (value interface{}, err error) {
log.Printf("%s: Get %s\n", d.Name, key)
return d.child.Get(key)
}
// Has implements Datastore.Has
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
log.Printf("%s: Has %s\n", d.Name, key)
return d.child.Has(key)
}
// Delete implements Datastore.Delete
func (d *LogDatastore) Delete(key Key) (err error) {
log.Printf("%s: Delete %s\n", d.Name, key)
return d.child.Delete(key)
}
// Query implements Datastore.Query
func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) {
log.Printf("%s: Query\n", d.Name)
return d.child.Query(q)
}
func (d *LogDatastore) Batch() (Batch, error) {
log.Printf("%s: Batch\n", d.Name)
if bds, ok := d.child.(Batching); ok {
return bds.Batch()
}
return nil, ErrBatchUnsupported
}
func (d *LogDatastore) Close() error {
log.Printf("%s: Close\n", d.Name)
if cds, ok := d.child.(io.Closer); ok {
return cds.Close()
}
return nil
}
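
A short sketch of the Shim pattern above in use (not part of this commit; import path as in the README):

```go
package main

import (
	ds "github.com/jbenet/go-datastore"
)

func main() {
	// LogDatastore is a Shim: it forwards every call to its child and logs it.
	d := ds.NewLogDatastore(ds.NewMapDatastore(), "app")

	d.Put(ds.NewKey("/users/alice"), "profile") // logs "app: Put /users/alice"
	d.Get(ds.NewKey("/users/alice"))            // logs "app: Get /users/alice"
}
```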

View File

@@ -1,44 +0,0 @@
package datastore
// basicBatch implements the Batch interface for datastores that do not
// have any underlying transactional support
type basicBatch struct {
puts map[Key]interface{}
deletes map[Key]struct{}
target Datastore
}
func NewBasicBatch(ds Datastore) Batch {
return &basicBatch{
puts: make(map[Key]interface{}),
deletes: make(map[Key]struct{}),
target: ds,
}
}
func (bt *basicBatch) Put(key Key, val interface{}) error {
bt.puts[key] = val
return nil
}
func (bt *basicBatch) Delete(key Key) error {
bt.deletes[key] = struct{}{}
return nil
}
func (bt *basicBatch) Commit() error {
for k, val := range bt.puts {
if err := bt.target.Put(k, val); err != nil {
return err
}
}
for k := range bt.deletes {
if err := bt.target.Delete(k); err != nil {
return err
}
}
return nil
}
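
Note the ordering baked into Commit above: all buffered puts are applied before any buffered deletes. A usage sketch, not part of this commit:

```go
package main

import (
	ds "github.com/jbenet/go-datastore"
)

func main() {
	d := ds.NewMapDatastore()

	b, err := d.Batch() // MapDatastore hands back a basicBatch
	if err != nil {
		panic(err)
	}
	b.Put(ds.NewKey("/a"), 1)
	b.Put(ds.NewKey("/b"), 2)
	b.Delete(ds.NewKey("/a"))

	// Commit replays all buffered puts, then all buffered deletes, so "/a"
	// ends up deleted even though it was also put in this batch.
	if err := b.Commit(); err != nil {
		panic(err)
	}
}
```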

View File

@@ -1,42 +0,0 @@
package callback
import (
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
type Datastore struct {
D ds.Datastore
F func()
}
func Wrap(ds ds.Datastore, f func()) *Datastore {
return &Datastore{ds, f}
}
func (c *Datastore) SetFunc(f func()) { c.F = f }
func (c *Datastore) Put(key ds.Key, value interface{}) (err error) {
c.F()
return c.D.Put(key, value)
}
func (c *Datastore) Get(key ds.Key) (value interface{}, err error) {
c.F()
return c.D.Get(key)
}
func (c *Datastore) Has(key ds.Key) (exists bool, err error) {
c.F()
return c.D.Has(key)
}
func (c *Datastore) Delete(key ds.Key) (err error) {
c.F()
return c.D.Delete(key)
}
func (c *Datastore) Query(q dsq.Query) (dsq.Results, error) {
c.F()
return c.D.Query(q)
}

View File

@@ -1,140 +0,0 @@
package coalesce
import (
"io"
"sync"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// parent keys
var (
putKey = "put"
getKey = "get"
hasKey = "has"
deleteKey = "delete"
)
type keySync struct {
op string
k ds.Key
value interface{}
}
type valSync struct {
val interface{}
err error
done chan struct{}
}
// datastore wraps a child datastore and coalesces identical in-flight
// requests into a single call to the child.
type datastore struct {
child ds.Datastore
reqmu sync.Mutex
req map[keySync]*valSync
}
// Wrap wraps a given datastore with a coalescing datastore.
// All simultaneous requests for the same key yield the exact same
// result. Note that this shares memory: a generic interface{} value
// cannot be copied, so callers see the same underlying value.
func Wrap(d ds.Datastore) ds.Datastore {
return &datastore{child: d, req: make(map[keySync]*valSync)}
}
// sync synchronizes requests for a given key.
func (d *datastore) sync(k keySync) (vs *valSync, found bool) {
d.reqmu.Lock()
vs, found = d.req[k]
if !found {
vs = &valSync{done: make(chan struct{})}
d.req[k] = vs
}
d.reqmu.Unlock()
// if we did find one, wait till it's done.
if found {
<-vs.done
}
return vs, found
}
// syncDone marks the request for a given key as done and releases any waiters.
func (d *datastore) syncDone(k keySync) {
d.reqmu.Lock()
vs, found := d.req[k]
if !found {
panic("attempt to syncDone non-existent request")
}
delete(d.req, k)
d.reqmu.Unlock()
// release all the waiters.
close(vs.done)
}
// Put stores the object `value` named by `key`.
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
ks := keySync{putKey, key, value}
vs, found := d.sync(ks)
if !found {
vs.err = d.child.Put(key, value)
d.syncDone(ks)
}
return vs.err
}
// Get retrieves the object `value` named by `key`.
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
ks := keySync{getKey, key, nil}
vs, found := d.sync(ks)
if !found {
vs.val, vs.err = d.child.Get(key)
d.syncDone(ks)
}
return vs.val, vs.err
}
// Has returns whether the `key` is mapped to a `value`.
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
ks := keySync{hasKey, key, nil}
vs, found := d.sync(ks)
if !found {
vs.val, vs.err = d.child.Has(key)
d.syncDone(ks)
}
return vs.val.(bool), vs.err
}
// Delete removes the value for given `key`.
func (d *datastore) Delete(key ds.Key) (err error) {
ks := keySync{deleteKey, key, nil}
vs, found := d.sync(ks)
if !found {
vs.err = d.child.Delete(key)
d.syncDone(ks)
}
return vs.err
}
// Query returns a list of keys in the datastore
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
// query not coalesced yet.
return d.child.Query(q)
}
func (d *datastore) Close() error {
d.reqmu.Lock()
defer d.reqmu.Unlock()
for _, s := range d.req {
<-s.done
}
if c, ok := d.child.(io.Closer); ok {
return c.Close()
}
return nil
}
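
A sketch of what coalescing buys (not part of this commit; the callback and coalesce subpackage paths are assumed from the package names shown in this diff): identical concurrent Gets can be served by a single call to the child.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	ds "github.com/jbenet/go-datastore"
	"github.com/jbenet/go-datastore/callback"
	"github.com/jbenet/go-datastore/coalesce"
)

func main() {
	var calls int64
	// callback.Wrap lets us count how many Gets actually reach the child.
	child := callback.Wrap(ds.NewMapDatastore(), func() {
		atomic.AddInt64(&calls, 1)
	})
	d := coalesce.Wrap(child)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			d.Get(ds.NewKey("/k")) // identical concurrent Gets share one child call
		}()
	}
	wg.Wait()
	fmt.Println(calls) // somewhere between 1 and 10, depending on timing
}
```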

View File

@@ -1,122 +0,0 @@
package datastore
import (
"errors"
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
/*
Datastore represents storage for any key-value pair.
Datastores are general enough to be backed by all kinds of different storage:
in-memory caches, databases, a remote datastore, flat files on disk, etc.
The general idea is to wrap a more complicated storage facility in a simple,
uniform interface, keeping the freedom of using the right tools for the job.
In particular, a Datastore can aggregate other datastores in interesting ways,
like sharded (to distribute load) or tiered access (caches before databases).
While Datastores should be written general enough to accept all sorts of
values, some implementations will undoubtedly have to be specific (e.g. SQL
databases where fields should be decomposed into columns), particularly to
support queries efficiently. Moreover, certain datastores may enforce certain
types of values (e.g. requiring an io.Reader, a specific struct, etc) or
serialization formats (JSON, Protobufs, etc).
IMPORTANT: No Datastore should ever Panic! This is a cross-module interface,
and thus it should behave predictably and handle exceptional conditions with
proper error reporting. Thus, all Datastore calls may return errors, which
should be checked by callers.
*/
type Datastore interface {
// Put stores the object `value` named by `key`.
//
// The generalized Datastore interface does not impose a value type,
// allowing various datastore middleware implementations (which do not
// handle the values directly) to be composed together.
//
// Ultimately, the lowest-level datastore will need to do some value checking
// or risk getting incorrect values. It may also be useful to expose a more
// type-safe interface to your application, and do the checking up-front.
Put(key Key, value interface{}) error
// Get retrieves the object `value` named by `key`.
// Get will return ErrNotFound if the key is not mapped to a value.
Get(key Key) (value interface{}, err error)
// Has returns whether the `key` is mapped to a `value`.
// In some contexts, it may be much cheaper only to check for existence of
// a value, rather than retrieving the value itself. (e.g. HTTP HEAD).
// The default implementation is found in `GetBackedHas`.
Has(key Key) (exists bool, err error)
// Delete removes the value for given `key`.
Delete(key Key) error
// Query searches the datastore and returns a query result. This function
// may return before the query actually runs. To wait for the query:
//
// result, _ := ds.Query(q)
//
// // use the channel interface; result may come in at different times
// for entry := range result.Entries() { ... }
//
// // or wait for the query to be completely done
// result.Wait()
// result.AllEntries()
//
Query(q query.Query) (query.Results, error)
}
type Batching interface {
Datastore
Batch() (Batch, error)
}
var ErrBatchUnsupported = errors.New("this datastore does not support batching")
// ThreadSafeDatastore is an interface that all threadsafe datastores should
// implement to leverage type safety checks.
type ThreadSafeDatastore interface {
Datastore
IsThreadSafe()
}
// Errors
// ErrNotFound is returned by Get, Has, and Delete when a datastore does not
// map the given key to a value.
var ErrNotFound = errors.New("datastore: key not found")
// ErrInvalidType is returned by Put when a given value is incompatible with
// the type the datastore supports. This means a conversion (or serialization)
// is needed beforehand.
var ErrInvalidType = errors.New("datastore: invalid type error")
// GetBackedHas provides a default Datastore.Has implementation.
// It exists so Datastore.Has implementations can use it, like so:
//
// func (d *SomeDatastore) Has(key Key) (exists bool, err error) {
// return GetBackedHas(d, key)
// }
func GetBackedHas(ds Datastore, key Key) (bool, error) {
_, err := ds.Get(key)
switch err {
case nil:
return true, nil
case ErrNotFound:
return false, nil
default:
return false, err
}
}
type Batch interface {
Put(key Key, val interface{}) error
Delete(key Key) error
Commit() error
}
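Where a concrete datastore supports Batching, writes can be staged and applied together in one Commit. A hedged sketch (NewMapDatastore is assumed to be the in-memory implementation in this package; NewBasicBatch is the fallback used by the fs datastore later in this commit):

var d Datastore = NewMapDatastore()
b := NewBasicBatch(d)
_ = b.Put(NewKey("/a"), []byte("1"))
_ = b.Delete(NewKey("/b"))
if err := b.Commit(); err != nil {
	// nothing is applied until Commit succeeds
}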

View File

@ -1,128 +0,0 @@
package elastigo
import (
"errors"
"fmt"
"net/url"
"strings"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
"github.com/codahale/blake2"
"github.com/mattbaird/elastigo/api"
"github.com/mattbaird/elastigo/core"
)
// Currently, elastigo does not allow connecting to multiple elasticsearch
// instances. The elastigo API uses global static variables (ugh).
// See https://github.com/mattbaird/elastigo/issues/22
//
// Thus, we use a global static variable (GlobalInstance), and return an
// error if NewDatastore is called twice with different addresses.
var GlobalInstance string
// Datastore stores values in an ElasticSearch index.
type Datastore struct {
url string
index string
// Elasticsearch does not allow slashes in its object ids,
// so we hash the key. By default, we use the provided BlakeKeyHash
KeyHash func(ds.Key) string
}
func NewDatastore(urlstr string) (*Datastore, error) {
if GlobalInstance != "" && GlobalInstance != urlstr {
return nil, fmt.Errorf("elastigo only allows one client. See godoc.")
}
uf := "http://<host>:<port>/<index>"
u, err := url.Parse(urlstr)
if err != nil {
return nil, fmt.Errorf("error parsing url: %s (%s)", urlstr, uf)
}
host := strings.Split(u.Host, ":")
api.Domain = host[0]
if len(host) > 1 {
api.Port = host[1]
}
index := strings.Trim(u.Path, "/")
if strings.Contains(index, "/") {
e := "elastigo index cannot have slashes: %s (%s -> %s)"
return nil, fmt.Errorf(e, index, urlstr, uf)
}
GlobalInstance = urlstr
return &Datastore{
url: urlstr,
index: index,
KeyHash: BlakeKeyHash,
}, nil
}
// Index returns the ElasticSearch index for the given key. If the datastore
// specifies an index, use that; otherwise, fall back to key.Parent().BaseNamespace().
func (d *Datastore) Index(key ds.Key) string {
if len(d.index) > 0 {
return d.index
}
return key.Parent().BaseNamespace()
}
// value should be JSON serializable.
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
id := d.KeyHash(key)
res, err := core.Index(false, d.Index(key), key.Type(), id, value)
if err != nil {
return err
}
if !res.Ok {
return fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
}
return nil
}
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
id := d.KeyHash(key)
res, err := core.Get(false, d.Index(key), key.Type(), id)
if err != nil {
return nil, err
}
if !res.Ok {
return nil, fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
}
return res.Source, nil
}
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
id := d.KeyHash(key)
return core.Exists(false, d.Index(key), key.Type(), id)
}
func (d *Datastore) Delete(key ds.Key) (err error) {
id := d.KeyHash(key)
res, err := core.Delete(false, d.Index(key), key.Type(), id, 0, "")
if err != nil {
return err
}
if !res.Ok {
return fmt.Errorf("Elasticsearch response: NOT OK. %v", res)
}
return nil
}
func (d *Datastore) Query(query.Query) (query.Results, error) {
return nil, errors.New("Not yet implemented!")
}
// Hash a key and return the first 16 hex chars of its blake2b hash.
// basically: Blake2b(key).HexString[:16]
func BlakeKeyHash(key ds.Key) string {
h := blake2.NewBlake2B()
h.Write(key.Bytes())
d := h.Sum(nil)
return fmt.Sprintf("%x", d)[:16]
}
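A hedged usage sketch (the address and index name are hypothetical; values must be JSON-serializable):

d, err := NewDatastore("http://127.0.0.1:9200/myindex")
if err != nil {
	panic(err)
}
// indexed under "myindex" with id BlakeKeyHash(key)
_ = d.Put(ds.NewKey("/Comedy/MontyPython/Actor:JohnCleese"),
	map[string]string{"name": "John Cleese"})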

View File

@ -1,392 +0,0 @@
// Package flatfs is a Datastore implementation that stores all
// objects in a two-level directory structure in the local file
// system, regardless of the hierarchy of the keys.
package flatfs
import (
"encoding/hex"
"errors"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
)
var log = logging.Logger("flatfs")
const (
extension = ".data"
maxPrefixLen = 16
)
var (
ErrBadPrefixLen = errors.New("bad prefix length")
)
type Datastore struct {
path string
// length of the dir splay prefix, in bytes of hex digits
hexPrefixLen int
// synchronize all writes and directory changes for added safety
sync bool
}
var _ datastore.Datastore = (*Datastore)(nil)
func New(path string, prefixLen int, sync bool) (*Datastore, error) {
if prefixLen <= 0 || prefixLen > maxPrefixLen {
return nil, ErrBadPrefixLen
}
fs := &Datastore{
path: path,
// convert from binary bytes to bytes of hex encoding
hexPrefixLen: prefixLen * hex.EncodedLen(1),
sync: sync,
}
return fs, nil
}
var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1))
func (fs *Datastore) encode(key datastore.Key) (dir, file string) {
safe := hex.EncodeToString(key.Bytes()[1:])
prefix := (safe + padding)[:fs.hexPrefixLen]
dir = path.Join(fs.path, prefix)
file = path.Join(dir, safe+extension)
return dir, file
}
func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) {
if path.Ext(file) != extension {
return datastore.Key{}, false
}
name := file[:len(file)-len(extension)]
k, err := hex.DecodeString(name)
if err != nil {
return datastore.Key{}, false
}
return datastore.NewKey(string(k)), true
}
func (fs *Datastore) makePrefixDir(dir string) error {
if err := fs.makePrefixDirNoSync(dir); err != nil {
return err
}
// In theory, if we create a new prefix dir and add a file to
// it, the creation of the prefix dir itself might not be
// durable yet. Sync the root dir after a successful mkdir of
// a prefix dir, just to be paranoid.
if fs.sync {
if err := syncDir(fs.path); err != nil {
return err
}
}
return nil
}
func (fs *Datastore) makePrefixDirNoSync(dir string) error {
if err := os.Mkdir(dir, 0777); err != nil {
// EEXIST is safe to ignore here, that just means the prefix
// directory already existed.
if !os.IsExist(err) {
return err
}
}
return nil
}
var putMaxRetries = 3
func (fs *Datastore) Put(key datastore.Key, value interface{}) error {
val, ok := value.([]byte)
if !ok {
return datastore.ErrInvalidType
}
var err error
for i := 0; i < putMaxRetries; i++ {
err = fs.doPut(key, val)
if err == nil {
return nil
}
if !strings.Contains(err.Error(), "too many open files") {
return err
}
log.Errorf("too many open files, retrying in %dms", 100*i)
time.Sleep(time.Millisecond * 100 * time.Duration(i))
}
return err
}
func (fs *Datastore) doPut(key datastore.Key, val []byte) error {
dir, path := fs.encode(key)
if err := fs.makePrefixDir(dir); err != nil {
return err
}
tmp, err := ioutil.TempFile(dir, "put-")
if err != nil {
return err
}
closed := false
removed := false
defer func() {
if !closed {
// silence errcheck
_ = tmp.Close()
}
if !removed {
// silence errcheck
_ = os.Remove(tmp.Name())
}
}()
if _, err := tmp.Write(val); err != nil {
return err
}
if fs.sync {
if err := tmp.Sync(); err != nil {
return err
}
}
if err := tmp.Close(); err != nil {
return err
}
closed = true
err = osrename.Rename(tmp.Name(), path)
if err != nil {
return err
}
removed = true
if fs.sync {
if err := syncDir(dir); err != nil {
return err
}
}
return nil
}
func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error {
var dirsToSync []string
files := make(map[*os.File]string)
for key, value := range data {
val, ok := value.([]byte)
if !ok {
return datastore.ErrInvalidType
}
dir, path := fs.encode(key)
if err := fs.makePrefixDirNoSync(dir); err != nil {
return err
}
dirsToSync = append(dirsToSync, dir)
tmp, err := ioutil.TempFile(dir, "put-")
if err != nil {
return err
}
if _, err := tmp.Write(val); err != nil {
return err
}
files[tmp] = path
}
ops := make(map[*os.File]int)
defer func() {
for fi := range files {
val := ops[fi]
switch val {
case 0:
_ = fi.Close()
fallthrough
case 1:
_ = os.Remove(fi.Name())
}
}
}()
// Now we sync everything
// sync and close files
for fi := range files {
if fs.sync {
if err := fi.Sync(); err != nil {
return err
}
}
if err := fi.Close(); err != nil {
return err
}
// signify closed
ops[fi] = 1
}
// move files to their proper places
for fi, path := range files {
if err := osrename.Rename(fi.Name(), path); err != nil {
return err
}
// signify removed
ops[fi] = 2
}
// now sync the dirs for those files
if fs.sync {
for _, dir := range dirsToSync {
if err := syncDir(dir); err != nil {
return err
}
}
// sync top flatfs dir
if err := syncDir(fs.path); err != nil {
return err
}
}
return nil
}
func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) {
_, path := fs.encode(key)
data, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil, datastore.ErrNotFound
}
// no specific error to return, so just pass it through
return nil, err
}
return data, nil
}
func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) {
_, path := fs.encode(key)
switch _, err := os.Stat(path); {
case err == nil:
return true, nil
case os.IsNotExist(err):
return false, nil
default:
return false, err
}
}
func (fs *Datastore) Delete(key datastore.Key) error {
_, path := fs.encode(key)
switch err := os.Remove(path); {
case err == nil:
return nil
case os.IsNotExist(err):
return datastore.ErrNotFound
default:
return err
}
}
func (fs *Datastore) Query(q query.Query) (query.Results, error) {
if (q.Prefix != "" && q.Prefix != "/") ||
len(q.Filters) > 0 ||
len(q.Orders) > 0 ||
q.Limit > 0 ||
q.Offset > 0 ||
!q.KeysOnly {
// TODO this is overly simplistic, but the only caller is
// `ipfs refs local` for now, and this gets us moving.
return nil, errors.New("flatfs only supports listing all keys in random order")
}
reschan := make(chan query.Result)
go func() {
defer close(reschan)
err := filepath.Walk(fs.path, func(path string, info os.FileInfo, err error) error {
if err != nil {
// surface walk errors instead of dereferencing a nil info
return err
}
if !info.Mode().IsRegular() || info.Name()[0] == '.' {
return nil
}
key, ok := fs.decode(info.Name())
if !ok {
log.Warning("failed to decode entry in flatfs")
return nil
}
reschan <- query.Result{
Entry: query.Entry{
Key: key.String(),
},
}
return nil
})
if err != nil {
log.Warning("walk failed: ", err)
}
}()
return query.ResultsWithChan(q, reschan), nil
}
func (fs *Datastore) Close() error {
return nil
}
type flatfsBatch struct {
puts map[datastore.Key]interface{}
deletes map[datastore.Key]struct{}
ds *Datastore
}
func (fs *Datastore) Batch() (datastore.Batch, error) {
return &flatfsBatch{
puts: make(map[datastore.Key]interface{}),
deletes: make(map[datastore.Key]struct{}),
ds: fs,
}, nil
}
func (bt *flatfsBatch) Put(key datastore.Key, val interface{}) error {
bt.puts[key] = val
return nil
}
func (bt *flatfsBatch) Delete(key datastore.Key) error {
bt.deletes[key] = struct{}{}
return nil
}
func (bt *flatfsBatch) Commit() error {
if err := bt.ds.putMany(bt.puts); err != nil {
return err
}
for k := range bt.deletes {
if err := bt.ds.Delete(k); err != nil {
return err
}
}
return nil
}
var _ datastore.ThreadSafeDatastore = (*Datastore)(nil)
func (*Datastore) IsThreadSafe() {}
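A hedged usage sketch (the path is hypothetical). With prefixLen=2, the key "/FOO" has body bytes "FOO", hex "464f4f", so it lands in the 4-hex-character prefix directory "464f":

fs, err := New("/tmp/flatfs-example", 2, true) // sync writes for durability
if err != nil {
	panic(err)
}
// values must be []byte, or Put returns datastore.ErrInvalidType
_ = fs.Put(datastore.NewKey("/FOO"), []byte("bar"))
// on disk: /tmp/flatfs-example/464f/464f4f.data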

View File

@ -1,17 +0,0 @@
// +build !windows
package flatfs
import "os"
func syncDir(dir string) error {
dirF, err := os.Open(dir)
if err != nil {
return err
}
defer dirF.Close()
if err := dirF.Sync(); err != nil {
return err
}
return nil
}

View File

@ -1,5 +0,0 @@
package flatfs
func syncDir(dir string) error {
return nil
}

View File

@ -1,159 +0,0 @@
// Package fs is a simple Datastore implementation that stores keys
// as directories and files, mirroring the key. That is, the key
// "/foo/bar" is stored as the file "PATH/foo/bar/.dsobject".
//
// This means some key segments will not work. For example, the
// following keys will result in unwanted behavior:
//
// - "/foo/./bar"
// - "/foo/../bar"
// - "/foo\x00bar"
//
// Keys that only differ in case may be confused with each other on
// case insensitive file systems, for example in OS X.
//
// This package is intended for exploratory use, where the user would
// examine the file system manually, and should only be used with
// human-friendly, trusted keys. You have been warned.
package fs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
var ObjectKeySuffix = ".dsobject"
// Datastore uses a file per key to store values.
type Datastore struct {
path string
}
// NewDatastore returns a new fs Datastore at given `path`
func NewDatastore(path string) (ds.Datastore, error) {
if !isDir(path) {
return nil, fmt.Errorf("Failed to find directory at: %v (file? perms?)", path)
}
return &Datastore{path: path}, nil
}
// KeyFilename returns the filename associated with `key`
func (d *Datastore) KeyFilename(key ds.Key) string {
return filepath.Join(d.path, key.String(), ObjectKeySuffix)
}
// Put stores the given value.
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
// TODO: maybe use io.Readers/Writers?
// r, err := dsio.CastAsReader(value)
// if err != nil {
// return err
// }
val, ok := value.([]byte)
if !ok {
return ds.ErrInvalidType
}
fn := d.KeyFilename(key)
// ensure the parent directories exist.
err = os.MkdirAll(filepath.Dir(fn), 0755)
if err != nil {
return err
}
return ioutil.WriteFile(fn, val, 0666)
}
// Get returns the value for given key
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
fn := d.KeyFilename(key)
if !isFile(fn) {
return nil, ds.ErrNotFound
}
return ioutil.ReadFile(fn)
}
// Has returns whether the datastore has a value for a given key
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
return ds.GetBackedHas(d, key)
}
// Delete removes the value for given key
func (d *Datastore) Delete(key ds.Key) (err error) {
fn := d.KeyFilename(key)
if !isFile(fn) {
return ds.ErrNotFound
}
return os.Remove(fn)
}
// Query implements Datastore.Query
func (d *Datastore) Query(q query.Query) (query.Results, error) {
results := make(chan query.Result)
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
// surface walk errors instead of dereferencing a nil info
return err
}
// remove ds path prefix
if strings.HasPrefix(path, d.path) {
path = path[len(d.path):]
}
if !info.IsDir() {
if strings.HasSuffix(path, ObjectKeySuffix) {
path = path[:len(path)-len(ObjectKeySuffix)]
}
key := ds.NewKey(path)
entry := query.Entry{Key: key.String(), Value: query.NotFetched}
results <- query.Result{Entry: entry}
}
return nil
}
go func() {
filepath.Walk(d.path, walkFn)
close(results)
}()
r := query.ResultsWithChan(q, results)
r = query.NaiveQueryApply(q, r)
return r, nil
}
// isDir returns whether given path is a directory
func isDir(path string) bool {
finfo, err := os.Stat(path)
if err != nil {
return false
}
return finfo.IsDir()
}
// isFile returns whether given path is a file
func isFile(path string) bool {
finfo, err := os.Stat(path)
if err != nil {
return false
}
return !finfo.IsDir()
}
func (d *Datastore) Close() error {
return nil
}
func (d *Datastore) Batch() (ds.Batch, error) {
return ds.NewBasicBatch(d), nil
}
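A hedged usage sketch (the directory is hypothetical and must already exist); the key "/foo/bar" maps to the file PATH/foo/bar/.dsobject:

d, err := NewDatastore("/tmp/fsds-example")
if err != nil {
	panic(err)
}
// written to /tmp/fsds-example/foo/bar/.dsobject
_ = d.Put(ds.NewKey("/foo/bar"), []byte("baz"))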

View File

@ -1,252 +0,0 @@
package datastore
import (
"path"
"strings"
"gx/ipfs/QmcyaFHbyiZfoX5GTpcqqCPYmbjYNAhRDekXSJPFHdYNSV/go.uuid"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
/*
A Key represents the unique identifier of an object.
Our Key scheme is inspired by file systems and the Google App Engine key model.
Keys are meant to be unique across a system. Keys are hierarchical,
incorporating more and more specific namespaces. Thus keys can be deemed
'children' or 'ancestors' of other keys::
Key("/Comedy")
Key("/Comedy/MontyPython")
Also, every namespace can be parametrized to embed relevant object
information. For example, the Key `name` (most specific namespace) could
include the object type::
Key("/Comedy/MontyPython/Actor:JohnCleese")
Key("/Comedy/MontyPython/Sketch:CheeseShop")
Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")
*/
type Key struct {
string
}
// NewKey constructs a key from a string. It will clean the value.
func NewKey(s string) Key {
k := Key{s}
k.Clean()
return k
}
// KeyWithNamespaces constructs a key out of a namespace slice.
func KeyWithNamespaces(ns []string) Key {
return NewKey(strings.Join(ns, "/"))
}
// Clean up a Key, using path.Clean.
func (k *Key) Clean() {
k.string = path.Clean("/" + k.string)
}
// String returns the string value of Key
func (k Key) String() string {
return k.string
}
// Bytes returns the string value of Key as a []byte
func (k Key) Bytes() []byte {
return []byte(k.string)
}
// Equal checks equality of two keys
func (k Key) Equal(k2 Key) bool {
return k.string == k2.string
}
// Less checks whether this key is sorted lower than another.
func (k Key) Less(k2 Key) bool {
list1 := k.List()
list2 := k2.List()
for i, c1 := range list1 {
if len(list2) < (i + 1) {
return false
}
c2 := list2[i]
if c1 < c2 {
return true
} else if c1 > c2 {
return false
}
// c1 == c2, continue
}
// list1 is shorter or exactly the same.
return len(list1) < len(list2)
}
// List returns the `list` representation of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
func (k Key) List() []string {
return strings.Split(k.string, "/")[1:]
}
// Reverse returns the reverse of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
// NewKey("/Actor:JohnCleese/MontyPython/Comedy")
func (k Key) Reverse() Key {
l := k.List()
r := make([]string, len(l))
for i, e := range l {
r[len(l)-i-1] = e
}
return KeyWithNamespaces(r)
}
// Namespaces returns the `namespaces` making up this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
func (k Key) Namespaces() []string {
return k.List()
}
// BaseNamespace returns the "base" namespace of this key (path.Base(filename))
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
// "Actor:JohnCleese"
func (k Key) BaseNamespace() string {
n := k.Namespaces()
return n[len(n)-1]
}
// Type returns the "type" of this key (type section of the last namespace).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type()
// "Actor"
func (k Key) Type() string {
return NamespaceType(k.BaseNamespace())
}
// Name returns the "name" of this key (value section of the last namespace).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name()
// "JohnCleese"
func (k Key) Name() string {
return NamespaceValue(k.BaseNamespace())
}
// Instance returns an "instance" of this type key (appends a value to the namespace).
// NewKey("/Comedy/MontyPython/Actor").Instance("JohnCleese")
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Instance(s string) Key {
return NewKey(k.string + ":" + s)
}
// Path returns the "path" of this key (parent + type).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
// NewKey("/Comedy/MontyPython/Actor")
func (k Key) Path() Key {
s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
return NewKey(s)
}
// Parent returns the `parent` Key of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
// NewKey("/Comedy/MontyPython")
func (k Key) Parent() Key {
n := k.List()
if len(n) == 1 {
return NewKey("/")
}
return NewKey(strings.Join(n[:len(n)-1], "/"))
}
// Child returns the `child` Key of this Key.
// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese")
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Child(k2 Key) Key {
return NewKey(k.string + "/" + k2.string)
}
// ChildString returns the `child` Key of this Key -- string helper.
// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese")
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) ChildString(s string) Key {
return NewKey(k.string + "/" + s)
}
// IsAncestorOf returns whether this key is a prefix of `other`
// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
// true
func (k Key) IsAncestorOf(other Key) bool {
if other.string == k.string {
return false
}
return strings.HasPrefix(other.string, k.string)
}
// IsDescendantOf returns whether this key contains another as a prefix.
// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
// true
func (k Key) IsDescendantOf(other Key) bool {
if other.string == k.string {
return false
}
return strings.HasPrefix(k.string, other.string)
}
// IsTopLevel returns whether this key has only one namespace.
func (k Key) IsTopLevel() bool {
return len(k.List()) == 1
}
// RandomKey returns a randomly (uuid) generated key.
// RandomKey()
// NewKey("/f98719ea086343f7b71f32ea9d9d521d")
func RandomKey() Key {
return NewKey(strings.Replace(uuid.NewV4().String(), "-", "", -1))
}
/*
A Key Namespace is like a path element.
A namespace can optionally include a type (delimited by ':')
> NamespaceValue("Song:PhilosopherSong")
PhilosopherSong
> NamespaceType("Song:PhilosopherSong")
Song
> NamespaceType("Music:Song:PhilosopherSong")
Music:Song
*/
// NamespaceType is the first component of a namespace. `foo` in `foo:bar`
func NamespaceType(namespace string) string {
parts := strings.Split(namespace, ":")
if len(parts) < 2 {
return ""
}
return strings.Join(parts[0:len(parts)-1], ":")
}
// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz`
func NamespaceValue(namespace string) string {
parts := strings.Split(namespace, ":")
return parts[len(parts)-1]
}
// KeySlice attaches the methods of sort.Interface to []Key,
// sorting in increasing order.
type KeySlice []Key
func (p KeySlice) Len() int { return len(p) }
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// EntryKeys extracts the keys from a slice of query entries.
func EntryKeys(e []dsq.Entry) []Key {
ks := make([]Key, len(e))
for i, e := range e {
ks[i] = NewKey(e.Key)
}
return ks
}
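A short illustration of the hierarchy helpers above:

k := NewKey("/Comedy/MontyPython/Actor:JohnCleese")
k.Parent()        // NewKey("/Comedy/MontyPython")
k.BaseNamespace() // "Actor:JohnCleese"
k.Type()          // "Actor"
k.Name()          // "JohnCleese"
NewKey("/Comedy").IsAncestorOf(k) // true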

View File

@ -1,25 +0,0 @@
// Package keytransform introduces a Datastore Shim that transforms keys before
// passing them to its child. It can be used to manipulate what keys look like
// to the user, for example namespacing keys, reversing them, etc.
//
// Use the Wrap function to wrap a datastore with any KeyTransform.
// A KeyTransform is simply an interface with two functions, a conversion and
// its inverse. For example:
//
// import (
// ktds "github.com/ipfs/go-datastore/keytransform"
// ds "github.com/ipfs/go-datastore"
// )
//
// func reverseKey(k ds.Key) ds.Key {
// return k.Reverse()
// }
//
// func invertKeys(d ds.Datastore) ds.Datastore {
// return ktds.Wrap(d, &ktds.Pair{
// Convert: reverseKey,
// Invert: reverseKey, // reverse is its own inverse.
// })
// }
//
package keytransform

View File

@ -1,34 +0,0 @@
package keytransform
import ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
// KeyMapping is a function that maps one key to another
type KeyMapping func(ds.Key) ds.Key
// KeyTransform is an object with a pair of functions for (invertibly)
// transforming keys
type KeyTransform interface {
ConvertKey(ds.Key) ds.Key
InvertKey(ds.Key) ds.Key
}
// Datastore is a keytransform.Datastore
type Datastore interface {
ds.Shim
KeyTransform
}
// Wrap wraps a given datastore with a KeyTransform function.
// The resulting wrapped datastore will use the transform on all Datastore
// operations.
func Wrap(child ds.Datastore, t KeyTransform) *ktds {
if t == nil {
panic("t (KeyTransform) is nil")
}
if child == nil {
panic("child (ds.Datastore) is nil")
}
return &ktds{child: child, KeyTransform: t}
}

View File

@ -1,118 +0,0 @@
package keytransform
import (
"io"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
type Pair struct {
Convert KeyMapping
Invert KeyMapping
}
func (t *Pair) ConvertKey(k ds.Key) ds.Key {
return t.Convert(k)
}
func (t *Pair) InvertKey(k ds.Key) ds.Key {
return t.Invert(k)
}
// ktds keeps a KeyTransform function
type ktds struct {
child ds.Datastore
KeyTransform
}
// Children implements ds.Shim
func (d *ktds) Children() []ds.Datastore {
return []ds.Datastore{d.child}
}
// Put stores the given value, transforming the key first.
func (d *ktds) Put(key ds.Key, value interface{}) (err error) {
return d.child.Put(d.ConvertKey(key), value)
}
// Get returns the value for given key, transforming the key first.
func (d *ktds) Get(key ds.Key) (value interface{}, err error) {
return d.child.Get(d.ConvertKey(key))
}
// Has returns whether the datastore has a value for a given key, transforming
// the key first.
func (d *ktds) Has(key ds.Key) (exists bool, err error) {
return d.child.Has(d.ConvertKey(key))
}
// Delete removes the value for given key
func (d *ktds) Delete(key ds.Key) (err error) {
return d.child.Delete(d.ConvertKey(key))
}
// Query implements Query, inverting keys on the way back out.
func (d *ktds) Query(q dsq.Query) (dsq.Results, error) {
qr, err := d.child.Query(q)
if err != nil {
return nil, err
}
ch := make(chan dsq.Result)
go func() {
defer close(ch)
defer qr.Close()
for r := range qr.Next() {
if r.Error == nil {
r.Entry.Key = d.InvertKey(ds.NewKey(r.Entry.Key)).String()
}
ch <- r
}
}()
return dsq.DerivedResults(qr, ch), nil
}
func (d *ktds) Close() error {
if c, ok := d.child.(io.Closer); ok {
return c.Close()
}
return nil
}
func (d *ktds) Batch() (ds.Batch, error) {
bds, ok := d.child.(ds.Batching)
if !ok {
return nil, ds.ErrBatchUnsupported
}
childbatch, err := bds.Batch()
if err != nil {
return nil, err
}
return &transformBatch{
dst: childbatch,
f: d.ConvertKey,
}, nil
}
type transformBatch struct {
dst ds.Batch
f KeyMapping
}
func (t *transformBatch) Put(key ds.Key, val interface{}) error {
return t.dst.Put(t.f(key), val)
}
func (t *transformBatch) Delete(key ds.Key) error {
return t.dst.Delete(t.f(key))
}
func (t *transformBatch) Commit() error {
return t.dst.Commit()
}

View File

@ -1,155 +0,0 @@
package leveldb
import (
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb"
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt"
"gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/util"
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
)
type datastore struct {
DB *leveldb.DB
}
type Options opt.Options
func NewDatastore(path string, opts *Options) (*datastore, error) {
var nopts opt.Options
if opts != nil {
nopts = opt.Options(*opts)
}
db, err := leveldb.OpenFile(path, &nopts)
if err != nil {
return nil, err
}
return &datastore{
DB: db,
}, nil
}
// Returns ErrInvalidType if value is not of type []byte.
//
// NOTE: Using sync = false.
// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
val, ok := value.([]byte)
if !ok {
return ds.ErrInvalidType
}
return d.DB.Put(key.Bytes(), val, nil)
}
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
val, err := d.DB.Get(key.Bytes(), nil)
if err != nil {
if err == leveldb.ErrNotFound {
return nil, ds.ErrNotFound
}
return nil, err
}
return val, nil
}
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
return d.DB.Has(key.Bytes(), nil)
}
func (d *datastore) Delete(key ds.Key) (err error) {
err = d.DB.Delete(key.Bytes(), nil)
if err == leveldb.ErrNotFound {
return ds.ErrNotFound
}
return err
}
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
// we can use multiple iterators concurrently. see:
// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator
// advance the iterator only if the reader reads
//
// run query in own sub-process tied to Results.Process(), so that
// it waits for us to finish AND so that clients can signal to us
// that resources should be reclaimed.
qrb := dsq.NewResultBuilder(q)
qrb.Process.Go(func(worker goprocess.Process) {
d.runQuery(worker, qrb)
})
// go wait on the worker (without signaling close)
go qrb.Process.CloseAfterChildren()
// Now, apply remaining things (filters, order)
qr := qrb.Results()
for _, f := range q.Filters {
qr = dsq.NaiveFilter(qr, f)
}
for _, o := range q.Orders {
qr = dsq.NaiveOrder(qr, o)
}
return qr, nil
}
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
var rnge *util.Range
if qrb.Query.Prefix != "" {
rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
}
i := d.DB.NewIterator(rnge, nil)
defer i.Release()
// advance iterator for offset
if qrb.Query.Offset > 0 {
for j := 0; j < qrb.Query.Offset; j++ {
i.Next()
}
}
// iterate, and handle limit, too
for sent := 0; i.Next(); sent++ {
// end early if we hit the limit
if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
break
}
k := ds.NewKey(string(i.Key())).String()
e := dsq.Entry{Key: k}
if !qrb.Query.KeysOnly {
buf := make([]byte, len(i.Value()))
copy(buf, i.Value())
e.Value = buf
}
select {
case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
case <-worker.Closing(): // client told us to end early.
return // break would only exit the select, not the loop
}
}
if err := i.Error(); err != nil {
select {
case qrb.Output <- dsq.Result{Error: err}: // client read our error
case <-worker.Closing(): // client told us to end.
return
}
}
}
func (d *datastore) Batch() (ds.Batch, error) {
// TODO: implement batch on leveldb
return nil, ds.ErrBatchUnsupported
}
// LevelDB needs to be closed.
func (d *datastore) Close() (err error) {
return d.DB.Close()
}
func (d *datastore) IsThreadSafe() {}
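A hedged usage sketch (the path is hypothetical; a nil Options uses goleveldb defaults):

d, err := NewDatastore("/tmp/leveldb-example", nil)
if err != nil {
	panic(err)
}
defer d.Close() // LevelDB must be closed
_ = d.Put(ds.NewKey("/a"), []byte("1"))
res, _ := d.Query(dsq.Query{Prefix: "/"})
entries, _ := res.Rest() // block until the query finishes
_ = entries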

View File

@ -1,64 +0,0 @@
package lru
import (
"errors"
lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// Datastore uses golang-lru for internal storage.
type Datastore struct {
cache *lru.Cache
}
// NewDatastore constructs a new LRU Datastore with given capacity.
func NewDatastore(capacity int) (*Datastore, error) {
cache, err := lru.New(capacity)
if err != nil {
return nil, err
}
return &Datastore{cache: cache}, nil
}
// Put stores the object `value` named by `key`.
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
d.cache.Add(key, value)
return nil
}
// Get retrieves the object `value` named by `key`.
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
val, ok := d.cache.Get(key)
if !ok {
return nil, ds.ErrNotFound
}
return val, nil
}
// Has returns whether the `key` is mapped to a `value`.
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
return ds.GetBackedHas(d, key)
}
// Delete removes the value for given `key`.
func (d *Datastore) Delete(key ds.Key) (err error) {
d.cache.Remove(key)
return nil
}
// Query is not supported by the LRU datastore.
func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) {
return nil, errors.New("lru datastore: query not implemented")
}
func (d *Datastore) Close() error {
return nil
}
func (d *Datastore) Batch() (ds.Batch, error) {
return nil, ds.ErrBatchUnsupported
}
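A minimal sketch; once the LRU policy evicts an entry, Get returns ds.ErrNotFound:

d, _ := NewDatastore(128) // keep at most 128 entries
_ = d.Put(ds.NewKey("/a"), "x")
if _, err := d.Get(ds.NewKey("/a")); err == ds.ErrNotFound {
	// the entry was evicted
}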

View File

@ -1,248 +0,0 @@
// Package measure provides a Datastore wrapper that records metrics
// using github.com/codahale/metrics.
package measure
import (
"io"
"time"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// Histogram measurements exceeding these limits are dropped. TODO
// maybe it would be better to cap the value? Should we keep track of
// drops?
const (
maxLatency = int64(1 * time.Second)
maxSize = int64(1 << 32)
)
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) *measure {
m := &measure{
backend: ds,
putNum: metrics.Counter(prefix + ".Put.num"),
putErr: metrics.Counter(prefix + ".Put.err"),
putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
putSize: metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),
getNum: metrics.Counter(prefix + ".Get.num"),
getErr: metrics.Counter(prefix + ".Get.err"),
getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
getSize: metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),
hasNum: metrics.Counter(prefix + ".Has.num"),
hasErr: metrics.Counter(prefix + ".Has.err"),
hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),
deleteNum: metrics.Counter(prefix + ".Delete.num"),
deleteErr: metrics.Counter(prefix + ".Delete.err"),
deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),
queryNum: metrics.Counter(prefix + ".Query.num"),
queryErr: metrics.Counter(prefix + ".Query.err"),
queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
}
return m
}
type measure struct {
backend datastore.Datastore
putNum metrics.Counter
putErr metrics.Counter
putLatency *metrics.Histogram
putSize *metrics.Histogram
getNum metrics.Counter
getErr metrics.Counter
getLatency *metrics.Histogram
getSize *metrics.Histogram
hasNum metrics.Counter
hasErr metrics.Counter
hasLatency *metrics.Histogram
deleteNum metrics.Counter
deleteErr metrics.Counter
deleteLatency *metrics.Histogram
queryNum metrics.Counter
queryErr metrics.Counter
queryLatency *metrics.Histogram
}
var _ datastore.Datastore = (*measure)(nil)
func recordLatency(h *metrics.Histogram, start time.Time) {
elapsed := time.Since(start) / time.Microsecond
_ = h.RecordValue(int64(elapsed))
}
func (m *measure) Put(key datastore.Key, value interface{}) error {
defer recordLatency(m.putLatency, time.Now())
m.putNum.Add()
if b, ok := value.([]byte); ok {
_ = m.putSize.RecordValue(int64(len(b)))
}
err := m.backend.Put(key, value)
if err != nil {
m.putErr.Add()
}
return err
}
func (m *measure) Get(key datastore.Key) (value interface{}, err error) {
defer recordLatency(m.getLatency, time.Now())
m.getNum.Add()
value, err = m.backend.Get(key)
if err != nil {
m.getErr.Add()
} else {
if b, ok := value.([]byte); ok {
_ = m.getSize.RecordValue(int64(len(b)))
}
}
return value, err
}
func (m *measure) Has(key datastore.Key) (exists bool, err error) {
defer recordLatency(m.hasLatency, time.Now())
m.hasNum.Add()
exists, err = m.backend.Has(key)
if err != nil {
m.hasErr.Add()
}
return exists, err
}
func (m *measure) Delete(key datastore.Key) error {
defer recordLatency(m.deleteLatency, time.Now())
m.deleteNum.Add()
err := m.backend.Delete(key)
if err != nil {
m.deleteErr.Add()
}
return err
}
func (m *measure) Query(q query.Query) (query.Results, error) {
defer recordLatency(m.queryLatency, time.Now())
m.queryNum.Add()
res, err := m.backend.Query(q)
if err != nil {
m.queryErr.Add()
}
return res, err
}
type measuredBatch struct {
puts int
deletes int
putts datastore.Batch
delts datastore.Batch
m *measure
}
func (m *measure) Batch() (datastore.Batch, error) {
bds, ok := m.backend.(datastore.Batching)
if !ok {
return nil, datastore.ErrBatchUnsupported
}
pb, err := bds.Batch()
if err != nil {
return nil, err
}
db, err := bds.Batch()
if err != nil {
return nil, err
}
return &measuredBatch{
putts: pb,
delts: db,
m: m,
}, nil
}
func (mt *measuredBatch) Put(key datastore.Key, val interface{}) error {
mt.puts++
valb, ok := val.([]byte)
if !ok {
return datastore.ErrInvalidType
}
_ = mt.m.putSize.RecordValue(int64(len(valb)))
return mt.putts.Put(key, val)
}
func (mt *measuredBatch) Delete(key datastore.Key) error {
mt.deletes++
return mt.delts.Delete(key)
}
func (mt *measuredBatch) Commit() error {
err := logBatchCommit(mt.delts, mt.deletes, mt.m.deleteNum, mt.m.deleteErr, mt.m.deleteLatency)
if err != nil {
return err
}
err = logBatchCommit(mt.putts, mt.puts, mt.m.putNum, mt.m.putErr, mt.m.putLatency)
if err != nil {
return err
}
return nil
}
func logBatchCommit(b datastore.Batch, n int, num, errs metrics.Counter, lat *metrics.Histogram) error {
if n > 0 {
before := time.Now()
err := b.Commit()
took := int(time.Since(before)/time.Microsecond) / n
num.AddN(uint64(n))
for i := 0; i < n; i++ {
_ = lat.RecordValue(int64(took))
}
if err != nil {
errs.Add()
return err
}
}
return nil
}
func (m *measure) Close() error {
m.putNum.Remove()
m.putErr.Remove()
m.putLatency.Remove()
m.putSize.Remove()
m.getNum.Remove()
m.getErr.Remove()
m.getLatency.Remove()
m.getSize.Remove()
m.hasNum.Remove()
m.hasErr.Remove()
m.hasLatency.Remove()
m.deleteNum.Remove()
m.deleteErr.Remove()
m.deleteLatency.Remove()
m.queryNum.Remove()
m.queryErr.Remove()
m.queryLatency.Remove()
if c, ok := m.backend.(io.Closer); ok {
return c.Close()
}
return nil
}
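A hedged usage sketch (the prefix is arbitrary but must be unique per process, and datastore.NewMapDatastore is assumed as the backend); Close releases the metric names:

m := New("example.blockstore", datastore.NewMapDatastore())
defer m.Close()
// counted in example.blockstore.Put.num, timed in .Put.latency,
// and sized in .Put.size
_ = m.Put(datastore.NewKey("/a"), []byte("1"))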

View File

@ -1,188 +0,0 @@
// Package mount provides a Datastore that has other Datastores
// mounted at various key prefixes.
package mount
import (
"errors"
"io"
"strings"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
var (
ErrNoMount = errors.New("no datastore mounted for this key")
)
type Mount struct {
Prefix datastore.Key
Datastore datastore.Datastore
}
func New(mounts []Mount) *Datastore {
// copy the mounts slice so a caller can't mutate it out from under us
m := make([]Mount, len(mounts))
copy(m, mounts)
return &Datastore{mounts: m}
}
type Datastore struct {
mounts []Mount
}
var _ datastore.Datastore = (*Datastore)(nil)
func (d *Datastore) lookup(key datastore.Key) (ds datastore.Datastore, mountpoint, rest datastore.Key) {
for _, m := range d.mounts {
if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
s := strings.TrimPrefix(key.String(), m.Prefix.String())
k := datastore.NewKey(s)
return m.Datastore, m.Prefix, k
}
}
return nil, datastore.NewKey("/"), key
}
func (d *Datastore) Put(key datastore.Key, value interface{}) error {
ds, _, k := d.lookup(key)
if ds == nil {
return ErrNoMount
}
return ds.Put(k, value)
}
func (d *Datastore) Get(key datastore.Key) (value interface{}, err error) {
ds, _, k := d.lookup(key)
if ds == nil {
return nil, datastore.ErrNotFound
}
return ds.Get(k)
}
func (d *Datastore) Has(key datastore.Key) (exists bool, err error) {
ds, _, k := d.lookup(key)
if ds == nil {
return false, nil
}
return ds.Has(k)
}
func (d *Datastore) Delete(key datastore.Key) error {
ds, _, k := d.lookup(key)
if ds == nil {
return datastore.ErrNotFound
}
return ds.Delete(k)
}
func (d *Datastore) Query(q query.Query) (query.Results, error) {
if len(q.Filters) > 0 ||
len(q.Orders) > 0 ||
q.Limit > 0 ||
q.Offset > 0 {
// TODO this is overly simplistic, but the only caller is
// `ipfs refs local` for now, and this gets us moving.
return nil, errors.New("mount only supports listing all prefixed keys in random order")
}
key := datastore.NewKey(q.Prefix)
ds, mount, k := d.lookup(key)
if ds == nil {
return nil, errors.New("mount only supports listing a mount point")
}
// TODO support listing cross mount points too
// delegate the query to the mounted datastore, while adjusting
// keys in and out
q2 := q
q2.Prefix = k.String()
wrapDS := keytransform.Wrap(ds, &keytransform.Pair{
Convert: func(datastore.Key) datastore.Key {
panic("this should never be called")
},
Invert: func(k datastore.Key) datastore.Key {
return mount.Child(k)
},
})
r, err := wrapDS.Query(q2)
if err != nil {
return nil, err
}
r = query.ResultsReplaceQuery(r, q)
return r, nil
}
func (d *Datastore) Close() error {
for _, d := range d.mounts {
if c, ok := d.Datastore.(io.Closer); ok {
err := c.Close()
if err != nil {
return err
}
}
}
return nil
}
type mountBatch struct {
mounts map[string]datastore.Batch
d *Datastore
}
func (d *Datastore) Batch() (datastore.Batch, error) {
return &mountBatch{
mounts: make(map[string]datastore.Batch),
d: d,
}, nil
}
func (mt *mountBatch) lookupBatch(key datastore.Key) (datastore.Batch, datastore.Key, error) {
child, loc, rest := mt.d.lookup(key)
t, ok := mt.mounts[loc.String()]
if !ok {
bds, ok := child.(datastore.Batching)
if !ok {
return nil, datastore.NewKey(""), datastore.ErrBatchUnsupported
}
var err error
t, err = bds.Batch()
if err != nil {
return nil, datastore.NewKey(""), err
}
mt.mounts[loc.String()] = t
}
return t, rest, nil
}
func (mt *mountBatch) Put(key datastore.Key, val interface{}) error {
t, rest, err := mt.lookupBatch(key)
if err != nil {
return err
}
return t.Put(rest, val)
}
func (mt *mountBatch) Delete(key datastore.Key) error {
t, rest, err := mt.lookupBatch(key)
if err != nil {
return err
}
return t.Delete(rest)
}
func (mt *mountBatch) Commit() error {
for _, t := range mt.mounts {
err := t.Commit()
if err != nil {
return err
}
}
return nil
}
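A hedged usage sketch (the child datastores are stand-ins); lookup scans mounts in order, so list more specific prefixes first:

blocksDS := datastore.NewMapDatastore()
rootDS := datastore.NewMapDatastore()
d := New([]Mount{
	{Prefix: datastore.NewKey("/blocks"), Datastore: blocksDS},
	{Prefix: datastore.NewKey("/"), Datastore: rootDS},
})
// lands in blocksDS under the stripped key "/b1"
_ = d.Put(datastore.NewKey("/blocks/b1"), []byte("data"))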

View File

@ -1,24 +0,0 @@
// Package namespace introduces a namespace Datastore Shim, which basically
// mounts the entire child datastore under a prefix.
//
// Use the Wrap function to wrap a datastore with any Key prefix. For example:
//
// import (
// "fmt"
//
// ds "github.com/ipfs/go-datastore"
// nsds "github.com/ipfs/go-datastore/namespace"
// )
//
// func main() {
// mp := ds.NewMapDatastore()
// ns := nsds.Wrap(mp, ds.NewKey("/foo/bar"))
//
// // in the Namespace Datastore:
// ns.Put(ds.NewKey("/beep"), "boop")
// v2, _ := ns.Get(ds.NewKey("/beep")) // v2 == "boop"
//
// // and, in the underlying MapDatastore:
// v3, _ := mp.Get(ds.NewKey("/foo/bar/beep")) // v3 == "boop"
// }
package namespace

View File

@ -1,91 +0,0 @@
package namespace
import (
"fmt"
"strings"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
ktds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// PrefixTransform constructs a KeyTransform with a pair of functions that
// add or remove the given prefix key.
//
// Warning: Will panic if prefix not found when it should be there. This is
// to avoid insidious data inconsistency errors.
func PrefixTransform(prefix ds.Key) ktds.KeyTransform {
return &ktds.Pair{
// Convert adds the prefix
Convert: func(k ds.Key) ds.Key {
return prefix.Child(k)
},
// Invert removes the prefix. panics if prefix not found.
Invert: func(k ds.Key) ds.Key {
if !prefix.IsAncestorOf(k) {
panic(fmt.Sprintf("expected prefix (%s) not found in key (%s)", prefix, k))
}
s := strings.TrimPrefix(k.String(), prefix.String())
return ds.NewKey(s)
},
}
}
// Wrap wraps a given datastore with a key-prefix.
func Wrap(child ds.Datastore, prefix ds.Key) *datastore {
if child == nil {
panic("child (ds.Datastore) is nil")
}
d := ktds.Wrap(child, PrefixTransform(prefix))
return &datastore{Datastore: d, raw: child, prefix: prefix}
}
type datastore struct {
prefix ds.Key
raw ds.Datastore
ktds.Datastore
}
// Query implements Query, inverting keys on the way back out.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
qr, err := d.raw.Query(q)
if err != nil {
return nil, err
}
ch := make(chan dsq.Result)
go func() {
defer close(ch)
defer qr.Close()
for r := range qr.Next() {
if r.Error != nil {
ch <- r
continue
}
k := ds.NewKey(r.Entry.Key)
if !d.prefix.IsAncestorOf(k) {
continue
}
r.Entry.Key = d.Datastore.InvertKey(k).String()
ch <- r
}
}()
return dsq.DerivedResults(qr, ch), nil
}
func (d *datastore) Batch() (ds.Batch, error) {
if bds, ok := d.Datastore.(ds.Batching); ok {
return bds.Batch()
}
return nil, ds.ErrBatchUnsupported
}

View File

@ -1,120 +0,0 @@
package sync
import (
"fmt"
"io"
"os"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
type datastore struct {
child ds.Datastore
}
// Wrap shims a datastore such that _any_ operation failing triggers a panic
// This is useful for debugging invariants.
func Wrap(d ds.Datastore) ds.Shim {
return &datastore{child: d}
}
func (d *datastore) Children() []ds.Datastore {
return []ds.Datastore{d.child}
}
func (d *datastore) Put(key ds.Key, value interface{}) error {
err := d.child.Put(key, value)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Put failed")
}
return nil
}
func (d *datastore) Get(key ds.Key) (interface{}, error) {
val, err := d.child.Get(key)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Get failed")
}
return val, nil
}
func (d *datastore) Has(key ds.Key) (bool, error) {
e, err := d.child.Has(key)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Has failed")
}
return e, nil
}
func (d *datastore) Delete(key ds.Key) error {
err := d.child.Delete(key)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Delete failed")
}
return nil
}
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
r, err := d.child.Query(q)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Query failed")
}
return r, nil
}
func (d *datastore) Close() error {
if c, ok := d.child.(io.Closer); ok {
err := c.Close()
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: Close failed")
}
}
return nil
}
func (d *datastore) Batch() (ds.Batch, error) {
bds, ok := d.child.(ds.Batching)
if !ok {
return nil, ds.ErrBatchUnsupported
}
b, err := bds.Batch()
if err != nil {
return nil, err
}
return &panicBatch{b}, nil
}
type panicBatch struct {
t ds.Batch
}
func (p *panicBatch) Put(key ds.Key, val interface{}) error {
err := p.t.Put(key, val)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: transaction put failed")
}
return nil
}
func (p *panicBatch) Delete(key ds.Key) error {
err := p.t.Delete(key)
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: transaction delete failed")
}
return nil
}
func (p *panicBatch) Commit() error {
err := p.t.Commit()
if err != nil {
fmt.Fprintf(os.Stdout, "panic datastore: %s", err)
panic("panic datastore: transaction commit failed")
}
return nil
}

View File

@ -1,86 +0,0 @@
package query
import (
"fmt"
"reflect"
"strings"
)
// Filter is an object that tests ResultEntries
type Filter interface {
// Filter returns whether an entry passes the filter
Filter(e Entry) bool
}
// Op is a comparison operator
type Op string
var (
Equal = Op("==")
NotEqual = Op("!=")
GreaterThan = Op(">")
GreaterThanOrEqual = Op(">=")
LessThan = Op("<")
LessThanOrEqual = Op("<=")
)
// FilterValueCompare is used to signal to datastores they
// should apply internal comparisons. Unfortunately, there
// is no way to apply comparisons* to interface{} types in
// Go, so if the datastore doesn't have a special way to
// handle these comparisons, you must provide the
// TypedFilter to actually do the filtering.
//
// [*] other than == and !=, which use reflect.DeepEqual.
type FilterValueCompare struct {
Op Op
Value interface{}
TypedFilter Filter
}
func (f FilterValueCompare) Filter(e Entry) bool {
if f.TypedFilter != nil {
return f.TypedFilter.Filter(e)
}
switch f.Op {
case Equal:
return reflect.DeepEqual(f.Value, e.Value)
case NotEqual:
return !reflect.DeepEqual(f.Value, e.Value)
default:
panic(fmt.Errorf("cannot apply op '%s' to interface{}.", f.Op))
}
}
type FilterKeyCompare struct {
Op Op
Key string
}
func (f FilterKeyCompare) Filter(e Entry) bool {
switch f.Op {
case Equal:
return e.Key == f.Key
case NotEqual:
return e.Key != f.Key
case GreaterThan:
return e.Key > f.Key
case GreaterThanOrEqual:
return e.Key >= f.Key
case LessThan:
return e.Key < f.Key
case LessThanOrEqual:
return e.Key <= f.Key
default:
panic(fmt.Errorf("unknown op '%s'", f.Op))
}
}
type FilterKeyPrefix struct {
Prefix string
}
func (f FilterKeyPrefix) Filter(e Entry) bool {
return strings.HasPrefix(e.Key, f.Prefix)
}
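A short illustration of the filters above:

f := FilterKeyPrefix{Prefix: "/Comedy"}
f.Filter(Entry{Key: "/Comedy/MontyPython"}) // true
f.Filter(Entry{Key: "/Tragedy/Hamlet"})     // false

g := FilterKeyCompare{Op: GreaterThan, Key: "/b"}
g.Filter(Entry{Key: "/c"}) // true: "/c" > "/b"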

View File

@ -1,66 +0,0 @@
package query
import (
"sort"
)
// Order is an object used to order objects
type Order interface {
// Sort sorts the Entry slice according to
// the Order criteria.
Sort([]Entry)
}
// OrderByValue is used to signal to datastores they
// should apply internal orderings. Unfortunately, there
// is no way to apply order comparisons to interface{} types
// in Go, so if the datastore doesn't have a special way to
// handle these comparisons, you must provide an Order
// implementation that casts to the correct type.
type OrderByValue struct {
TypedOrder Order
}
func (o OrderByValue) Sort(res []Entry) {
if o.TypedOrder == nil {
panic("cannot order interface{} by value. see query docs.")
}
o.TypedOrder.Sort(res)
}
// OrderByValueDescending is used to signal to datastores they
// should apply internal orderings. Unfortunately, there
// is no way to apply order comparisons to interface{} types
// in Go, so if the datastore doesn't have a special way to
// handle these comparisons, you must provide a TypedOrder.
type OrderByValueDescending struct {
TypedOrder Order
}
func (o OrderByValueDescending) Sort(res []Entry) {
if o.TypedOrder == nil {
panic("cannot order interface{} by value. see query docs.")
}
o.TypedOrder.Sort(res)
}
// OrderByKey orders results by key, ascending.
type OrderByKey struct{}
func (o OrderByKey) Sort(res []Entry) {
sort.Stable(reByKey(res))
}
// OrderByKeyDescending orders results by key, descending.
type OrderByKeyDescending struct{}
func (o OrderByKeyDescending) Sort(res []Entry) {
sort.Stable(sort.Reverse(reByKey(res)))
}
type reByKey []Entry
func (s reByKey) Len() int { return len(s) }
func (s reByKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s reByKey) Less(i, j int) bool { return s[i].Key < s[j].Key }
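A short illustration of the key orderings:

es := []Entry{{Key: "/b"}, {Key: "/a"}, {Key: "/c"}}
OrderByKey{}.Sort(es)           // es: /a, /b, /c
OrderByKeyDescending{}.Sort(es) // es: /c, /b, /a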

View File

@ -1,250 +0,0 @@
package query
import (
goprocess "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
)
/*
Query represents a plan for retrieving key-value pairs from a datastore.
tl;dr:
queries are supported across datastores.
Cheap on top of relational dbs, and expensive otherwise.
Pick the right tool for the job!
In addition to the key-value store get and set semantics, datastore
provides an interface to retrieve multiple records at a time through
the use of queries. The datastore Query model gleans a common set of
operations performed when querying. To avoid pasting here years of
database research, let's summarize the operations datastore supports.
Query Operations:
* namespace - scope the query, usually by object type
* filters - select a subset of values by applying constraints
* orders - sort the results by applying sort conditions
* limit - impose a numeric limit on the number of results
* offset - skip a number of results (for efficient pagination)
datastore combines these operations into a simple Query class that allows
applications to define their constraints in a simple, generic, way without
introducing datastore specific calls, languages, etc.
Of course, different datastores provide relational query support across a
wide spectrum, from full support in traditional databases to none at all in
most key-value stores. Datastore aims to provide a common, simple interface
for the sake of application evolution over time and keeping large code bases
free of tool-specific code. It would be ridiculous to claim to support high-
performance queries on architectures that obviously do not. Instead, datastore
provides the interface, ideally translating queries to their native form
(e.g. into SQL for MySQL).
However, on the wrong datastore, queries can potentially incur the high cost
of performing the aforementioned query operations on the data set directly in
Go. It is the client's responsibility to select the right tool for the job:
pick a data storage solution that fits the applications needs now, and wrap
it with a datastore implementation. As the needs change, swap out datastore
implementations to support your new use cases. Some applications, particularly
in early development stages, can afford to incur the cost of queries on non-
relational databases (e.g. using a FSDatastore and not worry about a database
at all). When it comes time to switch the tool for performance, updating the
application code can be as simple as swapping the datastore in one place, not
all over the application code base. This gain in engineering time, both at
initial development and during later iterations, can significantly offset the
cost of the layer of abstraction.
*/
type Query struct {
Prefix string // namespaces the query to results whose keys have Prefix
Filters []Filter // filter results. apply sequentially
Orders []Order // order results. apply sequentially
Limit int // maximum number of results
Offset int // skip given number of results
KeysOnly bool // return only keys.
}
// NotFetched is a special value that signals that the value
// of an Entry has not been fetched. This is needed because
// datastore implementations get to decide whether Query returns values
// or only keys. nil is not a good signal, as real values may be nil.
const NotFetched int = iota
// Entry is a query result entry.
type Entry struct {
Key string // can't be ds.Key because of circular imports
Value interface{}
}
// Result is a special entry that includes an error, so that the client
// may be warned about internal errors.
type Result struct {
Entry
Error error
}
// Results is a set of Query results. This is the interface for clients.
// Example:
//
// qr, _ := myds.Query(q)
// for r := range qr.Next() {
// if r.Error != nil {
// // handle.
// break
// }
//
// fmt.Println(r.Entry.Key, r.Entry.Value)
// }
//
// or, wait on all results at once:
//
// qr, _ := myds.Query(q)
// es, _ := qr.Rest()
// for _, e := range es {
// fmt.Println(e.Key, e.Value)
// }
//
type Results interface {
Query() Query // the query these Results correspond to
Next() <-chan Result // returns a channel to wait for the next result
Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once.
Close() error // client may call Close to signal early exit
// Process returns a goprocess.Process associated with these results.
// most users will not need this function (Close is all they want),
// but it's here in case you want to connect the results to other
// goprocess-friendly things.
Process() goprocess.Process
}
// results implements Results
type results struct {
query Query
proc goprocess.Process
res <-chan Result
}
func (r *results) Next() <-chan Result {
return r.res
}
func (r *results) Rest() ([]Entry, error) {
var es []Entry
for e := range r.res {
if e.Error != nil {
return es, e.Error
}
es = append(es, e.Entry)
}
<-r.proc.Closed() // wait till the processing finishes.
return es, nil
}
func (r *results) Process() goprocess.Process {
return r.proc
}
func (r *results) Close() error {
return r.proc.Close()
}
func (r *results) Query() Query {
return r.query
}
// ResultBuilder is what implementors use to construct results.
// Implementors of datastores and their clients must respect the
// Process of the request:
//
// * clients must call r.Process().Close() on an early exit, so
// implementations can reclaim resources.
// * if the Entries are read to completion (channel closed), Process
// should be closed automatically.
// * datastores must respect <-Process.Closing(), which intermediates
// an early close signal from the client.
//
type ResultBuilder struct {
Query Query
Process goprocess.Process
Output chan Result
}
// Results returns a Results object corresponding to this builder.
func (rb *ResultBuilder) Results() Results {
return &results{
query: rb.Query,
proc: rb.Process,
res: rb.Output,
}
}
func NewResultBuilder(q Query) *ResultBuilder {
b := &ResultBuilder{
Query: q,
Output: make(chan Result),
}
b.Process = goprocess.WithTeardown(func() error {
close(b.Output)
return nil
})
return b
}
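// As a sketch, a datastore implementation might emit a precomputed slice of
// entries through a ResultBuilder while honoring the early-close contract
// above (this is essentially what ResultsWithEntries below does; entries is
// a hypothetical slice):
//
//   b := NewResultBuilder(q)
//   b.Process.Go(func(worker goprocess.Process) {
//     for _, e := range entries {
//       select {
//       case b.Output <- Result{Entry: e}:
//       case <-worker.Closing(): // client closed early; stop sending
//         return
//       }
//     }
//   })
//   return b.Results()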
// ResultsWithChan returns a Results object from a channel
// of Result entries. Respects its own Close()
func ResultsWithChan(q Query, res <-chan Result) Results {
b := NewResultBuilder(q)
// go consume all the entries and add them to the results.
b.Process.Go(func(worker goprocess.Process) {
for {
select {
case <-worker.Closing(): // client told us to close early
return
case e, more := <-res:
if !more {
return
}
select {
case b.Output <- e:
case <-worker.Closing(): // client told us to close early
return
}
}
}
})
go b.Process.CloseAfterChildren()
return b.Results()
}
// ResultsWithEntries returns a Results object from a list of entries
func ResultsWithEntries(q Query, res []Entry) Results {
b := NewResultBuilder(q)
// go consume all the entries and add them to the results.
b.Process.Go(func(worker goprocess.Process) {
for _, e := range res {
select {
case b.Output <- Result{Entry: e}:
case <-worker.Closing(): // client told us to close early
return
}
}
return
})
go b.Process.CloseAfterChildren()
return b.Results()
}
func ResultsReplaceQuery(r Results, q Query) Results {
return &results{
query: q,
proc: r.Process(),
res: r.Next(),
}
}

View File

@ -1,127 +0,0 @@
package query
func DerivedResults(qr Results, ch <-chan Result) Results {
return &results{
query: qr.Query(),
proc: qr.Process(),
res: ch,
}
}
// NaiveFilter applies a filter to the results.
func NaiveFilter(qr Results, filter Filter) Results {
ch := make(chan Result)
go func() {
defer close(ch)
defer qr.Close()
for e := range qr.Next() {
if e.Error != nil || filter.Filter(e.Entry) {
ch <- e
}
}
}()
return DerivedResults(qr, ch)
}
// NaiveLimit truncates the results to a given int limit
func NaiveLimit(qr Results, limit int) Results {
ch := make(chan Result)
go func() {
defer close(ch)
defer qr.Close()
l := 0
for e := range qr.Next() {
if e.Error != nil {
ch <- e
continue
}
ch <- e
l++
if limit > 0 && l >= limit {
break
}
}
}()
return DerivedResults(qr, ch)
}
// NaiveOffset skips a given number of results
func NaiveOffset(qr Results, offset int) Results {
ch := make(chan Result)
go func() {
defer close(ch)
defer qr.Close()
sent := 0
for e := range qr.Next() {
if e.Error != nil {
ch <- e
continue
}
if sent < offset {
sent++
continue
}
ch <- e
}
}()
return DerivedResults(qr, ch)
}
// NaiveOrder reorders results according to given Order.
// WARNING: this is the only non-stream friendly operation!
func NaiveOrder(qr Results, o Order) Results {
ch := make(chan Result)
var entries []Entry
go func() {
defer close(ch)
defer qr.Close()
for e := range qr.Next() {
if e.Error != nil {
ch <- e
continue
}
entries = append(entries, e.Entry)
}
o.Sort(entries)
for _, e := range entries {
ch <- Result{Entry: e}
}
}()
return DerivedResults(qr, ch)
}
func NaiveQueryApply(q Query, qr Results) Results {
if q.Prefix != "" {
qr = NaiveFilter(qr, FilterKeyPrefix{q.Prefix})
}
for _, f := range q.Filters {
qr = NaiveFilter(qr, f)
}
for _, o := range q.Orders {
qr = NaiveOrder(qr, o)
}
if q.Offset != 0 {
qr = NaiveOffset(qr, q.Offset)
}
if q.Limit != 0 {
qr = NaiveLimit(qr, q.Limit)
}
return qr
}
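// As an illustrative sketch, a datastore whose backend can only enumerate
// every entry might fall back to applying the Query in Go (listAllEntries
// is a hypothetical helper returning all entries in the backend):
//
//   entries := listAllEntries()
//   raw := ResultsWithEntries(q, entries)
//   return NaiveQueryApply(q, raw), nil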
func ResultEntriesFrom(keys []string, vals []interface{}) []Entry {
re := make([]Entry, len(keys))
for i, k := range keys {
re[i] = Entry{Key: k, Value: vals[i]}
}
return re
}

View File

@ -1,92 +0,0 @@
package redis
import (
"errors"
"fmt"
"sync"
"time"
"github.com/fzzy/radix/redis"
datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
var _ datastore.Datastore = &Datastore{}
var _ datastore.ThreadSafeDatastore = &Datastore{}
var ErrInvalidType = errors.New("redis datastore: invalid type error. this datastore only supports []byte values")
func NewExpiringDatastore(client *redis.Client, ttl time.Duration) (*Datastore, error) {
return &Datastore{
client: client,
ttl: ttl,
}, nil
}
func NewDatastore(client *redis.Client) (*Datastore, error) {
return &Datastore{
client: client,
}, nil
}
type Datastore struct {
mu sync.Mutex
client *redis.Client
ttl time.Duration
}
func (ds *Datastore) Put(key datastore.Key, value interface{}) error {
ds.mu.Lock()
defer ds.mu.Unlock()
data, ok := value.([]byte)
if !ok {
return ErrInvalidType
}
ds.client.Append("SET", key.String(), data)
if ds.ttl != 0 {
ds.client.Append("EXPIRE", key.String(), ds.ttl.Seconds())
}
if err := ds.client.GetReply().Err; err != nil {
return fmt.Errorf("failed to put value: %s", err)
}
if ds.ttl != 0 {
if err := ds.client.GetReply().Err; err != nil {
return fmt.Errorf("failed to set expiration: %s", err)
}
}
return nil
}
func (ds *Datastore) Get(key datastore.Key) (value interface{}, err error) {
ds.mu.Lock()
defer ds.mu.Unlock()
return ds.client.Cmd("GET", key.String()).Bytes()
}
func (ds *Datastore) Has(key datastore.Key) (exists bool, err error) {
ds.mu.Lock()
defer ds.mu.Unlock()
return ds.client.Cmd("EXISTS", key.String()).Bool()
}
func (ds *Datastore) Delete(key datastore.Key) (err error) {
ds.mu.Lock()
defer ds.mu.Unlock()
return ds.client.Cmd("DEL", key.String()).Err
}
func (ds *Datastore) Query(q query.Query) (query.Results, error) {
return nil, errors.New("TODO implement query for redis datastore?")
}
func (ds *Datastore) IsThreadSafe() {}
func (ds *Datastore) Batch() (datastore.Batch, error) {
return nil, datastore.ErrBatchUnsupported
}
func (ds *Datastore) Close() error {
return ds.client.Close()
}
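// A usage sketch, assuming radix's Dial and a local redis at the default
// port (both assumptions, not part of this package):
//
//   client, err := redis.Dial("tcp", "localhost:6379")
//   if err != nil {
//     return err
//   }
//   d, _ := NewExpiringDatastore(client, 10*time.Minute)
//   err = d.Put(datastore.NewKey("/greeting"), []byte("hello"))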

View File

@ -1,116 +0,0 @@
package sync
import (
"io"
"sync"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// MutexDatastore contains a child datastore and a mutex.
// It is used for coarse-grained synchronization.
type MutexDatastore struct {
sync.RWMutex
child ds.Datastore
}
// MutexWrap constructs a datastore with a coarse lock around
// the entire datastore, for every single operation
func MutexWrap(d ds.Datastore) *MutexDatastore {
return &MutexDatastore{child: d}
}
// Children implements Shim
func (d *MutexDatastore) Children() []ds.Datastore {
return []ds.Datastore{d.child}
}
// IsThreadSafe implements ThreadSafeDatastore
func (d *MutexDatastore) IsThreadSafe() {}
// Put implements Datastore.Put
func (d *MutexDatastore) Put(key ds.Key, value interface{}) (err error) {
d.Lock()
defer d.Unlock()
return d.child.Put(key, value)
}
// Get implements Datastore.Get
func (d *MutexDatastore) Get(key ds.Key) (value interface{}, err error) {
d.RLock()
defer d.RUnlock()
return d.child.Get(key)
}
// Has implements Datastore.Has
func (d *MutexDatastore) Has(key ds.Key) (exists bool, err error) {
d.RLock()
defer d.RUnlock()
return d.child.Has(key)
}
// Delete implements Datastore.Delete
func (d *MutexDatastore) Delete(key ds.Key) (err error) {
d.Lock()
defer d.Unlock()
return d.child.Delete(key)
}
// Query implements Datastore.Query
func (d *MutexDatastore) Query(q dsq.Query) (dsq.Results, error) {
d.RLock()
defer d.RUnlock()
return d.child.Query(q)
}
func (d *MutexDatastore) Batch() (ds.Batch, error) {
d.RLock()
defer d.RUnlock()
bds, ok := d.child.(ds.Batching)
if !ok {
return nil, ds.ErrBatchUnsupported
}
b, err := bds.Batch()
if err != nil {
return nil, err
}
return &syncBatch{
batch: b,
mds: d,
}, nil
}
func (d *MutexDatastore) Close() error {
d.RWMutex.Lock()
defer d.RWMutex.Unlock()
if c, ok := d.child.(io.Closer); ok {
return c.Close()
}
return nil
}
type syncBatch struct {
batch ds.Batch
mds *MutexDatastore
}
func (b *syncBatch) Put(key ds.Key, val interface{}) error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Put(key, val)
}
func (b *syncBatch) Delete(key ds.Key) error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Delete(key)
}
func (b *syncBatch) Commit() error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Commit()
}
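// Usage sketch: wrap any non-thread-safe datastore to serialize access
// (ds.NewMapDatastore is the in-memory map datastore from go-datastore):
//
//   d := MutexWrap(ds.NewMapDatastore())
//   err := d.Put(ds.NewKey("/a"), []byte("1")) // now safe for concurrent use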

View File

@ -1,198 +0,0 @@
// Package syncmount provides a Datastore that has other Datastores
// mounted at various key prefixes and is threadsafe
package syncmount
import (
"errors"
"io"
"strings"
"sync"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/keytransform"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
var (
ErrNoMount = errors.New("no datastore mounted for this key")
)
type Mount struct {
Prefix ds.Key
Datastore ds.Datastore
}
func New(mounts []Mount) *Datastore {
// copy the slice so a caller's later mutations can't affect us
m := make([]Mount, len(mounts))
copy(m, mounts)
return &Datastore{mounts: m}
}
type Datastore struct {
mounts []Mount
lk sync.Mutex
}
var _ ds.Datastore = (*Datastore)(nil)
func (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) {
d.lk.Lock()
defer d.lk.Unlock()
for _, m := range d.mounts {
if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
s := strings.TrimPrefix(key.String(), m.Prefix.String())
k := ds.NewKey(s)
return m.Datastore, m.Prefix, k
}
}
return nil, ds.NewKey("/"), key
}
func (d *Datastore) Put(key ds.Key, value interface{}) error {
cds, _, k := d.lookup(key)
if cds == nil {
return ErrNoMount
}
return cds.Put(k, value)
}
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
cds, _, k := d.lookup(key)
if cds == nil {
return nil, ds.ErrNotFound
}
return cds.Get(k)
}
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
cds, _, k := d.lookup(key)
if cds == nil {
return false, nil
}
return cds.Has(k)
}
func (d *Datastore) Delete(key ds.Key) error {
cds, _, k := d.lookup(key)
if cds == nil {
return ds.ErrNotFound
}
return cds.Delete(k)
}
func (d *Datastore) Query(q query.Query) (query.Results, error) {
if len(q.Filters) > 0 ||
len(q.Orders) > 0 ||
q.Limit > 0 ||
q.Offset > 0 {
// TODO this is overly simplistic, but the only caller is
// `ipfs refs local` for now, and this gets us moving.
return nil, errors.New("mount only supports listing all prefixed keys in random order")
}
key := ds.NewKey(q.Prefix)
cds, mount, k := d.lookup(key)
if cds == nil {
return nil, errors.New("mount only supports listing a mount point")
}
// TODO support listing cross mount points too
// delegate the query to the mounted datastore, while adjusting
// keys in and out
q2 := q
q2.Prefix = k.String()
wrapDS := keytransform.Wrap(cds, &keytransform.Pair{
Convert: func(ds.Key) ds.Key {
panic("this should never be called")
},
Invert: func(k ds.Key) ds.Key {
return mount.Child(k)
},
})
r, err := wrapDS.Query(q2)
if err != nil {
return nil, err
}
r = query.ResultsReplaceQuery(r, q)
return r, nil
}
func (d *Datastore) IsThreadSafe() {}
func (d *Datastore) Close() error {
for _, d := range d.mounts {
if c, ok := d.Datastore.(io.Closer); ok {
err := c.Close()
if err != nil {
return err
}
}
}
return nil
}
type mountBatch struct {
mounts map[string]ds.Batch
lk sync.Mutex
d *Datastore
}
func (d *Datastore) Batch() (ds.Batch, error) {
return &mountBatch{
mounts: make(map[string]ds.Batch),
d: d,
}, nil
}
func (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) {
mt.lk.Lock()
defer mt.lk.Unlock()
child, loc, rest := mt.d.lookup(key)
t, ok := mt.mounts[loc.String()]
if !ok {
bds, ok := child.(ds.Batching)
if !ok {
return nil, ds.NewKey(""), ds.ErrBatchUnsupported
}
var err error
t, err = bds.Batch()
if err != nil {
return nil, ds.NewKey(""), err
}
mt.mounts[loc.String()] = t
}
return t, rest, nil
}
func (mt *mountBatch) Put(key ds.Key, val interface{}) error {
t, rest, err := mt.lookupBatch(key)
if err != nil {
return err
}
return t.Put(rest, val)
}
func (mt *mountBatch) Delete(key ds.Key) error {
t, rest, err := mt.lookupBatch(key)
if err != nil {
return err
}
return t.Delete(rest)
}
func (mt *mountBatch) Commit() error {
for _, t := range mt.mounts {
err := t.Commit()
if err != nil {
return err
}
}
return nil
}
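// Usage sketch: route keys under /blocks to one child datastore and
// everything else to another (blocksDS and rootDS are hypothetical children;
// list more specific prefixes first, since lookup scans mounts in order):
//
//   d := New([]Mount{
//     {Prefix: ds.NewKey("/blocks"), Datastore: blocksDS},
//     {Prefix: ds.NewKey("/"), Datastore: rootDS},
//   })
//   err := d.Put(ds.NewKey("/blocks/b1"), []byte("data")) // lands in blocksDS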

View File

@ -1,25 +0,0 @@
package dstest
import "testing"
func Nil(err error, t *testing.T, msgs ...string) {
if err != nil {
t.Fatal(msgs, "error:", err)
}
}
func True(v bool, t *testing.T, msgs ...string) {
if !v {
t.Fatal(msgs)
}
}
func False(v bool, t *testing.T, msgs ...string) {
True(!v, t, msgs...)
}
func Err(err error, t *testing.T, msgs ...string) {
if err == nil {
t.Fatal(msgs, "error:", err)
}
}

View File

@ -1,99 +0,0 @@
package dstest
import (
"bytes"
"encoding/base32"
"testing"
rand "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/randbo"
dstore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
)
func RunBatchTest(t *testing.T, ds dstore.Batching) {
batch, err := ds.Batch()
if err != nil {
t.Fatal(err)
}
r := rand.New()
var blocks [][]byte
var keys []dstore.Key
for i := 0; i < 20; i++ {
blk := make([]byte, 256*1024)
r.Read(blk)
blocks = append(blocks, blk)
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
keys = append(keys, key)
err := batch.Put(key, blk)
if err != nil {
t.Fatal(err)
}
}
// Ensure they are not in the datastore before committing
for _, k := range keys {
_, err := ds.Get(k)
if err == nil {
t.Fatal("should not have found this block")
}
}
// commit, write them to the datastore
err = batch.Commit()
if err != nil {
t.Fatal(err)
}
for i, k := range keys {
blk, err := ds.Get(k)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(blk.([]byte), blocks[i]) {
t.Fatal("blocks not correct!")
}
}
}
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
r := rand.New()
var keys []dstore.Key
for i := 0; i < 20; i++ {
blk := make([]byte, 16)
r.Read(blk)
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
keys = append(keys, key)
err := ds.Put(key, blk)
if err != nil {
t.Fatal(err)
}
}
batch, err := ds.Batch()
if err != nil {
t.Fatal(err)
}
for _, k := range keys {
err := batch.Delete(k)
if err != nil {
t.Fatal(err)
}
}
err = batch.Commit()
if err != nil {
t.Fatal(err)
}
for _, k := range keys {
_, err := ds.Get(k)
if err == nil {
t.Fatal("shouldnt have found block")
}
}
}

View File

@ -1,94 +0,0 @@
package tiered
import (
"fmt"
"sync"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
type tiered []ds.Datastore
// New returns a tiered datastore. Puts and Deletes will write-through to
// all datastores, Has and Get will try each datastore sequentially, and
// Query will always try the last one (most complete) first.
func New(dses ...ds.Datastore) tiered {
return tiered(dses)
}
// Put stores the object `value` named by `key`.
func (d tiered) Put(key ds.Key, value interface{}) (err error) {
errs := make(chan error, len(d))
var wg sync.WaitGroup
for _, cd := range d {
wg.Add(1)
go func(cd ds.Datastore) {
defer wg.Done()
if err := cd.Put(key, value); err != nil {
errs <- err
}
}(cd)
}
wg.Wait()
close(errs)
for err := range errs {
return err
}
return nil
}
// Get retrieves the object `value` named by `key`.
func (d tiered) Get(key ds.Key) (value interface{}, err error) {
err = fmt.Errorf("no datastores")
for _, cd := range d {
value, err = cd.Get(key)
if err == nil {
break
}
}
return
}
// Has returns whether the `key` is mapped to a `value`.
func (d tiered) Has(key ds.Key) (exists bool, err error) {
err = fmt.Errorf("no datastores")
for _, cd := range d {
exists, err = cd.Has(key)
if err == nil && exists {
break
}
}
return
}
// Delete removes the value for given `key`.
func (d tiered) Delete(key ds.Key) (err error) {
errs := make(chan error, len(d))
var wg sync.WaitGroup
for _, cd := range d {
wg.Add(1)
go func(cd ds.Datastore) {
defer wg.Done()
if err := cd.Delete(key); err != nil {
errs <- err
}
}(cd)
}
wg.Wait()
close(errs)
for err := range errs {
return err
}
return nil
}
// Query delegates the query to the last (most complete) datastore.
func (d tiered) Query(q dsq.Query) (dsq.Results, error) {
// query always the last (most complete) one
return d[len(d)-1].Query(q)
}
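// Usage sketch: place a fast in-memory cache in front of a slower persistent
// store (cacheDS, diskDS, k, and v are hypothetical):
//
//   d := New(cacheDS, diskDS)
//   err := d.Put(k, v) // writes through to both
//   v, err = d.Get(k)  // tries cacheDS first, then diskDS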

View File

@ -1,104 +0,0 @@
package timecache
import (
"io"
"sync"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
)
// op keys
var (
putKey = "put"
getKey = "get"
hasKey = "has"
deleteKey = "delete"
)
type datastore struct {
cache ds.Datastore
ttl time.Duration
ttlmu sync.Mutex
ttls map[ds.Key]time.Time
}
func WithTTL(ttl time.Duration) *datastore {
return WithCache(ds.NewMapDatastore(), ttl)
}
// WithCache wraps a given datastore as a timecache.
// Entries expire after the TTL; Get and Has first garbage-collect expired entries.
func WithCache(d ds.Datastore, ttl time.Duration) *datastore {
return &datastore{cache: d, ttl: ttl, ttls: make(map[ds.Key]time.Time)}
}
func (d *datastore) gc() {
var now = time.Now()
var del []ds.Key
// remove all expired ttls.
d.ttlmu.Lock()
for k, ttl := range d.ttls {
if now.After(ttl) {
delete(d.ttls, k)
del = append(del, k)
}
}
d.ttlmu.Unlock()
for _, k := range del {
d.cache.Delete(k)
}
}
func (d *datastore) ttlPut(key ds.Key) {
d.ttlmu.Lock()
d.ttls[key] = time.Now().Add(d.ttl)
d.ttlmu.Unlock()
}
func (d *datastore) ttlDelete(key ds.Key) {
d.ttlmu.Lock()
delete(d.ttls, key)
d.ttlmu.Unlock()
}
// Put stores the object `value` named by `key`.
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
err = d.cache.Put(key, value)
d.ttlPut(key)
return err
}
// Get retrieves the object `value` named by `key`.
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
d.gc()
return d.cache.Get(key)
}
// Has returns whether the `key` is mapped to a `value`.
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
d.gc()
return d.cache.Has(key)
}
// Delete removes the value for given `key`.
func (d *datastore) Delete(key ds.Key) (err error) {
d.ttlDelete(key)
return d.cache.Delete(key)
}
// Query delegates to the underlying cache datastore.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
return d.cache.Query(q)
}
func (d *datastore) Close() error {
if c, ok := d.cache.(io.Closer); ok {
return c.Close()
}
return nil
}
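// Usage sketch: cache values for one minute, backed by an in-memory map
// datastore (the key and value here are hypothetical):
//
//   c := WithTTL(time.Minute)
//   _ = c.Put(ds.NewKey("/session/abc"), []byte("token"))
//   v, err := c.Get(ds.NewKey("/session/abc")) // ds.ErrNotFound once expired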

View File

@ -1,8 +0,0 @@
language: go
go:
- 1.4
- release
script:
- go test -race -cpu=5 -v ./...

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,13 +0,0 @@
# go-os-rename
Easily rename files in place. This is needed because Windows errors on rename if a file already exists. Please see this commit, from which the code is extracted: https://github.com/lvarvel/cacheddownloader/commit/505a1fd
#### Godoc: https://godoc.org/github.com/jbenet/go-os-rename
## Authors
The original authors of this code are David Morhovich, David Varvel and John Shahid (see [this commit](https://github.com/lvarvel/cacheddownloader/commit/505a1fdcc5af7823f20d7c87d9e4d1c833c59053)).
## License
The code originally comes from https://github.com/lvarvel/cacheddownloader, which is licensed Apache 2.0.

View File

@ -1,9 +0,0 @@
// +build !windows
package osrename
import "os"
func Rename(src, dst string) error {
return os.Rename(src, dst)
}

View File

@ -1,60 +0,0 @@
package osrename_test
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
rn "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename"
)
func tempdir(t testing.TB) (path string, cleanup func()) {
path, err := ioutil.TempDir("", "test-windows-rename")
if err != nil {
t.Fatalf("cannot create temp directory: %v", err)
}
cleanup = func() {
if err := os.RemoveAll(path); err != nil {
t.Errorf("tempdir cleanup failed: %v", err)
}
}
return path, cleanup
}
func TestAtomicRename(t *testing.T) {
dirBase, cleanup := tempdir(t)
defer cleanup()
// Create base file
origFilePath := filepath.Join(dirBase, "original.txt")
err := ioutil.WriteFile(origFilePath, []byte("tests"), 0644)
if err != nil {
t.Fatalf("Could not write original test file")
}
// Create secondary file
tempFilePath := filepath.Join(dirBase, "newTempFile.txt")
err = ioutil.WriteFile(tempFilePath, []byte("success"), 0644)
if err != nil {
t.Fatalf("Could not write temp file")
}
// Execute our magic rename function
err = rn.Rename(tempFilePath, origFilePath)
if err != nil {
t.Fatalf("Could not rename temp file")
}
// Let's read the renamed file and ensure that we get data
renamedFileBytes, err := ioutil.ReadFile(origFilePath)
if err != nil {
t.Fatalf("Could not read renamed file")
}
// Let's compare the bytes of the renamed file
if !bytes.Equal(renamedFileBytes, []byte("success")) {
t.Fatalf("Did not find expected bytes in renamed file: %q vs %q", renamedFileBytes, []byte("success"))
}
}

View File

@ -1,43 +0,0 @@
// +build windows
package osrename
import (
"syscall"
"unsafe"
)
func Rename(src, dst string) error {
kernel32, err := syscall.LoadLibrary("kernel32.dll")
if err != nil {
return err
}
defer syscall.FreeLibrary(kernel32)
moveFileExUnicode, err := syscall.GetProcAddress(kernel32, "MoveFileExW")
if err != nil {
return err
}
srcString, err := syscall.UTF16PtrFromString(src)
if err != nil {
return err
}
dstString, err := syscall.UTF16PtrFromString(dst)
if err != nil {
return err
}
srcPtr := uintptr(unsafe.Pointer(srcString))
dstPtr := uintptr(unsafe.Pointer(dstString))
MOVEFILE_REPLACE_EXISTING := 0x1
flag := uintptr(MOVEFILE_REPLACE_EXISTING)
_, _, callErr := syscall.Syscall(uintptr(moveFileExUnicode), 3, srcPtr, dstPtr, flag)
if callErr != 0 {
return callErr
}
return nil
}

View File

@ -1,292 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n == 0 {
return 0, 0, ErrCorrupt
}
if uint64(int(v)) != v {
return 0, 0, errors.New("snappy: decoded block is too large")
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s += 1
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
}
}
// Reader is an io.Reader than can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
} else {
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
return 0, r.err
}
}
}
}

View File

@ -1,258 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
case n < 1<<16:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
case n < 1<<24:
dst[0] = 62<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
i = 4
case int64(n) < 1<<32:
dst[0] = 63<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
dst[4] = uint8(n >> 24)
i = 5
default:
panic("snappy: source buffer is too long")
}
if copy(dst[i:], lit) != len(lit) {
panic("snappy: destination buffer is too short")
}
return i + len(lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
i := 0
for length > 0 {
x := length - 4
if 0 <= x && x < 1<<3 && offset < 1<<11 {
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
dst[i+1] = uint8(offset)
i += 2
break
}
x = length
if x > 1<<6 {
x = 1 << 6
}
dst[i+0] = uint8(x-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= x
}
return i
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
// Return early if src is short.
if len(src) <= 4 {
if len(src) != 0 {
d += emitLiteral(dst[d:], src)
}
return dst[:d], nil
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
const maxTableSize = 1 << 14
shift, tableSize := uint(32-8), 1<<8
for tableSize < maxTableSize && tableSize < len(src) {
shift--
tableSize *= 2
}
var table [maxTableSize]int
// Iterate over the source bytes.
var (
s int // The iterator position.
t int // The last position with the same hash as s.
lit int // The start position of any pending literal bytes.
)
for s+3 < len(src) {
// Update the hash table.
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
p := &table[(h*0x1e35a7bd)>>shift]
// We need to store values in [-1, inf) in table. To save
// some initialization time, (re)use the table's zero value
// and shift the values against this zero: add 1 on writes,
// subtract 1 on reads.
t, *p = *p-1, s+1
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
s++
continue
}
// Otherwise, we have a match. First, emit any pending literal bytes.
if lit != s {
d += emitLiteral(dst[d:], src[lit:s])
}
// Extend the match to be as long as possible.
s0 := s
s, t = s+4, t+4
for s < len(src) && src[s] == src[t] {
s++
t++
}
// Emit the copied bytes.
d += emitCopy(dst[d:], s-t, s-s0)
lit = s
}
// Emit any final pending literal bytes and return.
if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return dst[:d], nil
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
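// As a worked instance of this bound: for srcLen = 60000 the function
// returns 32 + 60000 + 60000/6 = 70032, so a 70032-byte dst always
// suffices for a 60000-byte block.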
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
// Writer is an io.Writer than can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
buf [checksumSize + chunkHeaderSize]byte
wroteHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody, err := Encode(w.enc, uncompressed)
if err != nil {
w.err = err
return n, err
}
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err = w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err = w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}

View File

@ -1,68 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}
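// A minimal block-format roundtrip sketch using the Encode and Decode
// functions defined in this package (passing nil lets each allocate its
// own buffer; errors are elided for brevity):
//
//   enc, _ := Encode(nil, []byte("hello hello hello hello"))
//   dec, _ := Decode(nil, enc) // dec equals the original input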

View File

@ -1,364 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
)
var (
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)
func roundtrip(b, ebuf, dbuf []byte) error {
e, err := Encode(ebuf, b)
if err != nil {
return fmt.Errorf("encoding error: %v", err)
}
d, err := Decode(dbuf, e)
if err != nil {
return fmt.Errorf("decoding error: %v", err)
}
if !bytes.Equal(b, d) {
return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
}
return nil
}
func TestEmpty(t *testing.T) {
if err := roundtrip(nil, nil, nil); err != nil {
t.Fatal(err)
}
}
func TestSmallCopy(t *testing.T) {
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for i := 0; i < 32; i++ {
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
}
}
}
}
}
func TestSmallRand(t *testing.T) {
rng := rand.New(rand.NewSource(27354294))
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(rng.Uint32())
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func cmp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
}
for i := range a {
if a[i] != b[i] {
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
}
}
return nil
}
func TestFramingFormat(t *testing.T) {
// src is comprised of alternating 1e5-sized sequences of random
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
// because it is larger than maxUncompressedChunkLen (64k).
src := make([]byte, 1e6)
rng := rand.New(rand.NewSource(1))
for i := 0; i < 10; i++ {
if i%2 == 0 {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(rng.Intn(256))
}
} else {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(i)
}
}
}
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(src); err != nil {
t.Fatalf("Write: encoding: %v", err)
}
dst, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Fatalf("ReadAll: decoding: %v", err)
}
if err := cmp(dst, src); err != nil {
t.Fatal(err)
}
}
func TestReaderReset(t *testing.T) {
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(gold); err != nil {
t.Fatalf("Write: %v", err)
}
encoded, invalid, partial := buf.String(), "invalid", "partial"
r := NewReader(nil)
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
if s == partial {
r.Reset(strings.NewReader(encoded))
if _, err := r.Read(make([]byte, 101)); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
continue
}
r.Reset(strings.NewReader(s))
got, err := ioutil.ReadAll(r)
switch s {
case encoded:
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if err := cmp(got, gold); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
case invalid:
if err == nil {
t.Errorf("#%d: got nil error, want non-nil", i)
continue
}
}
}
}
func TestWriterReset(t *testing.T) {
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
var gots, wants [][]byte
const n = 20
w, failed := NewWriter(nil), false
for i := 0; i <= n; i++ {
buf := new(bytes.Buffer)
w.Reset(buf)
want := gold[:len(gold)*i/n]
if _, err := w.Write(want); err != nil {
t.Errorf("#%d: Write: %v", i, err)
failed = true
continue
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("#%d: ReadAll: %v", i, err)
failed = true
continue
}
gots = append(gots, got)
wants = append(wants, want)
}
if failed {
return
}
for i := range gots {
if err := cmp(gots[i], wants[i]); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
}
func benchDecode(b *testing.B, src []byte) {
encoded, err := Encode(nil, src)
if err != nil {
b.Fatal(err)
}
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Decode(src, encoded)
}
}
func benchEncode(b *testing.B, src []byte) {
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
dst := make([]byte, MaxEncodedLen(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Encode(dst, src)
}
}
func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Fatalf("failed reading %s: %s", filename, err)
}
if len(src) == 0 {
b.Fatalf("%s has zero length", filename)
}
return src
}
// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
dst := make([]byte, n)
for x := dst; len(x) > 0; {
i := copy(x, src)
x = x[i:]
}
return dst
}
func benchWords(b *testing.B, n int, decode bool) {
// NOTE: The file is OS-language dependent so the resulting values are not
// directly comparable for non-US-English OS installations.
data := expand(readFile(b, "/usr/share/dict/words"), n)
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
filename string
}{
{"html", "html"},
{"urls", "urls.10K"},
{"jpg", "fireworks.jpeg"},
{"jpg_200", "fireworks.jpeg"},
{"pdf", "paper-100k.pdf"},
{"html4", "html_x_4"},
{"txt1", "alice29.txt"},
{"txt2", "asyoulik.txt"},
{"txt3", "lcet10.txt"},
{"txt4", "plrabn12.txt"},
{"pb", "geo.protodata"},
{"gaviota", "kppkn.gtb"},
}
// The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(basename string) (errRet error) {
filename := filepath.Join(*testdata, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil
}
if !*download {
return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create testdata: %s", err)
}
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
}
defer f.Close()
defer func() {
if errRet != nil {
os.Remove(filename)
}
}()
url := baseURL + basename
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("failed to download %s: %s", url, err)
}
defer resp.Body.Close()
if s := resp.StatusCode; s != http.StatusOK {
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
}
_, err = io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
if err := downloadTestdata(testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
// Naming convention is kept similar to what snappy's C++ implementation uses.
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }

func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
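
The helpers used above (readFile, expand, benchDecode, benchEncode) are defined earlier in this file and fall outside this excerpt. For orientation, here is a minimal sketch of their conventional shapes, assuming the package's exported Encode, Decode, and MaxEncodedLen functions; this is a plausible reconstruction, not the verbatim deleted source.

// readFile returns the contents of filename, skipping the benchmark when
// the file is unavailable.
func readFile(b testing.TB, filename string) []byte {
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		b.Skipf("skipping benchmark: %v", err)
	}
	if len(src) == 0 {
		b.Fatalf("%s has zero length", filename)
	}
	return src
}

// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
	dst := make([]byte, n)
	for x := dst; len(x) > 0; {
		i := copy(x, src)
		x = x[i:]
	}
	return dst
}

// benchDecode reports decompression throughput in terms of the
// uncompressed data size.
func benchDecode(b *testing.B, src []byte) {
	encoded := Encode(nil, src)
	b.SetBytes(int64(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Decode(src, encoded) // decode encoded back into src's buffer
	}
}

// benchEncode reports compression throughput in terms of the
// uncompressed data size.
func benchEncode(b *testing.B, src []byte) {
	dst := make([]byte, MaxEncodedLen(len(src)))
	b.SetBytes(int64(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Encode(dst, src)
	}
}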

@@ -7,13 +7,13 @@ import (
"sync"
"sync/atomic"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
blocks "github.com/ipfs/go-ipfs/blocks"
key "github.com/ipfs/go-ipfs/blocks/key"
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dsns "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/namespace"
dsq "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/query"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
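
The hunk above is representative of the whole migration: imports that reached into the vendored Godeps workspace are rewritten to content-addressed gx paths, where the Qm… component is the IPFS multihash under which the pinned release of the package is published. Schematically, using the go-datastore paths from the hunk above:

import (
	// Before: resolved out of the repo-local Godeps vendor tree.
	// ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"

	// After: fetched by gx and addressed by the package's multihash,
	// so the import string itself pins an exact release.
	ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
)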

@@ -5,9 +5,9 @@ import (
"fmt"
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dsq "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/query"
ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
blocks "github.com/ipfs/go-ipfs/blocks"

@@ -1,9 +1,9 @@
package blockstore
import (
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru"
"github.com/ipfs/go-ipfs/blocks"
key "github.com/ipfs/go-ipfs/blocks/key"
"gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -3,10 +3,10 @@ package blockstore
import (
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query"
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs/blocks"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dsq "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/query"
syncds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
)
func TestReturnsErrorWhenSizeNegative(t *testing.T) {

@@ -4,9 +4,9 @@ import (
"encoding/json"
"fmt"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58"
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
)
// Key is a string representation of multihash for use with maps.

@@ -5,14 +5,14 @@ import (
"testing"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
blocks "github.com/ipfs/go-ipfs/blocks"
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil"
key "github.com/ipfs/go-ipfs/blocks/key"
. "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -11,8 +11,8 @@ import (
"strings"
"sync"
_ "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics/runtime"
"gx/ipfs/QmPpRcbNUXauP3zWZ1NJMLWpe4QnmEHrd2ba2D3yqWznw7/go-multiaddr-net"
_ "gx/ipfs/QmV3NSS3A1kX5s28r7yLczhDsXzkgo65cqRgKFXYunWZmD/metrics/runtime"
ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr"

@@ -5,8 +5,6 @@ import (
"encoding/base64"
"errors"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dsync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
key "github.com/ipfs/go-ipfs/blocks/key"
bserv "github.com/ipfs/go-ipfs/blockservice"
@@ -16,6 +14,8 @@ import (
pin "github.com/ipfs/go-ipfs/pin"
repo "github.com/ipfs/go-ipfs/repo"
cfg "github.com/ipfs/go-ipfs/repo/config"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dsync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
goprocessctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context"
ci "gx/ipfs/QmUEUu1CM8bxBJxc3ZLojAi8evhTr4byQogWstABet79oY/go-libp2p-crypto"

@@ -16,7 +16,6 @@ import (
"net"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
diag "github.com/ipfs/go-ipfs/diagnostics"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
discovery "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/discovery"
@@ -34,6 +33,7 @@ import (
pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
routing "github.com/ipfs/go-ipfs/routing"

@@ -3,7 +3,6 @@ package corerouting
import (
"errors"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
core "github.com/ipfs/go-ipfs/core"
repo "github.com/ipfs/go-ipfs/repo"
routing "github.com/ipfs/go-ipfs/routing"
@@ -11,6 +10,7 @@ import (
gcproxy "github.com/ipfs/go-ipfs/routing/supernode/proxy"
"gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/host"
pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -7,8 +7,6 @@ import (
"os"
gopath "path"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
key "github.com/ipfs/go-ipfs/blocks/key"
bserv "github.com/ipfs/go-ipfs/blockservice"
@@ -17,6 +15,8 @@ import (
"github.com/ipfs/go-ipfs/importer/chunk"
mfs "github.com/ipfs/go-ipfs/mfs"
"github.com/ipfs/go-ipfs/pin"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
syncds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
bs "github.com/ipfs/go-ipfs/blocks/blockstore"

@@ -5,8 +5,8 @@ import (
"io/ioutil"
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"

@@ -3,8 +3,8 @@ package coremock
import (
"net"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
syncds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
commands "github.com/ipfs/go-ipfs/commands"

@@ -8,13 +8,13 @@ import (
"sync"
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
blocks "github.com/ipfs/go-ipfs/blocks"
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
message "github.com/ipfs/go-ipfs/exchange/bitswap/message"
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -1,12 +1,12 @@
package bitswap
import (
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
mockrouting "github.com/ipfs/go-ipfs/routing/mock"
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
mockpeernet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net/mock"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -3,8 +3,6 @@ package bitswap
import (
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
@@ -12,6 +10,8 @@ import (
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -3,12 +3,12 @@ package offline
import (
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
blocks "github.com/ipfs/go-ipfs/blocks"
"github.com/ipfs/go-ipfs/blocks/blockstore"
"github.com/ipfs/go-ipfs/blocks/blocksutil"
key "github.com/ipfs/go-ipfs/blocks/key"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -3,12 +3,12 @@ package reprovide_test
import (
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
blocks "github.com/ipfs/go-ipfs/blocks"
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
mock "github.com/ipfs/go-ipfs/routing/mock"
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
. "github.com/ipfs/go-ipfs/exchange/reprovide"

@@ -10,8 +10,6 @@ import (
"sync"
"testing"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
key "github.com/ipfs/go-ipfs/blocks/key"
bserv "github.com/ipfs/go-ipfs/blockservice"
@@ -23,6 +21,8 @@ import (
dstest "github.com/ipfs/go-ipfs/merkledag/test"
"github.com/ipfs/go-ipfs/pin"
uio "github.com/ipfs/go-ipfs/unixfs/io"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -1,12 +1,12 @@
package mdutils
import (
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs/blocks/blockstore"
bsrv "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
)
func Mock() dag.DAGService {

@@ -3,8 +3,8 @@ package dagutils
import (
"errors"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
syncds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"

@@ -13,10 +13,10 @@ import (
"testing"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs/path"
randbo "gx/ipfs/QmYvsG72GsfLgUeSojXArjnU6L4Wmwk7wuAxtNLuyXcc1T/randbo"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"

@@ -4,10 +4,10 @@ import (
"strings"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
path "github.com/ipfs/go-ipfs/path"
routing "github.com/ipfs/go-ipfs/routing"
ci "gx/ipfs/QmUEUu1CM8bxBJxc3ZLojAi8evhTr4byQogWstABet79oY/go-libp2p-crypto"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -6,8 +6,8 @@ import (
"fmt"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
key "github.com/ipfs/go-ipfs/blocks/key"

@@ -12,13 +12,13 @@ import (
"github.com/ipfs/go-ipfs/routing"
dhtpb "github.com/ipfs/go-ipfs/routing/dht/pb"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
goprocess "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
gpctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context"
pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -5,13 +5,13 @@ import (
"testing"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
key "github.com/ipfs/go-ipfs/blocks/key"
path "github.com/ipfs/go-ipfs/path"
mockrouting "github.com/ipfs/go-ipfs/routing/mock"
testutil "github.com/ipfs/go-ipfs/thirdparty/testutil"
peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -5,7 +5,7 @@ import (
"strings"
"time"
lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/hashicorp/golang-lru"
lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"

@@ -141,6 +141,24 @@
"hash": "QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv",
"name": "fsnotify.v1",
"version": "1.3.0"
},
{
"author": "jbenet",
"hash": "QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN",
"name": "go-datastore",
"version": "0.0.1"
},
{
"author": "codahale",
"hash": "QmV3NSS3A1kX5s28r7yLczhDsXzkgo65cqRgKFXYunWZmD",
"name": "metrics",
"version": "0.0.0"
},
{
"author": "hashicorp",
"hash": "QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy",
"name": "golang-lru",
"version": "0.0.0"
}
],
"gxVersion": "0.4.0",

@@ -7,11 +7,11 @@ import (
"sync"
"time"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
key "github.com/ipfs/go-ipfs/blocks/key"
"github.com/ipfs/go-ipfs/blocks/set"
mdag "github.com/ipfs/go-ipfs/merkledag"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

@@ -6,13 +6,13 @@ import (
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs/blocks/blockstore"
key "github.com/ipfs/go-ipfs/blocks/key"
bs "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
mdag "github.com/ipfs/go-ipfs/merkledag"
ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
)

@@ -4,14 +4,14 @@ import (
"testing"
"testing/quick"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs/blocks/blockstore"
"github.com/ipfs/go-ipfs/blocks/key"
"github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
"github.com/ipfs/go-ipfs/merkledag"
mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
"gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

Some files were not shown because too many files have changed in this diff.