mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-26 12:57:44 +08:00
commit
c6ffaa4a77
1
.gitignore
vendored
1
.gitignore
vendored
@ -3,3 +3,4 @@
|
||||
*.out
|
||||
*.test
|
||||
*.orig
|
||||
*~
|
||||
|
||||
@ -8,3 +8,5 @@ go:
|
||||
|
||||
script:
|
||||
- go test -v ./...
|
||||
|
||||
env: TEST_NO_FUSE=1
|
||||
|
||||
14
Godeps/Godeps.json
generated
14
Godeps/Godeps.json
generated
@ -38,6 +38,10 @@
|
||||
"Comment": "null-15",
|
||||
"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/bren2010/proquint",
|
||||
"Rev": "5958552242606512f714d2e93513b380f43f9991"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/camlistore/lock",
|
||||
"Rev": "ae27720f340952636b826119b58130b9c1a847a0"
|
||||
@ -60,20 +64,24 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/datastore.go",
|
||||
"Rev": "e89f0511689bb2d0608496e15491f241842de085"
|
||||
"Rev": "60ebc56447b5a8264cfed3ae3ff48deb984d7cf1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-base58",
|
||||
"Rev": "568a28d73fd97651d3442392036a658b6976eed5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-is-domain",
|
||||
"Rev": "93b717f2ae17838a265e30277275ee99ee7198d6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-msgio",
|
||||
"Rev": "c9069ab79c95aa0686347b516972c7329c4391f2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-multiaddr",
|
||||
"Comment": "0.1.2-3-g74443fc",
|
||||
"Rev": "74443fca319c4c2f5e9968b8e268c30a4a74dc64"
|
||||
"Comment": "0.1.2-9-g1ec9436",
|
||||
"Rev": "1ec9436b1d642f4f04c0d9e21a0719cda3d659ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-multihash",
|
||||
|
||||
6
Godeps/_workspace/src/github.com/bren2010/proquint/README.md
generated
vendored
Normal file
6
Godeps/_workspace/src/github.com/bren2010/proquint/README.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
Proquint
|
||||
-------
|
||||
|
||||
Golang implementation of [Proquint Pronounceable Identifiers](https://github.com/deoxxa/proquint).
|
||||
|
||||
|
||||
123
Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go
generated
vendored
Normal file
123
Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
/*
|
||||
Copyright (c) 2014 Brendan McMillion
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
|
||||
package proquint
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
conse = [...]byte{'b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n',
|
||||
'p', 'r', 's', 't', 'v', 'z'}
|
||||
vowse = [...]byte{'a', 'i', 'o', 'u'}
|
||||
|
||||
consd = map[byte] uint16 {
|
||||
'b' : 0, 'd' : 1, 'f' : 2, 'g' : 3,
|
||||
'h' : 4, 'j' : 5, 'k' : 6, 'l' : 7,
|
||||
'm' : 8, 'n' : 9, 'p' : 10, 'r' : 11,
|
||||
's' : 12, 't' : 13, 'v' : 14, 'z' : 15,
|
||||
}
|
||||
|
||||
vowsd = map[byte] uint16 {
|
||||
'a' : 0, 'i' : 1, 'o' : 2, 'u' : 3,
|
||||
}
|
||||
)
|
||||
|
||||
/**
|
||||
* Tests if a given string is a Proquint identifier
|
||||
*
|
||||
* @param {string} str The candidate string.
|
||||
*
|
||||
* @return {bool} Whether or not it qualifies.
|
||||
* @return {error} Error
|
||||
*/
|
||||
func IsProquint(str string) (bool, error) {
|
||||
exp := "^([abdfghijklmnoprstuvz]{5}-)*[abdfghijklmnoprstuvz]{5}$"
|
||||
ok, err := regexp.MatchString(exp, str)
|
||||
|
||||
return ok, err
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes an arbitrary byte slice into an identifier.
|
||||
*
|
||||
* @param {[]byte} buf Slice of bytes to encode.
|
||||
*
|
||||
* @return {string} The given byte slice as an identifier.
|
||||
*/
|
||||
func Encode(buf []byte) string {
|
||||
var out bytes.Buffer
|
||||
|
||||
for i := 0; i < len(buf); i = i + 2 {
|
||||
var n uint16 = (uint16(buf[i]) * 256) + uint16(buf[i + 1])
|
||||
|
||||
var (
|
||||
c1 = n & 0x0f
|
||||
v1 = (n >> 4) & 0x03
|
||||
c2 = (n >> 6) & 0x0f
|
||||
v2 = (n >> 10) & 0x03
|
||||
c3 = (n >> 12) & 0x0f
|
||||
)
|
||||
|
||||
out.WriteByte(conse[c1])
|
||||
out.WriteByte(vowse[v1])
|
||||
out.WriteByte(conse[c2])
|
||||
out.WriteByte(vowse[v2])
|
||||
out.WriteByte(conse[c3])
|
||||
|
||||
if (i + 2) < len(buf) {
|
||||
out.WriteByte('-')
|
||||
}
|
||||
}
|
||||
|
||||
return out.String()
|
||||
}
|
||||
|
||||
/**
|
||||
* Decodes an identifier into its corresponding byte slice.
|
||||
*
|
||||
* @param {string} str Identifier to convert.
|
||||
*
|
||||
* @return {[]byte} The identifier as a byte slice.
|
||||
*/
|
||||
func Decode(str string) []byte {
|
||||
var (
|
||||
out bytes.Buffer
|
||||
bits []string = strings.Split(str, "-")
|
||||
)
|
||||
|
||||
for i := 0; i < len(bits); i++ {
|
||||
var x uint16 = consd[bits[i][0]] +
|
||||
(vowsd[bits[i][1]] << 4) +
|
||||
(consd[bits[i][2]] << 6) +
|
||||
(vowsd[bits[i][3]] << 10) +
|
||||
(consd[bits[i][4]] << 12)
|
||||
|
||||
out.WriteByte(byte(x >> 8))
|
||||
out.WriteByte(byte(x))
|
||||
}
|
||||
|
||||
return out.Bytes()
|
||||
}
|
||||
43
Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Godeps.json
generated
vendored
Normal file
43
Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Godeps.json
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/datastore.go",
|
||||
"GoVersion": "go1.3.1",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "code.google.com/p/go-uuid/uuid",
|
||||
"Comment": "null-12",
|
||||
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/snappy-go/snappy",
|
||||
"Comment": "null-15",
|
||||
"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codahale/blake2",
|
||||
"Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattbaird/elastigo/api",
|
||||
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattbaird/elastigo/core",
|
||||
"Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "9bca75c48d6c31becfbb127702b425e7226052e3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/check.v1",
|
||||
"Rev": "91ae5f88a67b14891cfd43895b01164f6c120420"
|
||||
}
|
||||
]
|
||||
}
|
||||
5
Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Readme
generated
vendored
Normal file
5
Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Readme
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
||||
65
Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go
generated
vendored
65
Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go
generated
vendored
@ -1,28 +1,30 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
import "log"
|
||||
|
||||
// Here are some basic datastore implementations.
|
||||
|
||||
// MapDatastore uses a standard Go map for internal storage.
|
||||
type keyMap map[Key]interface{}
|
||||
|
||||
// MapDatastore uses a standard Go map for internal storage.
|
||||
type MapDatastore struct {
|
||||
values keyMap
|
||||
}
|
||||
|
||||
// NewMapDatastore constructs a MapDatastore
|
||||
func NewMapDatastore() (d *MapDatastore) {
|
||||
return &MapDatastore{
|
||||
values: keyMap{},
|
||||
}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *MapDatastore) Put(key Key, value interface{}) (err error) {
|
||||
d.values[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
|
||||
val, found := d.values[key]
|
||||
if !found {
|
||||
@ -31,19 +33,22 @@ func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
|
||||
_, found := d.values[key]
|
||||
return found, nil
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *MapDatastore) Delete(key Key) (err error) {
|
||||
delete(d.values, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyList implements Datastore.KeyList
|
||||
func (d *MapDatastore) KeyList() ([]Key, error) {
|
||||
var keys []Key
|
||||
for k, _ := range d.values {
|
||||
for k := range d.values {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys, nil
|
||||
@ -54,26 +59,32 @@ func (d *MapDatastore) KeyList() ([]Key, error) {
|
||||
type NullDatastore struct {
|
||||
}
|
||||
|
||||
// NewNullDatastore constructs a null datastoe
|
||||
func NewNullDatastore() *NullDatastore {
|
||||
return &NullDatastore{}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *NullDatastore) Put(key Key, value interface{}) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *NullDatastore) Get(key Key) (value interface{}, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *NullDatastore) Delete(key Key) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyList implements Datastore.KeyList
|
||||
func (d *NullDatastore) KeyList() ([]Key, error) {
|
||||
return nil, nil
|
||||
}
|
||||
@ -81,38 +92,56 @@ func (d *NullDatastore) KeyList() ([]Key, error) {
|
||||
// LogDatastore logs all accesses through the datastore.
|
||||
type LogDatastore struct {
|
||||
Name string
|
||||
Child Datastore
|
||||
child Datastore
|
||||
}
|
||||
|
||||
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
|
||||
// Shim is a datastore which has a child.
|
||||
type Shim interface {
|
||||
Datastore
|
||||
|
||||
Children() []Datastore
|
||||
}
|
||||
|
||||
// NewLogDatastore constructs a log datastore.
|
||||
func NewLogDatastore(ds Datastore, name string) Shim {
|
||||
if len(name) < 1 {
|
||||
name = "LogDatastore"
|
||||
}
|
||||
return &LogDatastore{Name: name, Child: ds}
|
||||
return &LogDatastore{Name: name, child: ds}
|
||||
}
|
||||
|
||||
// Children implements Shim
|
||||
func (d *LogDatastore) Children() []Datastore {
|
||||
return []Datastore{d.child}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *LogDatastore) Put(key Key, value interface{}) (err error) {
|
||||
log.Printf("%s: Put %s", d.Name, key)
|
||||
log.Printf("%s: Put %s\n", d.Name, key)
|
||||
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
|
||||
return d.Child.Put(key, value)
|
||||
return d.child.Put(key, value)
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *LogDatastore) Get(key Key) (value interface{}, err error) {
|
||||
log.Printf("%s: Get %s", d.Name, key)
|
||||
return d.Child.Get(key)
|
||||
log.Printf("%s: Get %s\n", d.Name, key)
|
||||
return d.child.Get(key)
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
|
||||
log.Printf("%s: Has %s", d.Name, key)
|
||||
return d.Child.Has(key)
|
||||
log.Printf("%s: Has %s\n", d.Name, key)
|
||||
return d.child.Has(key)
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *LogDatastore) Delete(key Key) (err error) {
|
||||
log.Printf("%s: Delete %s", d.Name, key)
|
||||
return d.Child.Delete(key)
|
||||
log.Printf("%s: Delete %s\n", d.Name, key)
|
||||
return d.child.Delete(key)
|
||||
}
|
||||
|
||||
// KeyList implements Datastore.KeyList
|
||||
func (d *LogDatastore) KeyList() ([]Key, error) {
|
||||
log.Printf("%s: Get KeyList.", d.Name)
|
||||
return d.Child.KeyList()
|
||||
log.Printf("%s: Get KeyList\n", d.Name)
|
||||
return d.child.KeyList()
|
||||
}
|
||||
|
||||
13
Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds_test.go
generated
vendored
Normal file
13
Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds_test.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
package datastore_test
|
||||
|
||||
import (
|
||||
. "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
. "launchpad.net/gocheck"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type BasicSuite struct{}
|
||||
|
||||
var _ = Suite(&BasicSuite{})
|
||||
16
Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go
generated
vendored
16
Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go
generated
vendored
@ -5,7 +5,7 @@ import (
|
||||
)
|
||||
|
||||
/*
|
||||
A Datastore represents storage for any key-value pair.
|
||||
Datastore represents storage for any key-value pair.
|
||||
|
||||
Datastores are general enough to be backed by all kinds of different storage:
|
||||
in-memory caches, databases, a remote datastore, flat files on disk, etc.
|
||||
@ -27,7 +27,6 @@ and thus it should behave predictably and handle exceptional conditions with
|
||||
proper error reporting. Thus, all Datastore calls may return errors, which
|
||||
should be checked by callers.
|
||||
*/
|
||||
|
||||
type Datastore interface {
|
||||
// Put stores the object `value` named by `key`.
|
||||
//
|
||||
@ -53,20 +52,27 @@ type Datastore interface {
|
||||
// Delete removes the value for given `key`.
|
||||
Delete(key Key) (err error)
|
||||
|
||||
// Returns a list of keys in the datastore
|
||||
// KeyList returns a list of keys in the datastore
|
||||
KeyList() ([]Key, error)
|
||||
}
|
||||
|
||||
// ThreadSafeDatastore is an interface that all threadsafe datastore should
|
||||
// implement to leverage type safety checks.
|
||||
type ThreadSafeDatastore interface {
|
||||
Datastore
|
||||
IsThreadSafe()
|
||||
}
|
||||
|
||||
// Errors
|
||||
|
||||
// ErrNotFound is returned by Get, Has, and Delete when a datastore does not
|
||||
// map the given key to a value.
|
||||
var ErrNotFound = errors.New("datastore: key not found.")
|
||||
var ErrNotFound = errors.New("datastore: key not found")
|
||||
|
||||
// ErrInvalidType is returned by Put when a given value is incopatible with
|
||||
// the type the datastore supports. This means a conversion (or serialization)
|
||||
// is needed beforehand.
|
||||
var ErrInvalidType = errors.New("datastore: invalid type error.")
|
||||
var ErrInvalidType = errors.New("datastore: invalid type error")
|
||||
|
||||
// GetBackedHas provides a default Datastore.Has implementation.
|
||||
// It exists so Datastore.Has implementations can use it, like so:
|
||||
|
||||
122
Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs.go
generated
vendored
Normal file
122
Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
)
|
||||
|
||||
// Datastore uses a standard Go map for internal storage.
|
||||
type Datastore struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// NewDatastore returns a new fs Datastore at given `path`
|
||||
func NewDatastore(path string) (ds.Datastore, error) {
|
||||
if !isDir(path) {
|
||||
return nil, fmt.Errorf("Failed to find directory at: %v (file? perms?)", path)
|
||||
}
|
||||
|
||||
return &Datastore{path: path}, nil
|
||||
}
|
||||
|
||||
// KeyFilename returns the filename associated with `key`
|
||||
func (d *Datastore) KeyFilename(key ds.Key) string {
|
||||
return filepath.Join(d.path, key.String(), ".dsobject")
|
||||
}
|
||||
|
||||
// Put stores the given value.
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
|
||||
// TODO: maybe use io.Readers/Writers?
|
||||
// r, err := dsio.CastAsReader(value)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return ds.ErrInvalidType
|
||||
}
|
||||
|
||||
fn := d.KeyFilename(key)
|
||||
|
||||
// mkdirall above.
|
||||
err = os.MkdirAll(filepath.Dir(fn), 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(fn, val, 0666)
|
||||
}
|
||||
|
||||
// Get returns the value for given key
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
fn := d.KeyFilename(key)
|
||||
if !isFile(fn) {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
|
||||
return ioutil.ReadFile(fn)
|
||||
}
|
||||
|
||||
// Has returns whether the datastore has a value for a given key
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
return ds.GetBackedHas(d, key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given key
|
||||
func (d *Datastore) Delete(key ds.Key) (err error) {
|
||||
fn := d.KeyFilename(key)
|
||||
if !isFile(fn) {
|
||||
return ds.ErrNotFound
|
||||
}
|
||||
|
||||
return os.Remove(fn)
|
||||
}
|
||||
|
||||
// KeyList returns a list of all keys in the datastore
|
||||
func (d *Datastore) KeyList() ([]ds.Key, error) {
|
||||
|
||||
keys := []ds.Key{}
|
||||
|
||||
walkFn := func(path string, info os.FileInfo, err error) error {
|
||||
// remove ds path prefix
|
||||
if strings.HasPrefix(path, d.path) {
|
||||
path = path[len(d.path):]
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
key := ds.NewKey(path)
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
filepath.Walk(d.path, walkFn)
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// isDir returns whether given path is a directory
|
||||
func isDir(path string) bool {
|
||||
finfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return finfo.IsDir()
|
||||
}
|
||||
|
||||
// isFile returns whether given path is a file
|
||||
func isFile(path string) bool {
|
||||
finfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return !finfo.IsDir()
|
||||
}
|
||||
65
Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs_test.go
generated
vendored
Normal file
65
Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs_test.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
package fs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
fs "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs"
|
||||
. "launchpad.net/gocheck"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type DSSuite struct {
|
||||
dir string
|
||||
ds ds.Datastore
|
||||
}
|
||||
|
||||
var _ = Suite(&DSSuite{})
|
||||
|
||||
func (ks *DSSuite) SetUpTest(c *C) {
|
||||
ks.dir = c.MkDir()
|
||||
ks.ds, _ = fs.NewDatastore(ks.dir)
|
||||
}
|
||||
|
||||
func (ks *DSSuite) TestOpen(c *C) {
|
||||
_, err := fs.NewDatastore("/tmp/foo/bar/baz")
|
||||
c.Assert(err, Not(Equals), nil)
|
||||
|
||||
// setup ds
|
||||
_, err = fs.NewDatastore(ks.dir)
|
||||
c.Assert(err, Equals, nil)
|
||||
}
|
||||
|
||||
func (ks *DSSuite) TestBasic(c *C) {
|
||||
|
||||
keys := strsToKeys([]string{
|
||||
"foo",
|
||||
"foo/bar",
|
||||
"foo/bar/baz",
|
||||
"foo/barb",
|
||||
"foo/bar/bazb",
|
||||
"foo/bar/baz/barb",
|
||||
})
|
||||
|
||||
for _, k := range keys {
|
||||
err := ks.ds.Put(k, []byte(k.String()))
|
||||
c.Check(err, Equals, nil)
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
v, err := ks.ds.Get(k)
|
||||
c.Check(err, Equals, nil)
|
||||
c.Check(bytes.Equal(v.([]byte), []byte(k.String())), Equals, true)
|
||||
}
|
||||
}
|
||||
|
||||
func strsToKeys(strs []string) []ds.Key {
|
||||
keys := make([]ds.Key, len(strs))
|
||||
for i, s := range strs {
|
||||
keys[i] = ds.NewKey(s)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
44
Godeps/_workspace/src/github.com/jbenet/datastore.go/io/io.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/jbenet/datastore.go/io/io.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
)
|
||||
|
||||
// CastAsReader does type assertions to find the type of a value and attempts
|
||||
// to turn it into an io.Reader. If not possible, will return ds.ErrInvalidType
|
||||
func CastAsReader(value interface{}) (io.Reader, error) {
|
||||
switch v := value.(type) {
|
||||
case io.Reader:
|
||||
return v, nil
|
||||
|
||||
case []byte:
|
||||
return bytes.NewReader(v), nil
|
||||
|
||||
case string:
|
||||
return bytes.NewReader([]byte(v)), nil
|
||||
|
||||
default:
|
||||
return nil, ds.ErrInvalidType
|
||||
}
|
||||
}
|
||||
|
||||
// // CastAsWriter does type assertions to find the type of a value and attempts
|
||||
// // to turn it into an io.Writer. If not possible, will return ds.ErrInvalidType
|
||||
// func CastAsWriter(value interface{}) (err error) {
|
||||
// switch v := value.(type) {
|
||||
// case io.Reader:
|
||||
// return v, nil
|
||||
//
|
||||
// case []byte:
|
||||
// return bytes.NewReader(v), nil
|
||||
//
|
||||
// case string:
|
||||
// return bytes.NewReader([]byte(v)), nil
|
||||
//
|
||||
// default:
|
||||
// return nil, ds.ErrInvalidType
|
||||
// }
|
||||
// }
|
||||
5
Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go
generated
vendored
5
Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go
generated
vendored
@ -2,12 +2,13 @@ package datastore_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
. "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
. "launchpad.net/gocheck"
|
||||
"math/rand"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
|
||||
88
Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform.go
generated
vendored
Normal file
88
Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
package keytransform
|
||||
|
||||
import ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
|
||||
// KeyTransform is a function that transforms one key into another.
|
||||
type KeyTransform func(ds.Key) ds.Key
|
||||
|
||||
// Datastore is a keytransform.Datastore
|
||||
type Datastore interface {
|
||||
ds.Shim
|
||||
|
||||
// Transform runs the transformation function
|
||||
Transform(ds.Key) ds.Key
|
||||
|
||||
// TransformFunc returns the KeyTransform function
|
||||
TransformFunc() KeyTransform
|
||||
}
|
||||
|
||||
// ktds keeps a KeyTransform function
|
||||
type ktds struct {
|
||||
child ds.Datastore
|
||||
xform KeyTransform
|
||||
}
|
||||
|
||||
// WrapDatastore wraps a given datastore with a KeyTransform function.
|
||||
// The resulting wrapped datastore will use the transform on all Datastore
|
||||
// operations.
|
||||
func WrapDatastore(child ds.Datastore, f KeyTransform) Datastore {
|
||||
if f == nil {
|
||||
panic("f (KeyTransform) is nil")
|
||||
}
|
||||
|
||||
if child == nil {
|
||||
panic("child (ds.Datastore) is nil")
|
||||
}
|
||||
|
||||
return &ktds{child, f}
|
||||
}
|
||||
|
||||
// TransformFunc returns the KeyTransform function
|
||||
func (d *ktds) TransformFunc() KeyTransform {
|
||||
return d.xform
|
||||
}
|
||||
|
||||
// Transform runs the KeyTransform function
|
||||
func (d *ktds) Transform(k ds.Key) ds.Key {
|
||||
return d.xform(k)
|
||||
}
|
||||
|
||||
// Children implements ds.Shim
|
||||
func (d *ktds) Children() []ds.Datastore {
|
||||
return []ds.Datastore{d.child}
|
||||
}
|
||||
|
||||
// Put stores the given value, transforming the key first.
|
||||
func (d *ktds) Put(key ds.Key, value interface{}) (err error) {
|
||||
return d.child.Put(d.Transform(key), value)
|
||||
}
|
||||
|
||||
// Get returns the value for given key, transforming the key first.
|
||||
func (d *ktds) Get(key ds.Key) (value interface{}, err error) {
|
||||
return d.child.Get(d.Transform(key))
|
||||
}
|
||||
|
||||
// Has returns whether the datastore has a value for a given key, transforming
|
||||
// the key first.
|
||||
func (d *ktds) Has(key ds.Key) (exists bool, err error) {
|
||||
return d.child.Has(d.Transform(key))
|
||||
}
|
||||
|
||||
// Delete removes the value for given key
|
||||
func (d *ktds) Delete(key ds.Key) (err error) {
|
||||
return d.child.Delete(d.Transform(key))
|
||||
}
|
||||
|
||||
// KeyList returns a list of all keys in the datastore, transforming keys out.
|
||||
func (d *ktds) KeyList() ([]ds.Key, error) {
|
||||
|
||||
keys, err := d.child.KeyList()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, k := range keys {
|
||||
keys[i] = d.Transform(k)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
60
Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform_test.go
generated
vendored
Normal file
60
Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform_test.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
package keytransform_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
kt "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform"
|
||||
. "launchpad.net/gocheck"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type DSSuite struct {
|
||||
dir string
|
||||
ds ds.Datastore
|
||||
}
|
||||
|
||||
var _ = Suite(&DSSuite{})
|
||||
|
||||
func (ks *DSSuite) TestBasic(c *C) {
|
||||
|
||||
mpds := ds.NewMapDatastore()
|
||||
ktds := kt.WrapDatastore(mpds, func(k ds.Key) ds.Key {
|
||||
return k.Reverse()
|
||||
})
|
||||
|
||||
keys := strsToKeys([]string{
|
||||
"foo",
|
||||
"foo/bar",
|
||||
"foo/bar/baz",
|
||||
"foo/barb",
|
||||
"foo/bar/bazb",
|
||||
"foo/bar/baz/barb",
|
||||
})
|
||||
|
||||
for _, k := range keys {
|
||||
err := ktds.Put(k, []byte(k.String()))
|
||||
c.Check(err, Equals, nil)
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
v1, err := ktds.Get(k)
|
||||
c.Check(err, Equals, nil)
|
||||
c.Check(bytes.Equal(v1.([]byte), []byte(k.String())), Equals, true)
|
||||
|
||||
v2, err := mpds.Get(k.Reverse())
|
||||
c.Check(err, Equals, nil)
|
||||
c.Check(bytes.Equal(v2.([]byte), []byte(k.String())), Equals, true)
|
||||
}
|
||||
}
|
||||
|
||||
func strsToKeys(strs []string) []ds.Key {
|
||||
keys := make([]ds.Key, len(strs))
|
||||
for i, s := range strs {
|
||||
keys[i] = ds.NewKey(s)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
4
Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go
generated
vendored
4
Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go
generated
vendored
@ -13,7 +13,7 @@ type Datastore struct {
|
||||
|
||||
type Options opt.Options
|
||||
|
||||
func NewDatastore(path string, opts *Options) (*Datastore, error) {
|
||||
func NewDatastore(path string, opts *Options) (ds.ThreadSafeDatastore, error) {
|
||||
var nopts opt.Options
|
||||
if opts != nil {
|
||||
nopts = opt.Options(*opts)
|
||||
@ -76,3 +76,5 @@ func (d *Datastore) KeyList() ([]ds.Key, error) {
|
||||
func (d *Datastore) Close() (err error) {
|
||||
return d.DB.Close()
|
||||
}
|
||||
|
||||
func (d *Datastore) IsThreadSafe() {}
|
||||
|
||||
54
Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore.go
generated
vendored
Normal file
54
Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
)
|
||||
|
||||
// Datastore uses golang-lru for internal storage.
|
||||
type Datastore struct {
|
||||
cache *lru.Cache
|
||||
}
|
||||
|
||||
// NewDatastore constructs a new LRU Datastore with given capacity.
|
||||
func NewDatastore(capacity int) (*Datastore, error) {
|
||||
cache, err := lru.New(capacity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Datastore{cache: cache}, nil
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
d.cache.Add(key, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
val, ok := d.cache.Get(key)
|
||||
if !ok {
|
||||
return nil, ds.ErrNotFound
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
return ds.GetBackedHas(d, key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *Datastore) Delete(key ds.Key) (err error) {
|
||||
d.cache.Remove(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyList returns a list of keys in the datastore
|
||||
func (d *Datastore) KeyList() ([]ds.Key, error) {
|
||||
return nil, errors.New("KeyList not implemented.")
|
||||
}
|
||||
52
Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore_test.go
generated
vendored
Normal file
52
Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore_test.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package lru_test

import (
	"strconv"
	"testing"

	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
	lru "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// DSSuite groups the LRU datastore tests for gocheck.
type DSSuite struct{}

var _ = Suite(&DSSuite{})

// TestBasic fills an LRU datastore to capacity, verifies all entries are
// retrievable, then inserts a second full batch and verifies the first
// batch was evicted (Get returns ds.ErrNotFound) while the second remains.
func (ks *DSSuite) TestBasic(c *C) {
	var size = 1000

	d, err := lru.NewDatastore(size)
	c.Check(err, Equals, nil)

	// Fill the cache exactly to capacity.
	for i := 0; i < size; i++ {
		err := d.Put(ds.NewKey(strconv.Itoa(i)), i)
		c.Check(err, Equals, nil)
	}

	// Nothing should have been evicted yet.
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i)))
		c.Check(j, Equals, i)
		c.Check(err, Equals, nil)
	}

	// Insert a second batch of `size` distinct keys, forcing eviction.
	for i := 0; i < size; i++ {
		err := d.Put(ds.NewKey(strconv.Itoa(i+size)), i)
		c.Check(err, Equals, nil)
	}

	// The first batch must now be fully evicted...
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i)))
		c.Check(j, Equals, nil)
		c.Check(err, Equals, ds.ErrNotFound)
	}

	// ...while the second batch is still present.
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i + size)))
		c.Check(j, Equals, i)
		c.Check(err, Equals, nil)
	}
}
|
||||
19
Godeps/_workspace/src/github.com/jbenet/datastore.go/query.go
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/jbenet/datastore.go/query.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package datastore
|
||||
|
||||
// type KeyIterator struct {
|
||||
// HasNext() bool
|
||||
// Next() interface{}
|
||||
// }
|
||||
|
||||
// type Query struct {
|
||||
// }
|
||||
|
||||
/*
|
||||
QueryDatastores support a Query interface. Queries are used to support
|
||||
searching for values (beyond simple key-based `Get`s).
|
||||
*/
|
||||
// type QueryDatastore interface {
|
||||
// // Query returns an Iterator of Keys whose Values match criteria
|
||||
// // expressed in `query`.
|
||||
// Query(Query) (iter Iterator, err error)
|
||||
// }
|
||||
64
Godeps/_workspace/src/github.com/jbenet/datastore.go/sync/sync.go
generated
vendored
Normal file
64
Godeps/_workspace/src/github.com/jbenet/datastore.go/sync/sync.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
package sync

import (
	"sync"

	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
)

// MutexDatastore contains a child datastore and a mutex.
// It provides coarse-grained synchronization: every operation takes the
// lock for its entire duration.
type MutexDatastore struct {
	sync.RWMutex

	child ds.Datastore
}

// MutexWrap constructs a datastore with a coarse lock around
// the entire datastore, for every single operation.
func MutexWrap(d ds.Datastore) ds.ThreadSafeDatastore {
	return &MutexDatastore{child: d}
}

// Children implements Shim, exposing the single wrapped datastore.
func (d *MutexDatastore) Children() []ds.Datastore {
	return []ds.Datastore{d.child}
}

// IsThreadSafe implements ThreadSafeDatastore (marker method, no body).
func (d *MutexDatastore) IsThreadSafe() {}

// Put implements Datastore.Put under the write lock.
func (d *MutexDatastore) Put(key ds.Key, value interface{}) (err error) {
	d.Lock()
	defer d.Unlock()
	return d.child.Put(key, value)
}

// Get implements Datastore.Get under the read lock.
func (d *MutexDatastore) Get(key ds.Key) (value interface{}, err error) {
	d.RLock()
	defer d.RUnlock()
	return d.child.Get(key)
}

// Has implements Datastore.Has under the read lock.
func (d *MutexDatastore) Has(key ds.Key) (exists bool, err error) {
	d.RLock()
	defer d.RUnlock()
	return d.child.Has(key)
}

// Delete implements Datastore.Delete under the write lock.
func (d *MutexDatastore) Delete(key ds.Key) (err error) {
	d.Lock()
	defer d.Unlock()
	return d.child.Delete(key)
}

// KeyList implements Datastore.KeyList under the read lock.
func (d *MutexDatastore) KeyList() ([]ds.Key, error) {
	d.RLock()
	defer d.RUnlock()
	return d.child.KeyList()
}
|
||||
21
Godeps/_workspace/src/github.com/jbenet/go-is-domain/LICENSE
generated
vendored
Normal file
21
Godeps/_workspace/src/github.com/jbenet/go-is-domain/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
23
Godeps/_workspace/src/github.com/jbenet/go-is-domain/README.md
generated
vendored
Normal file
23
Godeps/_workspace/src/github.com/jbenet/go-is-domain/README.md
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# go-is-domain
|
||||
|
||||
This package is dedicated to [@whyrusleeping](https://github.com/whyrusleeping).
|
||||
|
||||
Docs: https://godoc.org/github.com/jbenet/go-is-domain
|
||||
|
||||
|
||||
Check whether something is a domain.
|
||||
|
||||
|
||||
```Go
|
||||
|
||||
import (
|
||||
isd "github.com/jbenet/go-is-domain"
|
||||
)
|
||||
|
||||
isd.IsDomain("foo.com") // true
|
||||
isd.IsDomain("foo.bar.com.") // true
|
||||
isd.IsDomain("foo.bar.baz") // false
|
||||
|
||||
```
|
||||
|
||||
MIT Licensed
|
||||
13
Godeps/_workspace/src/github.com/jbenet/go-is-domain/doc.go
generated
vendored
Normal file
13
Godeps/_workspace/src/github.com/jbenet/go-is-domain/doc.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
/*
Package isdomain allows users to check whether strings represent domain names.

	import (
		isd "github.com/jbenet/go-is-domain"
	)

	isd.IsDomain("foo.com")      // true
	isd.IsDomain("foo.bar.com.") // true
	isd.IsDomain("foo.bar.baz")  // false

*/
package isdomain
|
||||
12
Godeps/_workspace/src/github.com/jbenet/go-is-domain/domainre.go
generated
vendored
Normal file
12
Godeps/_workspace/src/github.com/jbenet/go-is-domain/domainre.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package isdomain
|
||||
|
||||
import "regexp"
|
||||
|
||||
// DomainRegexpStr is a regular expression string to validate domains.
// It matches one or more lowercase dot-separated labels (letters/digits,
// with internal hyphens) followed by a final all-letter label of length >= 2.
const DomainRegexpStr = "^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$"

// domainRegexp is compiled once at package load. MustCompile is safe here
// because the pattern is a constant known to be valid; initializing the var
// directly avoids an init() function with no other purpose.
var domainRegexp = regexp.MustCompile(DomainRegexpStr)
|
||||
44
Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package isdomain
|
||||
|
||||
import "strings"
|
||||
|
||||
// IsICANNTLD returns whether the given string is a TLD (Top Level Domain),
|
||||
// according to ICANN. Well, really according to the TLDs listed in this
|
||||
// package.
|
||||
func IsICANNTLD(s string) bool {
|
||||
s = strings.ToUpper(s)
|
||||
_, found := TLDs[s]
|
||||
return found
|
||||
}
|
||||
|
||||
// IsExtendedTLD returns whether the given string is a TLD (Top Level Domain),
|
||||
// extended with a few other "TLDs": .bit, .onion
|
||||
func IsExtendedTLD(s string) bool {
|
||||
s = strings.ToUpper(s)
|
||||
_, found := ExtendedTLDs[s]
|
||||
return found
|
||||
}
|
||||
|
||||
// IsTLD returns whether the given string is a TLD (according to ICANN, or
|
||||
// in the set of ExtendedTLDs listed in this package.
|
||||
func IsTLD(s string) bool {
|
||||
return IsICANNTLD(s) || IsExtendedTLD(s)
|
||||
}
|
||||
|
||||
// IsDomain returns whether given string is a domain.
|
||||
// It first checks the TLD, and then uses a regular expression.
|
||||
func IsDomain(s string) bool {
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
|
||||
split := strings.Split(s, ".")
|
||||
tld := split[len(split)-1]
|
||||
|
||||
if !IsTLD(tld) {
|
||||
return false
|
||||
}
|
||||
|
||||
s = strings.ToLower(s)
|
||||
return domainRegexp.MatchString(s)
|
||||
}
|
||||
29
Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain_test.go
generated
vendored
Normal file
29
Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain_test.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
package isdomain

import "testing"

// TestBasic checks IsDomain against a table of positive and negative
// examples: ordinary domains, trailing-dot (fully-qualified) forms, bare
// TLDs, malformed strings, and the extended .bit/.onion TLDs.
func TestBasic(t *testing.T) {
	cases := map[string]bool{
		"foo.bar.baz.com": true,
		"foo.bar.baz":     false,
		"foo.bar.baz.com.": true,
		"com": false, // yeah yeah...
		".":   false, // yeah yeah...
		"..":  false,
		".foo.com.": false,
		".foo.com":  false,
		"fo o.com":  false,
		"example.com": true,
		"fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.org":   true,
		"fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.bit":   true,
		"fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.onion": true,
		"a.b.c.d.e.f.g.h.i.j.k.l.museum": true,
		"a.b.c.d.e.f.g.h.i.j.k.l":        false,
	}

	for d, ok := range cases {
		if IsDomain(d) != ok {
			t.Errorf("Misclassification: %v should be %v", d, ok)
		}
	}
}
|
||||
727
Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds-alpha-by-domain.txt
generated
vendored
Normal file
727
Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds-alpha-by-domain.txt
generated
vendored
Normal file
@ -0,0 +1,727 @@
|
||||
# from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
|
||||
# Version 2014100300, Last Updated Fri Oct 3 07:07:01 2014 UTC
|
||||
AC
|
||||
ACADEMY
|
||||
ACCOUNTANTS
|
||||
ACTIVE
|
||||
ACTOR
|
||||
AD
|
||||
AE
|
||||
AERO
|
||||
AF
|
||||
AG
|
||||
AGENCY
|
||||
AI
|
||||
AIRFORCE
|
||||
AL
|
||||
ALLFINANZ
|
||||
AM
|
||||
AN
|
||||
AO
|
||||
AQ
|
||||
AR
|
||||
ARCHI
|
||||
ARMY
|
||||
ARPA
|
||||
AS
|
||||
ASIA
|
||||
ASSOCIATES
|
||||
AT
|
||||
ATTORNEY
|
||||
AU
|
||||
AUCTION
|
||||
AUDIO
|
||||
AUTOS
|
||||
AW
|
||||
AX
|
||||
AXA
|
||||
AZ
|
||||
BA
|
||||
BAR
|
||||
BARGAINS
|
||||
BAYERN
|
||||
BB
|
||||
BD
|
||||
BE
|
||||
BEER
|
||||
BERLIN
|
||||
BEST
|
||||
BF
|
||||
BG
|
||||
BH
|
||||
BI
|
||||
BID
|
||||
BIKE
|
||||
BIO
|
||||
BIZ
|
||||
BJ
|
||||
BLACK
|
||||
BLACKFRIDAY
|
||||
BLUE
|
||||
BM
|
||||
BMW
|
||||
BN
|
||||
BNPPARIBAS
|
||||
BO
|
||||
BOO
|
||||
BOUTIQUE
|
||||
BR
|
||||
BRUSSELS
|
||||
BS
|
||||
BT
|
||||
BUDAPEST
|
||||
BUILD
|
||||
BUILDERS
|
||||
BUSINESS
|
||||
BUZZ
|
||||
BV
|
||||
BW
|
||||
BY
|
||||
BZ
|
||||
BZH
|
||||
CA
|
||||
CAB
|
||||
CAL
|
||||
CAMERA
|
||||
CAMP
|
||||
CANCERRESEARCH
|
||||
CAPETOWN
|
||||
CAPITAL
|
||||
CARAVAN
|
||||
CARDS
|
||||
CARE
|
||||
CAREER
|
||||
CAREERS
|
||||
CASA
|
||||
CASH
|
||||
CAT
|
||||
CATERING
|
||||
CC
|
||||
CD
|
||||
CENTER
|
||||
CEO
|
||||
CERN
|
||||
CF
|
||||
CG
|
||||
CH
|
||||
CHANNEL
|
||||
CHEAP
|
||||
CHRISTMAS
|
||||
CHROME
|
||||
CHURCH
|
||||
CI
|
||||
CITIC
|
||||
CITY
|
||||
CK
|
||||
CL
|
||||
CLAIMS
|
||||
CLEANING
|
||||
CLICK
|
||||
CLINIC
|
||||
CLOTHING
|
||||
CLUB
|
||||
CM
|
||||
CN
|
||||
CO
|
||||
CODES
|
||||
COFFEE
|
||||
COLLEGE
|
||||
COLOGNE
|
||||
COM
|
||||
COMMUNITY
|
||||
COMPANY
|
||||
COMPUTER
|
||||
CONDOS
|
||||
CONSTRUCTION
|
||||
CONSULTING
|
||||
CONTRACTORS
|
||||
COOKING
|
||||
COOL
|
||||
COOP
|
||||
COUNTRY
|
||||
CR
|
||||
CREDIT
|
||||
CREDITCARD
|
||||
CRUISES
|
||||
CU
|
||||
CUISINELLA
|
||||
CV
|
||||
CW
|
||||
CX
|
||||
CY
|
||||
CYMRU
|
||||
CZ
|
||||
DAD
|
||||
DANCE
|
||||
DATING
|
||||
DAY
|
||||
DE
|
||||
DEALS
|
||||
DEGREE
|
||||
DEMOCRAT
|
||||
DENTAL
|
||||
DENTIST
|
||||
DESI
|
||||
DIAMONDS
|
||||
DIET
|
||||
DIGITAL
|
||||
DIRECT
|
||||
DIRECTORY
|
||||
DISCOUNT
|
||||
DJ
|
||||
DK
|
||||
DM
|
||||
DNP
|
||||
DO
|
||||
DOMAINS
|
||||
DURBAN
|
||||
DVAG
|
||||
DZ
|
||||
EAT
|
||||
EC
|
||||
EDU
|
||||
EDUCATION
|
||||
EE
|
||||
EG
|
||||
EMAIL
|
||||
ENGINEER
|
||||
ENGINEERING
|
||||
ENTERPRISES
|
||||
EQUIPMENT
|
||||
ER
|
||||
ES
|
||||
ESQ
|
||||
ESTATE
|
||||
ET
|
||||
EU
|
||||
EUS
|
||||
EVENTS
|
||||
EXCHANGE
|
||||
EXPERT
|
||||
EXPOSED
|
||||
FAIL
|
||||
FARM
|
||||
FEEDBACK
|
||||
FI
|
||||
FINANCE
|
||||
FINANCIAL
|
||||
FISH
|
||||
FISHING
|
||||
FITNESS
|
||||
FJ
|
||||
FK
|
||||
FLIGHTS
|
||||
FLORIST
|
||||
FLY
|
||||
FM
|
||||
FO
|
||||
FOO
|
||||
FORSALE
|
||||
FOUNDATION
|
||||
FR
|
||||
FRL
|
||||
FROGANS
|
||||
FUND
|
||||
FURNITURE
|
||||
FUTBOL
|
||||
GA
|
||||
GAL
|
||||
GALLERY
|
||||
GB
|
||||
GBIZ
|
||||
GD
|
||||
GE
|
||||
GENT
|
||||
GF
|
||||
GG
|
||||
GH
|
||||
GI
|
||||
GIFT
|
||||
GIFTS
|
||||
GIVES
|
||||
GL
|
||||
GLASS
|
||||
GLE
|
||||
GLOBAL
|
||||
GLOBO
|
||||
GM
|
||||
GMAIL
|
||||
GMO
|
||||
GMX
|
||||
GN
|
||||
GOOGLE
|
||||
GOP
|
||||
GOV
|
||||
GP
|
||||
GQ
|
||||
GR
|
||||
GRAPHICS
|
||||
GRATIS
|
||||
GREEN
|
||||
GRIPE
|
||||
GS
|
||||
GT
|
||||
GU
|
||||
GUIDE
|
||||
GUITARS
|
||||
GURU
|
||||
GW
|
||||
GY
|
||||
HAMBURG
|
||||
HAUS
|
||||
HEALTHCARE
|
||||
HELP
|
||||
HERE
|
||||
HIPHOP
|
||||
HIV
|
||||
HK
|
||||
HM
|
||||
HN
|
||||
HOLDINGS
|
||||
HOLIDAY
|
||||
HOMES
|
||||
HORSE
|
||||
HOST
|
||||
HOSTING
|
||||
HOUSE
|
||||
HOW
|
||||
HR
|
||||
HT
|
||||
HU
|
||||
IBM
|
||||
ID
|
||||
IE
|
||||
IL
|
||||
IM
|
||||
IMMO
|
||||
IMMOBILIEN
|
||||
IN
|
||||
INDUSTRIES
|
||||
INFO
|
||||
ING
|
||||
INK
|
||||
INSTITUTE
|
||||
INSURE
|
||||
INT
|
||||
INTERNATIONAL
|
||||
INVESTMENTS
|
||||
IO
|
||||
IQ
|
||||
IR
|
||||
IS
|
||||
IT
|
||||
JE
|
||||
JETZT
|
||||
JM
|
||||
JO
|
||||
JOBS
|
||||
JOBURG
|
||||
JP
|
||||
JUEGOS
|
||||
KAUFEN
|
||||
KE
|
||||
KG
|
||||
KH
|
||||
KI
|
||||
KIM
|
||||
KITCHEN
|
||||
KIWI
|
||||
KM
|
||||
KN
|
||||
KOELN
|
||||
KP
|
||||
KR
|
||||
KRD
|
||||
KRED
|
||||
KW
|
||||
KY
|
||||
KZ
|
||||
LA
|
||||
LACAIXA
|
||||
LAND
|
||||
LAWYER
|
||||
LB
|
||||
LC
|
||||
LEASE
|
||||
LGBT
|
||||
LI
|
||||
LIFE
|
||||
LIGHTING
|
||||
LIMITED
|
||||
LIMO
|
||||
LINK
|
||||
LK
|
||||
LOANS
|
||||
LONDON
|
||||
LOTTO
|
||||
LR
|
||||
LS
|
||||
LT
|
||||
LTDA
|
||||
LU
|
||||
LUXE
|
||||
LUXURY
|
||||
LV
|
||||
LY
|
||||
MA
|
||||
MAISON
|
||||
MANAGEMENT
|
||||
MANGO
|
||||
MARKET
|
||||
MARKETING
|
||||
MC
|
||||
MD
|
||||
ME
|
||||
MEDIA
|
||||
MEET
|
||||
MELBOURNE
|
||||
MEME
|
||||
MENU
|
||||
MG
|
||||
MH
|
||||
MIAMI
|
||||
MIL
|
||||
MINI
|
||||
MK
|
||||
ML
|
||||
MM
|
||||
MN
|
||||
MO
|
||||
MOBI
|
||||
MODA
|
||||
MOE
|
||||
MONASH
|
||||
MORTGAGE
|
||||
MOSCOW
|
||||
MOTORCYCLES
|
||||
MOV
|
||||
MP
|
||||
MQ
|
||||
MR
|
||||
MS
|
||||
MT
|
||||
MU
|
||||
MUSEUM
|
||||
MV
|
||||
MW
|
||||
MX
|
||||
MY
|
||||
MZ
|
||||
NA
|
||||
NAGOYA
|
||||
NAME
|
||||
NAVY
|
||||
NC
|
||||
NE
|
||||
NET
|
||||
NETWORK
|
||||
NEUSTAR
|
||||
NEW
|
||||
NEXUS
|
||||
NF
|
||||
NG
|
||||
NGO
|
||||
NHK
|
||||
NI
|
||||
NINJA
|
||||
NL
|
||||
NO
|
||||
NP
|
||||
NR
|
||||
NRA
|
||||
NRW
|
||||
NU
|
||||
NYC
|
||||
NZ
|
||||
OKINAWA
|
||||
OM
|
||||
ONG
|
||||
ONL
|
||||
OOO
|
||||
ORG
|
||||
ORGANIC
|
||||
OTSUKA
|
||||
OVH
|
||||
PA
|
||||
PARIS
|
||||
PARTNERS
|
||||
PARTS
|
||||
PE
|
||||
PF
|
||||
PG
|
||||
PH
|
||||
PHARMACY
|
||||
PHOTO
|
||||
PHOTOGRAPHY
|
||||
PHOTOS
|
||||
PHYSIO
|
||||
PICS
|
||||
PICTURES
|
||||
PINK
|
||||
PIZZA
|
||||
PK
|
||||
PL
|
||||
PLACE
|
||||
PLUMBING
|
||||
PM
|
||||
PN
|
||||
POHL
|
||||
POST
|
||||
PR
|
||||
PRAXI
|
||||
PRESS
|
||||
PRO
|
||||
PROD
|
||||
PRODUCTIONS
|
||||
PROF
|
||||
PROPERTIES
|
||||
PROPERTY
|
||||
PS
|
||||
PT
|
||||
PUB
|
||||
PW
|
||||
PY
|
||||
QA
|
||||
QPON
|
||||
QUEBEC
|
||||
RE
|
||||
REALTOR
|
||||
RECIPES
|
||||
RED
|
||||
REHAB
|
||||
REISE
|
||||
REISEN
|
||||
REN
|
||||
RENTALS
|
||||
REPAIR
|
||||
REPORT
|
||||
REPUBLICAN
|
||||
REST
|
||||
RESTAURANT
|
||||
REVIEWS
|
||||
RICH
|
||||
RIO
|
||||
RO
|
||||
ROCKS
|
||||
RODEO
|
||||
RS
|
||||
RSVP
|
||||
RU
|
||||
RUHR
|
||||
RW
|
||||
RYUKYU
|
||||
SA
|
||||
SAARLAND
|
||||
SARL
|
||||
SB
|
||||
SC
|
||||
SCA
|
||||
SCB
|
||||
SCHMIDT
|
||||
SCHULE
|
||||
SCOT
|
||||
SD
|
||||
SE
|
||||
SERVICES
|
||||
SEXY
|
||||
SG
|
||||
SH
|
||||
SHIKSHA
|
||||
SHOES
|
||||
SI
|
||||
SINGLES
|
||||
SJ
|
||||
SK
|
||||
SL
|
||||
SM
|
||||
SN
|
||||
SO
|
||||
SOCIAL
|
||||
SOFTWARE
|
||||
SOHU
|
||||
SOLAR
|
||||
SOLUTIONS
|
||||
SOY
|
||||
SPACE
|
||||
SPIEGEL
|
||||
SR
|
||||
ST
|
||||
SU
|
||||
SUPPLIES
|
||||
SUPPLY
|
||||
SUPPORT
|
||||
SURF
|
||||
SURGERY
|
||||
SUZUKI
|
||||
SV
|
||||
SX
|
||||
SY
|
||||
SYSTEMS
|
||||
SZ
|
||||
TATAR
|
||||
TATTOO
|
||||
TAX
|
||||
TC
|
||||
TD
|
||||
TECHNOLOGY
|
||||
TEL
|
||||
TF
|
||||
TG
|
||||
TH
|
||||
TIENDA
|
||||
TIPS
|
||||
TIROL
|
||||
TJ
|
||||
TK
|
||||
TL
|
||||
TM
|
||||
TN
|
||||
TO
|
||||
TODAY
|
||||
TOKYO
|
||||
TOOLS
|
||||
TOP
|
||||
TOWN
|
||||
TOYS
|
||||
TP
|
||||
TR
|
||||
TRADE
|
||||
TRAINING
|
||||
TRAVEL
|
||||
TT
|
||||
TUI
|
||||
TV
|
||||
TW
|
||||
TZ
|
||||
UA
|
||||
UG
|
||||
UK
|
||||
UNIVERSITY
|
||||
UNO
|
||||
UOL
|
||||
US
|
||||
UY
|
||||
UZ
|
||||
VA
|
||||
VACATIONS
|
||||
VC
|
||||
VE
|
||||
VEGAS
|
||||
VENTURES
|
||||
VERSICHERUNG
|
||||
VET
|
||||
VG
|
||||
VI
|
||||
VIAJES
|
||||
VILLAS
|
||||
VISION
|
||||
VLAANDEREN
|
||||
VN
|
||||
VODKA
|
||||
VOTE
|
||||
VOTING
|
||||
VOTO
|
||||
VOYAGE
|
||||
VU
|
||||
WALES
|
||||
WANG
|
||||
WATCH
|
||||
WEBCAM
|
||||
WEBSITE
|
||||
WED
|
||||
WF
|
||||
WHOSWHO
|
||||
WIEN
|
||||
WIKI
|
||||
WILLIAMHILL
|
||||
WME
|
||||
WORK
|
||||
WORKS
|
||||
WORLD
|
||||
WS
|
||||
WTC
|
||||
WTF
|
||||
XN--1QQW23A
|
||||
XN--3BST00M
|
||||
XN--3DS443G
|
||||
XN--3E0B707E
|
||||
XN--45BRJ9C
|
||||
XN--4GBRIM
|
||||
XN--55QW42G
|
||||
XN--55QX5D
|
||||
XN--6FRZ82G
|
||||
XN--6QQ986B3XL
|
||||
XN--80ADXHKS
|
||||
XN--80AO21A
|
||||
XN--80ASEHDB
|
||||
XN--80ASWG
|
||||
XN--90A3AC
|
||||
XN--C1AVG
|
||||
XN--CG4BKI
|
||||
XN--CLCHC0EA0B2G2A9GCD
|
||||
XN--CZR694B
|
||||
XN--CZRU2D
|
||||
XN--D1ACJ3B
|
||||
XN--FIQ228C5HS
|
||||
XN--FIQ64B
|
||||
XN--FIQS8S
|
||||
XN--FIQZ9S
|
||||
XN--FPCRJ9C3D
|
||||
XN--FZC2C9E2C
|
||||
XN--GECRJ9C
|
||||
XN--H2BRJ9C
|
||||
XN--I1B6B1A6A2E
|
||||
XN--IO0A7I
|
||||
XN--J1AMH
|
||||
XN--J6W193G
|
||||
XN--KPRW13D
|
||||
XN--KPRY57D
|
||||
XN--KPUT3I
|
||||
XN--L1ACC
|
||||
XN--LGBBAT1AD8J
|
||||
XN--MGB9AWBF
|
||||
XN--MGBA3A4F16A
|
||||
XN--MGBAAM7A8H
|
||||
XN--MGBAB2BD
|
||||
XN--MGBAYH7GPA
|
||||
XN--MGBBH1A71E
|
||||
XN--MGBC0A9AZCG
|
||||
XN--MGBERP4A5D4AR
|
||||
XN--MGBX4CD0AB
|
||||
XN--NGBC5AZD
|
||||
XN--NQV7F
|
||||
XN--NQV7FS00EMA
|
||||
XN--O3CW4H
|
||||
XN--OGBPF8FL
|
||||
XN--P1ACF
|
||||
XN--P1AI
|
||||
XN--PGBS0DH
|
||||
XN--Q9JYB4C
|
||||
XN--RHQV96G
|
||||
XN--S9BRJ9C
|
||||
XN--SES554G
|
||||
XN--UNUP4Y
|
||||
XN--VERMGENSBERATER-CTB
|
||||
XN--VERMGENSBERATUNG-PWB
|
||||
XN--VHQUV
|
||||
XN--WGBH1C
|
||||
XN--WGBL6A
|
||||
XN--XHQ521B
|
||||
XN--XKC2AL3HYE2A
|
||||
XN--XKC2DL3A5EE0H
|
||||
XN--YFRO4I67O
|
||||
XN--YGBI2AMMX
|
||||
XN--ZFR164B
|
||||
XXX
|
||||
XYZ
|
||||
YACHTS
|
||||
YANDEX
|
||||
YE
|
||||
YOKOHAMA
|
||||
YOUTUBE
|
||||
YT
|
||||
ZA
|
||||
ZIP
|
||||
ZM
|
||||
ZONE
|
||||
ZW
|
||||
737
Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds.go
generated
vendored
Normal file
737
Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds.go
generated
vendored
Normal file
@ -0,0 +1,737 @@
|
||||
package isdomain
|
||||
|
||||
// TLDs is a set of TLDs, according to ICANN in 2014.
|
||||
var TLDs = map[string]bool{
|
||||
"AC": true,
|
||||
"ACADEMY": true,
|
||||
"ACCOUNTANTS": true,
|
||||
"ACTIVE": true,
|
||||
"ACTOR": true,
|
||||
"AD": true,
|
||||
"AE": true,
|
||||
"AERO": true,
|
||||
"AF": true,
|
||||
"AG": true,
|
||||
"AGENCY": true,
|
||||
"AI": true,
|
||||
"AIRFORCE": true,
|
||||
"AL": true,
|
||||
"ALLFINANZ": true,
|
||||
"AM": true,
|
||||
"AN": true,
|
||||
"AO": true,
|
||||
"AQ": true,
|
||||
"AR": true,
|
||||
"ARCHI": true,
|
||||
"ARMY": true,
|
||||
"ARPA": true,
|
||||
"AS": true,
|
||||
"ASIA": true,
|
||||
"ASSOCIATES": true,
|
||||
"AT": true,
|
||||
"ATTORNEY": true,
|
||||
"AU": true,
|
||||
"AUCTION": true,
|
||||
"AUDIO": true,
|
||||
"AUTOS": true,
|
||||
"AW": true,
|
||||
"AX": true,
|
||||
"AXA": true,
|
||||
"AZ": true,
|
||||
"BA": true,
|
||||
"BAR": true,
|
||||
"BARGAINS": true,
|
||||
"BAYERN": true,
|
||||
"BB": true,
|
||||
"BD": true,
|
||||
"BE": true,
|
||||
"BEER": true,
|
||||
"BERLIN": true,
|
||||
"BEST": true,
|
||||
"BF": true,
|
||||
"BG": true,
|
||||
"BH": true,
|
||||
"BI": true,
|
||||
"BID": true,
|
||||
"BIKE": true,
|
||||
"BIO": true,
|
||||
"BIZ": true,
|
||||
"BJ": true,
|
||||
"BLACK": true,
|
||||
"BLACKFRIDAY": true,
|
||||
"BLUE": true,
|
||||
"BM": true,
|
||||
"BMW": true,
|
||||
"BN": true,
|
||||
"BNPPARIBAS": true,
|
||||
"BO": true,
|
||||
"BOO": true,
|
||||
"BOUTIQUE": true,
|
||||
"BR": true,
|
||||
"BRUSSELS": true,
|
||||
"BS": true,
|
||||
"BT": true,
|
||||
"BUDAPEST": true,
|
||||
"BUILD": true,
|
||||
"BUILDERS": true,
|
||||
"BUSINESS": true,
|
||||
"BUZZ": true,
|
||||
"BV": true,
|
||||
"BW": true,
|
||||
"BY": true,
|
||||
"BZ": true,
|
||||
"BZH": true,
|
||||
"CA": true,
|
||||
"CAB": true,
|
||||
"CAL": true,
|
||||
"CAMERA": true,
|
||||
"CAMP": true,
|
||||
"CANCERRESEARCH": true,
|
||||
"CAPETOWN": true,
|
||||
"CAPITAL": true,
|
||||
"CARAVAN": true,
|
||||
"CARDS": true,
|
||||
"CARE": true,
|
||||
"CAREER": true,
|
||||
"CAREERS": true,
|
||||
"CASA": true,
|
||||
"CASH": true,
|
||||
"CAT": true,
|
||||
"CATERING": true,
|
||||
"CC": true,
|
||||
"CD": true,
|
||||
"CENTER": true,
|
||||
"CEO": true,
|
||||
"CERN": true,
|
||||
"CF": true,
|
||||
"CG": true,
|
||||
"CH": true,
|
||||
"CHANNEL": true,
|
||||
"CHEAP": true,
|
||||
"CHRISTMAS": true,
|
||||
"CHROME": true,
|
||||
"CHURCH": true,
|
||||
"CI": true,
|
||||
"CITIC": true,
|
||||
"CITY": true,
|
||||
"CK": true,
|
||||
"CL": true,
|
||||
"CLAIMS": true,
|
||||
"CLEANING": true,
|
||||
"CLICK": true,
|
||||
"CLINIC": true,
|
||||
"CLOTHING": true,
|
||||
"CLUB": true,
|
||||
"CM": true,
|
||||
"CN": true,
|
||||
"CO": true,
|
||||
"CODES": true,
|
||||
"COFFEE": true,
|
||||
"COLLEGE": true,
|
||||
"COLOGNE": true,
|
||||
"COM": true,
|
||||
"COMMUNITY": true,
|
||||
"COMPANY": true,
|
||||
"COMPUTER": true,
|
||||
"CONDOS": true,
|
||||
"CONSTRUCTION": true,
|
||||
"CONSULTING": true,
|
||||
"CONTRACTORS": true,
|
||||
"COOKING": true,
|
||||
"COOL": true,
|
||||
"COOP": true,
|
||||
"COUNTRY": true,
|
||||
"CR": true,
|
||||
"CREDIT": true,
|
||||
"CREDITCARD": true,
|
||||
"CRUISES": true,
|
||||
"CU": true,
|
||||
"CUISINELLA": true,
|
||||
"CV": true,
|
||||
"CW": true,
|
||||
"CX": true,
|
||||
"CY": true,
|
||||
"CYMRU": true,
|
||||
"CZ": true,
|
||||
"DAD": true,
|
||||
"DANCE": true,
|
||||
"DATING": true,
|
||||
"DAY": true,
|
||||
"DE": true,
|
||||
"DEALS": true,
|
||||
"DEGREE": true,
|
||||
"DEMOCRAT": true,
|
||||
"DENTAL": true,
|
||||
"DENTIST": true,
|
||||
"DESI": true,
|
||||
"DIAMONDS": true,
|
||||
"DIET": true,
|
||||
"DIGITAL": true,
|
||||
"DIRECT": true,
|
||||
"DIRECTORY": true,
|
||||
"DISCOUNT": true,
|
||||
"DJ": true,
|
||||
"DK": true,
|
||||
"DM": true,
|
||||
"DNP": true,
|
||||
"DO": true,
|
||||
"DOMAINS": true,
|
||||
"DURBAN": true,
|
||||
"DVAG": true,
|
||||
"DZ": true,
|
||||
"EAT": true,
|
||||
"EC": true,
|
||||
"EDU": true,
|
||||
"EDUCATION": true,
|
||||
"EE": true,
|
||||
"EG": true,
|
||||
"EMAIL": true,
|
||||
"ENGINEER": true,
|
||||
"ENGINEERING": true,
|
||||
"ENTERPRISES": true,
|
||||
"EQUIPMENT": true,
|
||||
"ER": true,
|
||||
"ES": true,
|
||||
"ESQ": true,
|
||||
"ESTATE": true,
|
||||
"ET": true,
|
||||
"EU": true,
|
||||
"EUS": true,
|
||||
"EVENTS": true,
|
||||
"EXCHANGE": true,
|
||||
"EXPERT": true,
|
||||
"EXPOSED": true,
|
||||
"FAIL": true,
|
||||
"FARM": true,
|
||||
"FEEDBACK": true,
|
||||
"FI": true,
|
||||
"FINANCE": true,
|
||||
"FINANCIAL": true,
|
||||
"FISH": true,
|
||||
"FISHING": true,
|
||||
"FITNESS": true,
|
||||
"FJ": true,
|
||||
"FK": true,
|
||||
"FLIGHTS": true,
|
||||
"FLORIST": true,
|
||||
"FLY": true,
|
||||
"FM": true,
|
||||
"FO": true,
|
||||
"FOO": true,
|
||||
"FORSALE": true,
|
||||
"FOUNDATION": true,
|
||||
"FR": true,
|
||||
"FRL": true,
|
||||
"FROGANS": true,
|
||||
"FUND": true,
|
||||
"FURNITURE": true,
|
||||
"FUTBOL": true,
|
||||
"GA": true,
|
||||
"GAL": true,
|
||||
"GALLERY": true,
|
||||
"GB": true,
|
||||
"GBIZ": true,
|
||||
"GD": true,
|
||||
"GE": true,
|
||||
"GENT": true,
|
||||
"GF": true,
|
||||
"GG": true,
|
||||
"GH": true,
|
||||
"GI": true,
|
||||
"GIFT": true,
|
||||
"GIFTS": true,
|
||||
"GIVES": true,
|
||||
"GL": true,
|
||||
"GLASS": true,
|
||||
"GLE": true,
|
||||
"GLOBAL": true,
|
||||
"GLOBO": true,
|
||||
"GM": true,
|
||||
"GMAIL": true,
|
||||
"GMO": true,
|
||||
"GMX": true,
|
||||
"GN": true,
|
||||
"GOOGLE": true,
|
||||
"GOP": true,
|
||||
"GOV": true,
|
||||
"GP": true,
|
||||
"GQ": true,
|
||||
"GR": true,
|
||||
"GRAPHICS": true,
|
||||
"GRATIS": true,
|
||||
"GREEN": true,
|
||||
"GRIPE": true,
|
||||
"GS": true,
|
||||
"GT": true,
|
||||
"GU": true,
|
||||
"GUIDE": true,
|
||||
"GUITARS": true,
|
||||
"GURU": true,
|
||||
"GW": true,
|
||||
"GY": true,
|
||||
"HAMBURG": true,
|
||||
"HAUS": true,
|
||||
"HEALTHCARE": true,
|
||||
"HELP": true,
|
||||
"HERE": true,
|
||||
"HIPHOP": true,
|
||||
"HIV": true,
|
||||
"HK": true,
|
||||
"HM": true,
|
||||
"HN": true,
|
||||
"HOLDINGS": true,
|
||||
"HOLIDAY": true,
|
||||
"HOMES": true,
|
||||
"HORSE": true,
|
||||
"HOST": true,
|
||||
"HOSTING": true,
|
||||
"HOUSE": true,
|
||||
"HOW": true,
|
||||
"HR": true,
|
||||
"HT": true,
|
||||
"HU": true,
|
||||
"IBM": true,
|
||||
"ID": true,
|
||||
"IE": true,
|
||||
"IL": true,
|
||||
"IM": true,
|
||||
"IMMO": true,
|
||||
"IMMOBILIEN": true,
|
||||
"IN": true,
|
||||
"INDUSTRIES": true,
|
||||
"INFO": true,
|
||||
"ING": true,
|
||||
"INK": true,
|
||||
"INSTITUTE": true,
|
||||
"INSURE": true,
|
||||
"INT": true,
|
||||
"INTERNATIONAL": true,
|
||||
"INVESTMENTS": true,
|
||||
"IO": true,
|
||||
"IQ": true,
|
||||
"IR": true,
|
||||
"IS": true,
|
||||
"IT": true,
|
||||
"JE": true,
|
||||
"JETZT": true,
|
||||
"JM": true,
|
||||
"JO": true,
|
||||
"JOBS": true,
|
||||
"JOBURG": true,
|
||||
"JP": true,
|
||||
"JUEGOS": true,
|
||||
"KAUFEN": true,
|
||||
"KE": true,
|
||||
"KG": true,
|
||||
"KH": true,
|
||||
"KI": true,
|
||||
"KIM": true,
|
||||
"KITCHEN": true,
|
||||
"KIWI": true,
|
||||
"KM": true,
|
||||
"KN": true,
|
||||
"KOELN": true,
|
||||
"KP": true,
|
||||
"KR": true,
|
||||
"KRD": true,
|
||||
"KRED": true,
|
||||
"KW": true,
|
||||
"KY": true,
|
||||
"KZ": true,
|
||||
"LA": true,
|
||||
"LACAIXA": true,
|
||||
"LAND": true,
|
||||
"LAWYER": true,
|
||||
"LB": true,
|
||||
"LC": true,
|
||||
"LEASE": true,
|
||||
"LGBT": true,
|
||||
"LI": true,
|
||||
"LIFE": true,
|
||||
"LIGHTING": true,
|
||||
"LIMITED": true,
|
||||
"LIMO": true,
|
||||
"LINK": true,
|
||||
"LK": true,
|
||||
"LOANS": true,
|
||||
"LONDON": true,
|
||||
"LOTTO": true,
|
||||
"LR": true,
|
||||
"LS": true,
|
||||
"LT": true,
|
||||
"LTDA": true,
|
||||
"LU": true,
|
||||
"LUXE": true,
|
||||
"LUXURY": true,
|
||||
"LV": true,
|
||||
"LY": true,
|
||||
"MA": true,
|
||||
"MAISON": true,
|
||||
"MANAGEMENT": true,
|
||||
"MANGO": true,
|
||||
"MARKET": true,
|
||||
"MARKETING": true,
|
||||
"MC": true,
|
||||
"MD": true,
|
||||
"ME": true,
|
||||
"MEDIA": true,
|
||||
"MEET": true,
|
||||
"MELBOURNE": true,
|
||||
"MEME": true,
|
||||
"MENU": true,
|
||||
"MG": true,
|
||||
"MH": true,
|
||||
"MIAMI": true,
|
||||
"MIL": true,
|
||||
"MINI": true,
|
||||
"MK": true,
|
||||
"ML": true,
|
||||
"MM": true,
|
||||
"MN": true,
|
||||
"MO": true,
|
||||
"MOBI": true,
|
||||
"MODA": true,
|
||||
"MOE": true,
|
||||
"MONASH": true,
|
||||
"MORTGAGE": true,
|
||||
"MOSCOW": true,
|
||||
"MOTORCYCLES": true,
|
||||
"MOV": true,
|
||||
"MP": true,
|
||||
"MQ": true,
|
||||
"MR": true,
|
||||
"MS": true,
|
||||
"MT": true,
|
||||
"MU": true,
|
||||
"MUSEUM": true,
|
||||
"MV": true,
|
||||
"MW": true,
|
||||
"MX": true,
|
||||
"MY": true,
|
||||
"MZ": true,
|
||||
"NA": true,
|
||||
"NAGOYA": true,
|
||||
"NAME": true,
|
||||
"NAVY": true,
|
||||
"NC": true,
|
||||
"NE": true,
|
||||
"NET": true,
|
||||
"NETWORK": true,
|
||||
"NEUSTAR": true,
|
||||
"NEW": true,
|
||||
"NEXUS": true,
|
||||
"NF": true,
|
||||
"NG": true,
|
||||
"NGO": true,
|
||||
"NHK": true,
|
||||
"NI": true,
|
||||
"NINJA": true,
|
||||
"NL": true,
|
||||
"NO": true,
|
||||
"NP": true,
|
||||
"NR": true,
|
||||
"NRA": true,
|
||||
"NRW": true,
|
||||
"NU": true,
|
||||
"NYC": true,
|
||||
"NZ": true,
|
||||
"OKINAWA": true,
|
||||
"OM": true,
|
||||
"ONG": true,
|
||||
"ONL": true,
|
||||
"OOO": true,
|
||||
"ORG": true,
|
||||
"ORGANIC": true,
|
||||
"OTSUKA": true,
|
||||
"OVH": true,
|
||||
"PA": true,
|
||||
"PARIS": true,
|
||||
"PARTNERS": true,
|
||||
"PARTS": true,
|
||||
"PE": true,
|
||||
"PF": true,
|
||||
"PG": true,
|
||||
"PH": true,
|
||||
"PHARMACY": true,
|
||||
"PHOTO": true,
|
||||
"PHOTOGRAPHY": true,
|
||||
"PHOTOS": true,
|
||||
"PHYSIO": true,
|
||||
"PICS": true,
|
||||
"PICTURES": true,
|
||||
"PINK": true,
|
||||
"PIZZA": true,
|
||||
"PK": true,
|
||||
"PL": true,
|
||||
"PLACE": true,
|
||||
"PLUMBING": true,
|
||||
"PM": true,
|
||||
"PN": true,
|
||||
"POHL": true,
|
||||
"POST": true,
|
||||
"PR": true,
|
||||
"PRAXI": true,
|
||||
"PRESS": true,
|
||||
"PRO": true,
|
||||
"PROD": true,
|
||||
"PRODUCTIONS": true,
|
||||
"PROF": true,
|
||||
"PROPERTIES": true,
|
||||
"PROPERTY": true,
|
||||
"PS": true,
|
||||
"PT": true,
|
||||
"PUB": true,
|
||||
"PW": true,
|
||||
"PY": true,
|
||||
"QA": true,
|
||||
"QPON": true,
|
||||
"QUEBEC": true,
|
||||
"RE": true,
|
||||
"REALTOR": true,
|
||||
"RECIPES": true,
|
||||
"RED": true,
|
||||
"REHAB": true,
|
||||
"REISE": true,
|
||||
"REISEN": true,
|
||||
"REN": true,
|
||||
"RENTALS": true,
|
||||
"REPAIR": true,
|
||||
"REPORT": true,
|
||||
"REPUBLICAN": true,
|
||||
"REST": true,
|
||||
"RESTAURANT": true,
|
||||
"REVIEWS": true,
|
||||
"RICH": true,
|
||||
"RIO": true,
|
||||
"RO": true,
|
||||
"ROCKS": true,
|
||||
"RODEO": true,
|
||||
"RS": true,
|
||||
"RSVP": true,
|
||||
"RU": true,
|
||||
"RUHR": true,
|
||||
"RW": true,
|
||||
"RYUKYU": true,
|
||||
"SA": true,
|
||||
"SAARLAND": true,
|
||||
"SARL": true,
|
||||
"SB": true,
|
||||
"SC": true,
|
||||
"SCA": true,
|
||||
"SCB": true,
|
||||
"SCHMIDT": true,
|
||||
"SCHULE": true,
|
||||
"SCOT": true,
|
||||
"SD": true,
|
||||
"SE": true,
|
||||
"SERVICES": true,
|
||||
"SEXY": true,
|
||||
"SG": true,
|
||||
"SH": true,
|
||||
"SHIKSHA": true,
|
||||
"SHOES": true,
|
||||
"SI": true,
|
||||
"SINGLES": true,
|
||||
"SJ": true,
|
||||
"SK": true,
|
||||
"SL": true,
|
||||
"SM": true,
|
||||
"SN": true,
|
||||
"SO": true,
|
||||
"SOCIAL": true,
|
||||
"SOFTWARE": true,
|
||||
"SOHU": true,
|
||||
"SOLAR": true,
|
||||
"SOLUTIONS": true,
|
||||
"SOY": true,
|
||||
"SPACE": true,
|
||||
"SPIEGEL": true,
|
||||
"SR": true,
|
||||
"ST": true,
|
||||
"SU": true,
|
||||
"SUPPLIES": true,
|
||||
"SUPPLY": true,
|
||||
"SUPPORT": true,
|
||||
"SURF": true,
|
||||
"SURGERY": true,
|
||||
"SUZUKI": true,
|
||||
"SV": true,
|
||||
"SX": true,
|
||||
"SY": true,
|
||||
"SYSTEMS": true,
|
||||
"SZ": true,
|
||||
"TATAR": true,
|
||||
"TATTOO": true,
|
||||
"TAX": true,
|
||||
"TC": true,
|
||||
"TD": true,
|
||||
"TECHNOLOGY": true,
|
||||
"TEL": true,
|
||||
"TF": true,
|
||||
"TG": true,
|
||||
"TH": true,
|
||||
"TIENDA": true,
|
||||
"TIPS": true,
|
||||
"TIROL": true,
|
||||
"TJ": true,
|
||||
"TK": true,
|
||||
"TL": true,
|
||||
"TM": true,
|
||||
"TN": true,
|
||||
"TO": true,
|
||||
"TODAY": true,
|
||||
"TOKYO": true,
|
||||
"TOOLS": true,
|
||||
"TOP": true,
|
||||
"TOWN": true,
|
||||
"TOYS": true,
|
||||
"TP": true,
|
||||
"TR": true,
|
||||
"TRADE": true,
|
||||
"TRAINING": true,
|
||||
"TRAVEL": true,
|
||||
"TT": true,
|
||||
"TUI": true,
|
||||
"TV": true,
|
||||
"TW": true,
|
||||
"TZ": true,
|
||||
"UA": true,
|
||||
"UG": true,
|
||||
"UK": true,
|
||||
"UNIVERSITY": true,
|
||||
"UNO": true,
|
||||
"UOL": true,
|
||||
"US": true,
|
||||
"UY": true,
|
||||
"UZ": true,
|
||||
"VA": true,
|
||||
"VACATIONS": true,
|
||||
"VC": true,
|
||||
"VE": true,
|
||||
"VEGAS": true,
|
||||
"VENTURES": true,
|
||||
"VERSICHERUNG": true,
|
||||
"VET": true,
|
||||
"VG": true,
|
||||
"VI": true,
|
||||
"VIAJES": true,
|
||||
"VILLAS": true,
|
||||
"VISION": true,
|
||||
"VLAANDEREN": true,
|
||||
"VN": true,
|
||||
"VODKA": true,
|
||||
"VOTE": true,
|
||||
"VOTING": true,
|
||||
"VOTO": true,
|
||||
"VOYAGE": true,
|
||||
"VU": true,
|
||||
"WALES": true,
|
||||
"WANG": true,
|
||||
"WATCH": true,
|
||||
"WEBCAM": true,
|
||||
"WEBSITE": true,
|
||||
"WED": true,
|
||||
"WF": true,
|
||||
"WHOSWHO": true,
|
||||
"WIEN": true,
|
||||
"WIKI": true,
|
||||
"WILLIAMHILL": true,
|
||||
"WME": true,
|
||||
"WORK": true,
|
||||
"WORKS": true,
|
||||
"WORLD": true,
|
||||
"WS": true,
|
||||
"WTC": true,
|
||||
"WTF": true,
|
||||
"XN--1QQW23A": true,
|
||||
"XN--3BST00M": true,
|
||||
"XN--3DS443G": true,
|
||||
"XN--3E0B707E": true,
|
||||
"XN--45BRJ9C": true,
|
||||
"XN--4GBRIM": true,
|
||||
"XN--55QW42G": true,
|
||||
"XN--55QX5D": true,
|
||||
"XN--6FRZ82G": true,
|
||||
"XN--6QQ986B3XL": true,
|
||||
"XN--80ADXHKS": true,
|
||||
"XN--80AO21A": true,
|
||||
"XN--80ASEHDB": true,
|
||||
"XN--80ASWG": true,
|
||||
"XN--90A3AC": true,
|
||||
"XN--C1AVG": true,
|
||||
"XN--CG4BKI": true,
|
||||
"XN--CLCHC0EA0B2G2A9GCD": true,
|
||||
"XN--CZR694B": true,
|
||||
"XN--CZRU2D": true,
|
||||
"XN--D1ACJ3B": true,
|
||||
"XN--FIQ228C5HS": true,
|
||||
"XN--FIQ64B": true,
|
||||
"XN--FIQS8S": true,
|
||||
"XN--FIQZ9S": true,
|
||||
"XN--FPCRJ9C3D": true,
|
||||
"XN--FZC2C9E2C": true,
|
||||
"XN--GECRJ9C": true,
|
||||
"XN--H2BRJ9C": true,
|
||||
"XN--I1B6B1A6A2E": true,
|
||||
"XN--IO0A7I": true,
|
||||
"XN--J1AMH": true,
|
||||
"XN--J6W193G": true,
|
||||
"XN--KPRW13D": true,
|
||||
"XN--KPRY57D": true,
|
||||
"XN--KPUT3I": true,
|
||||
"XN--L1ACC": true,
|
||||
"XN--LGBBAT1AD8J": true,
|
||||
"XN--MGB9AWBF": true,
|
||||
"XN--MGBA3A4F16A": true,
|
||||
"XN--MGBAAM7A8H": true,
|
||||
"XN--MGBAB2BD": true,
|
||||
"XN--MGBAYH7GPA": true,
|
||||
"XN--MGBBH1A71E": true,
|
||||
"XN--MGBC0A9AZCG": true,
|
||||
"XN--MGBERP4A5D4AR": true,
|
||||
"XN--MGBX4CD0AB": true,
|
||||
"XN--NGBC5AZD": true,
|
||||
"XN--NQV7F": true,
|
||||
"XN--NQV7FS00EMA": true,
|
||||
"XN--O3CW4H": true,
|
||||
"XN--OGBPF8FL": true,
|
||||
"XN--P1ACF": true,
|
||||
"XN--P1AI": true,
|
||||
"XN--PGBS0DH": true,
|
||||
"XN--Q9JYB4C": true,
|
||||
"XN--RHQV96G": true,
|
||||
"XN--S9BRJ9C": true,
|
||||
"XN--SES554G": true,
|
||||
"XN--UNUP4Y": true,
|
||||
"XN--VERMGENSBERATER-CTB": true,
|
||||
"XN--VERMGENSBERATUNG-PWB": true,
|
||||
"XN--VHQUV": true,
|
||||
"XN--WGBH1C": true,
|
||||
"XN--WGBL6A": true,
|
||||
"XN--XHQ521B": true,
|
||||
"XN--XKC2AL3HYE2A": true,
|
||||
"XN--XKC2DL3A5EE0H": true,
|
||||
"XN--YFRO4I67O": true,
|
||||
"XN--YGBI2AMMX": true,
|
||||
"XN--ZFR164B": true,
|
||||
"XXX": true,
|
||||
"XYZ": true,
|
||||
"YACHTS": true,
|
||||
"YANDEX": true,
|
||||
"YE": true,
|
||||
"YOKOHAMA": true,
|
||||
"YOUTUBE": true,
|
||||
"YT": true,
|
||||
"ZA": true,
|
||||
"ZIP": true,
|
||||
"ZM": true,
|
||||
"ZONE": true,
|
||||
"ZW": true,
|
||||
}
|
||||
|
||||
// ExtendedTLDs is a set of additional "TLDs", allowing decentralized name
|
||||
// systems, like TOR and Namecoin.
|
||||
var ExtendedTLDs = map[string]bool{
|
||||
"BIT": true,
|
||||
"ONION": true,
|
||||
}
|
||||
21
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/LICENSE
generated
vendored
Normal file
21
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
38
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md
generated
vendored
38
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md
generated
vendored
@ -7,18 +7,20 @@
|
||||
### Simple
|
||||
|
||||
```go
|
||||
import "github.com/jbenet/go-multiaddr"
|
||||
import ma "github.com/jbenet/go-multiaddr"
|
||||
|
||||
m := multiaddr.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
|
||||
// <Multiaddr /ip4/127.0.0.1/udp/1234>
|
||||
m.buffer
|
||||
// <Buffer >
|
||||
m.String()
|
||||
// /ip4/127.0.0.1/udp/1234
|
||||
// construct from a string (err signals parse failure)
|
||||
m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
|
||||
|
||||
// construct with Buffer
|
||||
m = multiaddr.Multiaddr{ Bytes: m.Bytes }
|
||||
// <Multiaddr /ip4/127.0.0.1/udp/1234>
|
||||
// construct from bytes (err signals parse failure)
|
||||
m2, err := ma.NewMultiaddrBytes(m1.Bytes())
|
||||
|
||||
// true
|
||||
strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
|
||||
strings.Equal(m1.String(), m2.String())
|
||||
bytes.Equal(m1.Bytes(), m2.Bytes())
|
||||
m1.Equal(m2)
|
||||
m2.Equal(m1)
|
||||
```
|
||||
|
||||
### Protocols
|
||||
@ -36,7 +38,7 @@ addr.Protocols()
|
||||
|
||||
```go
|
||||
// handles the stupid url version too
|
||||
m = multiaddr.NewUrl("udp4://127.0.0.1:1234")
|
||||
m = ma.NewUrl("udp4://127.0.0.1:1234")
|
||||
// <Multiaddr /ip4/127.0.0.1/udp/1234>
|
||||
m.Url(buf)
|
||||
// udp4://127.0.0.1:1234
|
||||
@ -45,9 +47,9 @@ m.Url(buf)
|
||||
### En/decapsulate
|
||||
|
||||
```go
|
||||
m.Encapsulate(m.NewMultiaddr("/sctp/5678"))
|
||||
m.Encapsulate(ma.NewMultiaddr("/sctp/5678"))
|
||||
// <Multiaddr /ip4/127.0.0.1/udp/1234/sctp/5678>
|
||||
m.Decapsulate(m.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr
|
||||
m.Decapsulate(ma.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr
|
||||
// <Multiaddr /ip4/127.0.0.1>
|
||||
```
|
||||
|
||||
@ -56,11 +58,11 @@ m.Decapsulate(m.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr
|
||||
Multiaddr allows expressing tunnels very nicely.
|
||||
|
||||
```js
|
||||
printer := multiaddr.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
|
||||
proxy := multiaddr.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
|
||||
printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
|
||||
proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
|
||||
printerOverProxy := proxy.Encapsulate(printer)
|
||||
// <Multiaddr /ip4/10.20.30.40/tcp/443/ip4/192.168.0.13/tcp/80>
|
||||
// /ip4/10.20.30.40/tcp/443/ip4/192.168.0.13/tcp/80
|
||||
|
||||
proxyAgain := printerOverProxy.Decapsulate(multiaddr.NewMultiaddr("/ip4"))
|
||||
// <Multiaddr /ip4/10.20.30.40/tcp/443>
|
||||
proxyAgain := printerOverProxy.Decapsulate(printer)
|
||||
// /ip4/10.20.30.40/tcp/443
|
||||
```
|
||||
|
||||
36
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/doc.go
generated
vendored
Normal file
36
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/doc.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Package multiaddr provides an implementation of the Multiaddr network
|
||||
address format. Multiaddr emphasizes explicitness, self-description, and
|
||||
portability. It allows applications to treat addresses as opaque tokens,
|
||||
and to avoid making assumptions about the address representation (e.g. length).
|
||||
Learn more at https://github.com/jbenet/multiaddr
|
||||
|
||||
Basic Use:
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
ma "github.com/jbenet/go-multiaddr"
|
||||
)
|
||||
|
||||
// construct from a string (err signals parse failure)
|
||||
m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
|
||||
|
||||
// construct from bytes (err signals parse failure)
|
||||
m2, err := ma.NewMultiaddrBytes(m1.Bytes())
|
||||
|
||||
// true
|
||||
strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
|
||||
strings.Equal(m1.String(), m2.String())
|
||||
bytes.Equal(m1.Bytes(), m2.Bytes())
|
||||
m1.Equal(m2)
|
||||
m2.Equal(m1)
|
||||
|
||||
// tunneling (en/decap)
|
||||
printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
|
||||
proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
|
||||
printerOverProxy := proxy.Encapsulate(printer)
|
||||
proxyAgain := printerOverProxy.Decapsulate(printer)
|
||||
|
||||
*/
|
||||
package multiaddr
|
||||
117
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go
generated
vendored
117
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
package multiaddr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Multiaddr is the data structure representing a multiaddr
|
||||
type Multiaddr struct {
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
// NewMultiaddr parses and validates an input string, returning a *Multiaddr
|
||||
func NewMultiaddr(s string) (*Multiaddr, error) {
|
||||
b, err := stringToBytes(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Multiaddr{Bytes: b}, nil
|
||||
}
|
||||
|
||||
// Equal tests whether two multiaddrs are equal
|
||||
func (m *Multiaddr) Equal(m2 *Multiaddr) bool {
|
||||
return bytes.Equal(m.Bytes, m2.Bytes)
|
||||
}
|
||||
|
||||
// String returns the string representation of a Multiaddr
|
||||
func (m *Multiaddr) String() (string, error) {
|
||||
return bytesToString(m.Bytes)
|
||||
}
|
||||
|
||||
// Protocols returns the list of protocols this Multiaddr has.
|
||||
func (m *Multiaddr) Protocols() (ret []*Protocol, err error) {
|
||||
|
||||
// panic handler, in case we try accessing bytes incorrectly.
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ret = nil
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
ps := []*Protocol{}
|
||||
b := m.Bytes[:]
|
||||
for len(b) > 0 {
|
||||
p := ProtocolWithCode(int(b[0]))
|
||||
if p == nil {
|
||||
return nil, fmt.Errorf("no protocol with code %d", b[0])
|
||||
}
|
||||
ps = append(ps, p)
|
||||
b = b[1+(p.Size/8):]
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr
|
||||
func (m *Multiaddr) Encapsulate(o *Multiaddr) *Multiaddr {
|
||||
b := make([]byte, len(m.Bytes)+len(o.Bytes))
|
||||
b = append(m.Bytes, o.Bytes...)
|
||||
return &Multiaddr{Bytes: b}
|
||||
}
|
||||
|
||||
// Decapsulate unwraps Multiaddr up until the given Multiaddr is found.
|
||||
func (m *Multiaddr) Decapsulate(o *Multiaddr) (*Multiaddr, error) {
|
||||
s1, err := m.String()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s2, err := o.String()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
i := strings.LastIndex(s1, s2)
|
||||
if i < 0 {
|
||||
return nil, fmt.Errorf("%s not contained in %s", s2, s1)
|
||||
}
|
||||
return NewMultiaddr(s1[:i])
|
||||
}
|
||||
|
||||
// DialArgs is a convenience function returning arguments for use in net.Dial
|
||||
func (m *Multiaddr) DialArgs() (string, string, error) {
|
||||
if !m.IsThinWaist() {
|
||||
return "", "", fmt.Errorf("%s is not a 'thin waist' address", m)
|
||||
}
|
||||
|
||||
str, err := m.String()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
parts := strings.Split(str, "/")[1:]
|
||||
network := parts[2]
|
||||
host := strings.Join([]string{parts[1], parts[3]}, ":")
|
||||
return network, host, nil
|
||||
}
|
||||
|
||||
// IsThinWaist returns whether this multiaddr includes "Thin Waist" Protocols.
|
||||
// This means: /{IP4, IP6}/{TCP, UDP}
|
||||
func (m *Multiaddr) IsThinWaist() bool {
|
||||
p, err := m.Protocols()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if p[0].Code != P_IP4 && p[0].Code != P_IP6 {
|
||||
return false
|
||||
}
|
||||
|
||||
if p[1].Code != P_TCP && p[1].Code != P_UDP {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
42
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/interface.go
generated
vendored
Normal file
42
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/interface.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package multiaddr
|
||||
|
||||
/*
|
||||
Multiaddr is a cross-protocol, cross-platform format for representing
|
||||
internet addresses. It emphasizes explicitness and self-description.
|
||||
Learn more here: https://github.com/jbenet/multiaddr
|
||||
|
||||
Multiaddrs have both a binary and string representation.
|
||||
|
||||
import ma "github.com/jbenet/go-multiaddr"
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
|
||||
// err non-nil when parsing failed.
|
||||
|
||||
*/
|
||||
type Multiaddr interface {
|
||||
// Equal returns whether two Multiaddrs are exactly equal
|
||||
Equal(Multiaddr) bool
|
||||
|
||||
// Bytes returns the []byte representation of this Multiaddr
|
||||
Bytes() []byte
|
||||
|
||||
// String returns the string representation of this Multiaddr
|
||||
// (may panic if internal state is corrupted)
|
||||
String() string
|
||||
|
||||
// Protocols returns the list of Protocols this Multiaddr includes
|
||||
// will panic if protocol code incorrect (and bytes accessed incorrectly)
|
||||
Protocols() []*Protocol
|
||||
|
||||
// Encapsulate wraps this Multiaddr around another. For example:
|
||||
//
|
||||
// /ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80
|
||||
//
|
||||
Encapsulate(Multiaddr) Multiaddr
|
||||
|
||||
// Decapsultate removes a Multiaddr wrapping. For example:
|
||||
//
|
||||
// /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80
|
||||
//
|
||||
Decapsulate(Multiaddr) Multiaddr
|
||||
}
|
||||
110
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr.go
generated
vendored
Normal file
110
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
package multiaddr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// multiaddr is the data structure representing a Multiaddr
|
||||
type multiaddr struct {
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
// NewMultiaddr parses and validates an input string, returning a *Multiaddr
|
||||
func NewMultiaddr(s string) (Multiaddr, error) {
|
||||
b, err := stringToBytes(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &multiaddr{bytes: b}, nil
|
||||
}
|
||||
|
||||
// NewMultiaddrBytes initializes a Multiaddr from a byte representation.
|
||||
// It validates it as an input string.
|
||||
func NewMultiaddrBytes(b []byte) (Multiaddr, error) {
|
||||
s, err := bytesToString(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewMultiaddr(s)
|
||||
}
|
||||
|
||||
// Equal tests whether two multiaddrs are equal
|
||||
func (m *multiaddr) Equal(m2 Multiaddr) bool {
|
||||
return bytes.Equal(m.bytes, m2.Bytes())
|
||||
}
|
||||
|
||||
// Bytes returns the []byte representation of this Multiaddr
|
||||
func (m *multiaddr) Bytes() []byte {
|
||||
// consider returning copy to prevent changing underneath us?
|
||||
cpy := make([]byte, len(m.bytes))
|
||||
copy(cpy, m.bytes)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// String returns the string representation of a Multiaddr
|
||||
func (m *multiaddr) String() string {
|
||||
s, err := bytesToString(m.bytes)
|
||||
if err != nil {
|
||||
panic("multiaddr failed to convert back to string. corrupted?")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Protocols returns the list of protocols this Multiaddr has.
|
||||
// will panic in case we access bytes incorrectly.
|
||||
func (m *multiaddr) Protocols() []*Protocol {
|
||||
|
||||
// panic handler, in case we try accessing bytes incorrectly.
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err := e.(error)
|
||||
panic("Multiaddr.Protocols error: " + err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
ps := []*Protocol{}
|
||||
b := m.bytes[:]
|
||||
for len(b) > 0 {
|
||||
p := ProtocolWithCode(int(b[0]))
|
||||
if p == nil {
|
||||
// this is a panic (and not returning err) because this should've been
|
||||
// caught on constructing the Multiaddr
|
||||
panic(fmt.Errorf("no protocol with code %d", b[0]))
|
||||
}
|
||||
ps = append(ps, p)
|
||||
b = b[1+(p.Size/8):]
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr
|
||||
func (m *multiaddr) Encapsulate(o Multiaddr) Multiaddr {
|
||||
mb := m.bytes
|
||||
ob := o.Bytes()
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Write(mb)
|
||||
b.Write(ob)
|
||||
return &multiaddr{bytes: b.Bytes()}
|
||||
}
|
||||
|
||||
// Decapsulate unwraps Multiaddr up until the given Multiaddr is found.
|
||||
func (m *multiaddr) Decapsulate(o Multiaddr) Multiaddr {
|
||||
s1 := m.String()
|
||||
s2 := o.String()
|
||||
i := strings.LastIndex(s1, s2)
|
||||
if i < 0 {
|
||||
// if multiaddr not contained, returns a copy.
|
||||
cpy := make([]byte, len(m.bytes))
|
||||
copy(cpy, m.bytes)
|
||||
return &multiaddr{bytes: cpy}
|
||||
}
|
||||
|
||||
ma, err := NewMultiaddr(s1[:i])
|
||||
if err != nil {
|
||||
panic("Multiaddr.Decapsulate incorrect byte boundaries.")
|
||||
}
|
||||
return ma
|
||||
}
|
||||
46
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go
generated
vendored
46
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go
generated
vendored
@ -6,7 +6,7 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func newMultiaddr(t *testing.T, a string) *Multiaddr {
|
||||
func newMultiaddr(t *testing.T, a string) Multiaddr {
|
||||
m, err := NewMultiaddr(a)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@ -88,11 +88,7 @@ func TestProtocols(t *testing.T) {
|
||||
t.Error("failed to construct", "/ip4/127.0.0.1/udp/1234")
|
||||
}
|
||||
|
||||
ps, err := m.Protocols()
|
||||
if err != nil {
|
||||
t.Error("failed to get protocols", "/ip4/127.0.0.1/udp/1234")
|
||||
}
|
||||
|
||||
ps := m.Protocols()
|
||||
if ps[0] != ProtocolWithName("ip4") {
|
||||
t.Error(ps[0], ProtocolWithName("ip4"))
|
||||
t.Error("failed to get ip4 protocol")
|
||||
@ -117,47 +113,19 @@ func TestEncapsulate(t *testing.T) {
|
||||
}
|
||||
|
||||
b := m.Encapsulate(m2)
|
||||
if s, _ := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" {
|
||||
if s := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" {
|
||||
t.Error("encapsulate /ip4/127.0.0.1/udp/1234/udp/5678 failed.", s)
|
||||
}
|
||||
|
||||
m3, _ := NewMultiaddr("/udp/5678")
|
||||
c, err := b.Decapsulate(m3)
|
||||
if err != nil {
|
||||
t.Error("decapsulate /udp failed.", err)
|
||||
}
|
||||
|
||||
if s, _ := c.String(); s != "/ip4/127.0.0.1/udp/1234" {
|
||||
c := b.Decapsulate(m3)
|
||||
if s := c.String(); s != "/ip4/127.0.0.1/udp/1234" {
|
||||
t.Error("decapsulate /udp failed.", "/ip4/127.0.0.1/udp/1234", s)
|
||||
}
|
||||
|
||||
m4, _ := NewMultiaddr("/ip4/127.0.0.1")
|
||||
d, err := c.Decapsulate(m4)
|
||||
if err != nil {
|
||||
t.Error("decapsulate /ip4 failed.", err)
|
||||
}
|
||||
|
||||
if s, _ := d.String(); s != "" {
|
||||
d := c.Decapsulate(m4)
|
||||
if s := d.String(); s != "" {
|
||||
t.Error("decapsulate /ip4 failed.", "/", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialArgs(t *testing.T) {
|
||||
m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234")
|
||||
if err != nil {
|
||||
t.Fatal("failed to construct", "/ip4/127.0.0.1/udp/1234")
|
||||
}
|
||||
|
||||
nw, host, err := m.DialArgs()
|
||||
if err != nil {
|
||||
t.Fatal("failed to get dial args", "/ip4/127.0.0.1/udp/1234", err)
|
||||
}
|
||||
|
||||
if nw != "udp" {
|
||||
t.Error("failed to get udp network Dial Arg")
|
||||
}
|
||||
|
||||
if host != "127.0.0.1:1234" {
|
||||
t.Error("failed to get host:port Dial Arg")
|
||||
}
|
||||
}
|
||||
|
||||
40
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go
generated
vendored
40
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go
generated
vendored
@ -3,12 +3,13 @@ package multiaddr
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var errIncorrectNetAddr = fmt.Errorf("incorrect network addr conversion")
|
||||
|
||||
// FromNetAddr converts a net.Addr type to a Multiaddr.
|
||||
func FromNetAddr(a net.Addr) (*Multiaddr, error) {
|
||||
func FromNetAddr(a net.Addr) (Multiaddr, error) {
|
||||
switch a.Network() {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
ac, ok := a.(*net.TCPAddr)
|
||||
@ -65,7 +66,7 @@ func FromNetAddr(a net.Addr) (*Multiaddr, error) {
|
||||
}
|
||||
|
||||
// FromIP converts a net.IP type to a Multiaddr.
|
||||
func FromIP(ip net.IP) (*Multiaddr, error) {
|
||||
func FromIP(ip net.IP) (Multiaddr, error) {
|
||||
switch {
|
||||
case ip.To4() != nil:
|
||||
return NewMultiaddr("/ip4/" + ip.String())
|
||||
@ -75,3 +76,38 @@ func FromIP(ip net.IP) (*Multiaddr, error) {
|
||||
return nil, errIncorrectNetAddr
|
||||
}
|
||||
}
|
||||
|
||||
// DialArgs is a convenience function returning arguments for use in net.Dial
|
||||
func DialArgs(m Multiaddr) (string, string, error) {
|
||||
if !IsThinWaist(m) {
|
||||
return "", "", fmt.Errorf("%s is not a 'thin waist' address", m)
|
||||
}
|
||||
|
||||
str := m.String()
|
||||
parts := strings.Split(str, "/")[1:]
|
||||
network := parts[2]
|
||||
|
||||
var host string
|
||||
switch parts[0] {
|
||||
case "ip4":
|
||||
host = strings.Join([]string{parts[1], parts[3]}, ":")
|
||||
case "ip6":
|
||||
host = fmt.Sprintf("[%s]:%s", parts[1], parts[3])
|
||||
}
|
||||
return network, host, nil
|
||||
}
|
||||
|
||||
// IsThinWaist returns whether a Multiaddr starts with "Thin Waist" Protocols.
|
||||
// This means: /{IP4, IP6}/{TCP, UDP}
|
||||
func IsThinWaist(m Multiaddr) bool {
|
||||
p := m.Protocols()
|
||||
if p[0].Code != P_IP4 && p[0].Code != P_IP6 {
|
||||
return false
|
||||
}
|
||||
|
||||
if p[1].Code != P_TCP && p[1].Code != P_UDP {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
32
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go
generated
vendored
32
Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go
generated
vendored
@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type GenFunc func() (*Multiaddr, error)
|
||||
type GenFunc func() (Multiaddr, error)
|
||||
|
||||
func testConvert(t *testing.T, s string, gen GenFunc) {
|
||||
m, err := gen()
|
||||
@ -13,25 +13,25 @@ func testConvert(t *testing.T, s string, gen GenFunc) {
|
||||
t.Fatal("failed to generate.")
|
||||
}
|
||||
|
||||
if s2, _ := m.String(); err != nil || s2 != s {
|
||||
if s2 := m.String(); err != nil || s2 != s {
|
||||
t.Fatal("failed to convert: " + s + " != " + s2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromIP4(t *testing.T) {
|
||||
testConvert(t, "/ip4/10.20.30.40", func() (*Multiaddr, error) {
|
||||
testConvert(t, "/ip4/10.20.30.40", func() (Multiaddr, error) {
|
||||
return FromIP(net.ParseIP("10.20.30.40"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFromIP6(t *testing.T) {
|
||||
testConvert(t, "/ip6/2001:4860:0:2001::68", func() (*Multiaddr, error) {
|
||||
testConvert(t, "/ip6/2001:4860:0:2001::68", func() (Multiaddr, error) {
|
||||
return FromIP(net.ParseIP("2001:4860:0:2001::68"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFromTCP(t *testing.T) {
|
||||
testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (*Multiaddr, error) {
|
||||
testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (Multiaddr, error) {
|
||||
return FromNetAddr(&net.TCPAddr{
|
||||
IP: net.ParseIP("10.20.30.40"),
|
||||
Port: 1234,
|
||||
@ -40,10 +40,30 @@ func TestFromTCP(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFromUDP(t *testing.T) {
|
||||
testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (*Multiaddr, error) {
|
||||
testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (Multiaddr, error) {
|
||||
return FromNetAddr(&net.UDPAddr{
|
||||
IP: net.ParseIP("10.20.30.40"),
|
||||
Port: 1234,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestDialArgs(t *testing.T) {
|
||||
m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234")
|
||||
if err != nil {
|
||||
t.Fatal("failed to construct", "/ip4/127.0.0.1/udp/1234")
|
||||
}
|
||||
|
||||
nw, host, err := DialArgs(m)
|
||||
if err != nil {
|
||||
t.Fatal("failed to get dial args", "/ip4/127.0.0.1/udp/1234", err)
|
||||
}
|
||||
|
||||
if nw != "udp" {
|
||||
t.Error("failed to get udp network Dial Arg")
|
||||
}
|
||||
|
||||
if host != "127.0.0.1:1234" {
|
||||
t.Error("failed to get host:port Dial Arg")
|
||||
}
|
||||
}
|
||||
|
||||
@ -12,12 +12,8 @@ type Block struct {
|
||||
}
|
||||
|
||||
// NewBlock creates a Block object from opaque data. It will hash the data.
|
||||
func NewBlock(data []byte) (*Block, error) {
|
||||
h, err := u.Hash(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Block{Data: data, Multihash: h}, nil
|
||||
func NewBlock(data []byte) *Block {
|
||||
return &Block{Data: data, Multihash: u.Hash(data)}
|
||||
}
|
||||
|
||||
// Key returns the block's Multihash as a Key value.
|
||||
|
||||
@ -6,20 +6,11 @@ func TestBlocksBasic(t *testing.T) {
|
||||
|
||||
// Test empty data
|
||||
empty := []byte{}
|
||||
_, err := NewBlock(empty)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
NewBlock(empty)
|
||||
|
||||
// Test nil case
|
||||
_, err = NewBlock(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
NewBlock(nil)
|
||||
|
||||
// Test some data
|
||||
_, err = NewBlock([]byte("Hello world!"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
NewBlock([]byte("Hello world!"))
|
||||
}
|
||||
|
||||
@ -17,18 +17,8 @@ func TestBlocks(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := blocks.NewBlock([]byte("beep boop"))
|
||||
if err != nil {
|
||||
t.Error("failed to construct block", err)
|
||||
return
|
||||
}
|
||||
|
||||
h, err := u.Hash([]byte("beep boop"))
|
||||
if err != nil {
|
||||
t.Error("failed to hash data", err)
|
||||
return
|
||||
}
|
||||
|
||||
b := blocks.NewBlock([]byte("beep boop"))
|
||||
h := u.Hash([]byte("beep boop"))
|
||||
if !bytes.Equal(b.Multihash, h) {
|
||||
t.Error("Block Multihash and data multihash not equal")
|
||||
}
|
||||
|
||||
@ -13,6 +13,8 @@ import (
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("blockservice")
|
||||
|
||||
// BlockService is a block datastore.
|
||||
// It uses an internal `datastore.Datastore` instance to store values.
|
||||
type BlockService struct {
|
||||
@ -26,7 +28,7 @@ func NewBlockService(d ds.Datastore, rem exchange.Interface) (*BlockService, err
|
||||
return nil, fmt.Errorf("BlockService requires valid datastore")
|
||||
}
|
||||
if rem == nil {
|
||||
u.DErr("Caution: blockservice running in local (offline) mode.\n")
|
||||
log.Warning("blockservice running in local (offline) mode.")
|
||||
}
|
||||
return &BlockService{Datastore: d, Remote: rem}, nil
|
||||
}
|
||||
@ -34,11 +36,10 @@ func NewBlockService(d ds.Datastore, rem exchange.Interface) (*BlockService, err
|
||||
// AddBlock adds a particular block to the service, Putting it into the datastore.
|
||||
func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
|
||||
k := b.Key()
|
||||
dsk := ds.NewKey(string(k))
|
||||
u.DOut("storing [%s] in datastore\n", k.Pretty())
|
||||
log.Debug("blockservice: storing [%s] in datastore", k)
|
||||
// TODO(brian): define a block datastore with a Put method which accepts a
|
||||
// block parameter
|
||||
err := s.Datastore.Put(dsk, b.Data)
|
||||
err := s.Datastore.Put(k.DsKey(), b.Data)
|
||||
if err != nil {
|
||||
return k, err
|
||||
}
|
||||
@ -52,11 +53,10 @@ func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
|
||||
// GetBlock retrieves a particular block from the service,
|
||||
// Getting it from the datastore using the key (hash).
|
||||
func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) {
|
||||
u.DOut("BlockService GetBlock: '%s'\n", k.Pretty())
|
||||
dsk := ds.NewKey(string(k))
|
||||
datai, err := s.Datastore.Get(dsk)
|
||||
log.Debug("BlockService GetBlock: '%s'", k)
|
||||
datai, err := s.Datastore.Get(k.DsKey())
|
||||
if err == nil {
|
||||
u.DOut("Blockservice: Got data in datastore.\n")
|
||||
log.Debug("Blockservice: Got data in datastore.")
|
||||
bdata, ok := datai.([]byte)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("data associated with %s is not a []byte", k)
|
||||
@ -66,7 +66,7 @@ func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) {
|
||||
Data: bdata,
|
||||
}, nil
|
||||
} else if err == ds.ErrNotFound && s.Remote != nil {
|
||||
u.DOut("Blockservice: Searching bitswap.\n")
|
||||
log.Debug("Blockservice: Searching bitswap.")
|
||||
ctx, _ := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||
blk, err := s.Remote.Block(ctx, k)
|
||||
if err != nil {
|
||||
@ -74,7 +74,7 @@ func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) {
|
||||
}
|
||||
return blk, nil
|
||||
} else {
|
||||
u.DOut("Blockservice GetBlock: Not found.\n")
|
||||
log.Debug("Blockservice GetBlock: Not found.")
|
||||
return nil, u.ErrNotFound
|
||||
}
|
||||
}
|
||||
|
||||
@ -13,7 +13,7 @@ var ValueTypeMismatch = errors.New("The retrieved value is not a Block")
|
||||
|
||||
type Blockstore interface {
|
||||
Get(u.Key) (*blocks.Block, error)
|
||||
Put(blocks.Block) error
|
||||
Put(*blocks.Block) error
|
||||
}
|
||||
|
||||
func NewBlockstore(d ds.Datastore) Blockstore {
|
||||
@ -27,7 +27,7 @@ type blockstore struct {
|
||||
}
|
||||
|
||||
func (bs *blockstore) Get(k u.Key) (*blocks.Block, error) {
|
||||
maybeData, err := bs.datastore.Get(toDatastoreKey(k))
|
||||
maybeData, err := bs.datastore.Get(k.DsKey())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -35,13 +35,9 @@ func (bs *blockstore) Get(k u.Key) (*blocks.Block, error) {
|
||||
if !ok {
|
||||
return nil, ValueTypeMismatch
|
||||
}
|
||||
return blocks.NewBlock(bdata)
|
||||
return blocks.NewBlock(bdata), nil
|
||||
}
|
||||
|
||||
func (bs *blockstore) Put(block blocks.Block) error {
|
||||
return bs.datastore.Put(toDatastoreKey(block.Key()), block.Data)
|
||||
}
|
||||
|
||||
func toDatastoreKey(k u.Key) ds.Key {
|
||||
return ds.NewKey(string(k))
|
||||
func (bs *blockstore) Put(block *blocks.Block) error {
|
||||
return bs.datastore.Put(block.Key().DsKey(), block.Data)
|
||||
}
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
"testing"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
// TODO(brian): TestGetReturnsNil
|
||||
@ -24,7 +24,7 @@ func TestGetWhenKeyNotPresent(t *testing.T) {
|
||||
|
||||
func TestPutThenGetBlock(t *testing.T) {
|
||||
bs := NewBlockstore(ds.NewMapDatastore())
|
||||
block := testutil.NewBlockOrFail(t, "some data")
|
||||
block := blocks.NewBlock([]byte("some data"))
|
||||
|
||||
err := bs.Put(block)
|
||||
if err != nil {
|
||||
@ -41,10 +41,10 @@ func TestPutThenGetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestValueTypeMismatch(t *testing.T) {
|
||||
block := testutil.NewBlockOrFail(t, "some data")
|
||||
block := blocks.NewBlock([]byte("some data"))
|
||||
|
||||
datastore := ds.NewMapDatastore()
|
||||
datastore.Put(toDatastoreKey(block.Key()), "data that isn't a block!")
|
||||
datastore.Put(block.Key().DsKey(), "data that isn't a block!")
|
||||
|
||||
blockstore := NewBlockstore(datastore)
|
||||
|
||||
|
||||
@ -29,4 +29,9 @@ func init() {
|
||||
cmdIpfsAdd.Flag.Bool("r", false, "add objects recursively")
|
||||
}
|
||||
|
||||
var addCmd = MakeCommand("add", []string{"r"}, commands.Add)
|
||||
var addCmd = makeCommand(command{
|
||||
name: "add",
|
||||
args: 1,
|
||||
flags: []string{"r"},
|
||||
cmdFn: commands.Add,
|
||||
})
|
||||
|
||||
@ -222,10 +222,7 @@ func bootstrapInputToPeers(input []string) ([]*config.BootstrapPeer, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addrS, err = maddr.String()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addrS = maddr.String()
|
||||
}
|
||||
|
||||
// make sure idS parses as a peer.ID
|
||||
|
||||
@ -18,33 +18,9 @@ var cmdIpfsCat = &commander.Command{
|
||||
Flag: *flag.NewFlagSet("ipfs-cat", flag.ExitOnError),
|
||||
}
|
||||
|
||||
var catCmd = MakeCommand("cat", nil, commands.Cat)
|
||||
|
||||
/*
|
||||
func catCmd(c *commander.Command, inp []string) error {
|
||||
if len(inp) < 1 {
|
||||
u.POut(c.Long)
|
||||
return nil
|
||||
}
|
||||
|
||||
conf, err := getConfigDir(c.Parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
com := daemon.NewCommand()
|
||||
com.Command = "cat"
|
||||
com.Args = inp
|
||||
|
||||
err = daemon.SendCommand(com, conf)
|
||||
if err != nil {
|
||||
n, err := localNode(conf, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return commands.Cat(n, com.Args, com.Opts, os.Stdout)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
var catCmd = makeCommand(command{
|
||||
name: "cat",
|
||||
args: 1,
|
||||
flags: nil,
|
||||
cmdFn: commands.Cat,
|
||||
})
|
||||
|
||||
@ -1,62 +1,60 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
"github.com/jbenet/go-ipfs/config"
|
||||
"github.com/jbenet/go-ipfs/core/commands"
|
||||
"github.com/jbenet/go-ipfs/daemon"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
// CommanderFunc is a function that can be passed into the Commander library as
|
||||
// a command handler. Defined here because commander lacks this definition.
|
||||
type CommanderFunc func(*commander.Command, []string) error
|
||||
// command is the descriptor of an ipfs daemon command.
|
||||
// Used with makeCommand to proxy over commands via the daemon.
|
||||
type command struct {
|
||||
name string
|
||||
args int
|
||||
flags []string
|
||||
online bool
|
||||
cmdFn commands.CmdFunc
|
||||
}
|
||||
|
||||
// MakeCommand Wraps a commands.CmdFunc so that it may be safely run by the
|
||||
// commanderFunc is a function that can be passed into the Commander library as
|
||||
// a command handler. Defined here because commander lacks this definition.
|
||||
type commanderFunc func(*commander.Command, []string) error
|
||||
|
||||
// makeCommand Wraps a commands.CmdFunc so that it may be safely run by the
|
||||
// commander library
|
||||
func MakeCommand(cmdName string, expargs []string, cmdFn commands.CmdFunc) CommanderFunc {
|
||||
func makeCommand(cmdDesc command) commanderFunc {
|
||||
return func(c *commander.Command, inp []string) error {
|
||||
if len(inp) < 1 {
|
||||
if len(inp) < cmdDesc.args {
|
||||
u.POut(c.Long)
|
||||
return nil
|
||||
}
|
||||
confdir, err := getConfigDir(c.Parent)
|
||||
confdir, err := getConfigDir(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
confapi, err := config.ReadConfigKey(confdir+"/config", "Addresses.API")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiaddr, ok := confapi.(string)
|
||||
if !ok {
|
||||
return errors.New("ApiAddress in config file was not a string")
|
||||
}
|
||||
|
||||
cmd := daemon.NewCommand()
|
||||
cmd.Command = cmdName
|
||||
cmd.Command = cmdDesc.name
|
||||
cmd.Args = inp
|
||||
|
||||
for _, a := range expargs {
|
||||
for _, a := range cmdDesc.flags {
|
||||
cmd.Opts[a] = c.Flag.Lookup(a).Value.Get()
|
||||
}
|
||||
err = daemon.SendCommand(cmd, apiaddr)
|
||||
|
||||
err = daemon.SendCommand(cmd, confdir)
|
||||
if err != nil {
|
||||
fmt.Printf("Executing command locally: %s", err)
|
||||
log.Info("Executing command locally: %s", err)
|
||||
// Do locally
|
||||
n, err := localNode(confdir, false)
|
||||
n, err := localNode(confdir, cmdDesc.online)
|
||||
if err != nil {
|
||||
fmt.Println("Local node creation failed.")
|
||||
return err
|
||||
return fmt.Errorf("Local node creation failed: %v", err)
|
||||
}
|
||||
|
||||
return cmdFn(n, cmd.Args, cmd.Opts, os.Stdout)
|
||||
return cmdDesc.cmdFn(n, cmd.Args, cmd.Opts, os.Stdout)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -92,6 +92,12 @@ func initCmd(c *commander.Command, inp []string) error {
|
||||
API: "/ip4/127.0.0.1/tcp/5001",
|
||||
}
|
||||
|
||||
// setup the node mount points.
|
||||
cfg.Mounts = config.Mounts{
|
||||
IPFS: "/ipfs",
|
||||
IPNS: "/ipns",
|
||||
}
|
||||
|
||||
nbits, ok := c.Flag.Lookup("b").Value.Get().(int)
|
||||
if !ok {
|
||||
return errors.New("failed to get bits flag")
|
||||
|
||||
@ -1,14 +1,18 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
"github.com/jbenet/go-ipfs/config"
|
||||
flag "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
|
||||
config "github.com/jbenet/go-ipfs/config"
|
||||
core "github.com/jbenet/go-ipfs/core"
|
||||
daemon "github.com/jbenet/go-ipfs/daemon"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
@ -50,11 +54,16 @@ Use "ipfs help <command>" for more information about a command.
|
||||
cmdIpfsMount,
|
||||
cmdIpfsInit,
|
||||
cmdIpfsServe,
|
||||
cmdIpfsRun,
|
||||
cmdIpfsName,
|
||||
cmdIpfsBootstrap,
|
||||
},
|
||||
Flag: *flag.NewFlagSet("ipfs", flag.ExitOnError),
|
||||
}
|
||||
|
||||
// log is the command logger
|
||||
var log = u.Logger("cmd/ipfs")
|
||||
|
||||
func init() {
|
||||
config, err := config.PathRoot()
|
||||
if err != nil {
|
||||
@ -69,16 +78,24 @@ func ipfsCmd(c *commander.Command, args []string) error {
|
||||
}
|
||||
|
||||
func main() {
|
||||
u.Debug = true
|
||||
ofi, err := os.Create("cpu.prof")
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
u.Debug = false
|
||||
|
||||
// setup logging
|
||||
// u.SetupLogging() done in an init() block now.
|
||||
|
||||
// if debugging, setup profiling.
|
||||
if u.Debug {
|
||||
ofi, err := os.Create("cpu.prof")
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
pprof.StartCPUProfile(ofi)
|
||||
defer ofi.Close()
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
pprof.StartCPUProfile(ofi)
|
||||
defer ofi.Close()
|
||||
defer pprof.StopCPUProfile()
|
||||
err = CmdIpfs.Dispatch(os.Args[1:])
|
||||
|
||||
err := CmdIpfs.Dispatch(os.Args[1:])
|
||||
if err != nil {
|
||||
if len(err.Error()) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "ipfs %s: %v\n", os.Args[1], err)
|
||||
@ -88,6 +105,7 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
// localNode constructs a node
|
||||
func localNode(confdir string, online bool) (*core.IpfsNode, error) {
|
||||
filename, err := config.Filename(confdir)
|
||||
if err != nil {
|
||||
@ -132,3 +150,54 @@ func getConfig(c *commander.Command) (*config.Config, error) {
|
||||
|
||||
return config.Load(filename)
|
||||
}
|
||||
|
||||
// cmdContext is a wrapper structure that keeps a node, a daemonlistener, and
|
||||
// a config directory together. These three are needed for most commands.
|
||||
type cmdContext struct {
|
||||
node *core.IpfsNode
|
||||
daemon *daemon.DaemonListener
|
||||
configDir string
|
||||
}
|
||||
|
||||
// setupCmdContext initializes a cmdContext structure from a given command.
|
||||
func setupCmdContext(c *commander.Command, online bool) (cc cmdContext, err error) {
|
||||
rootCmd := c
|
||||
for ; rootCmd.Parent != nil; rootCmd = c.Parent {
|
||||
}
|
||||
|
||||
cc.configDir, err = getConfigDir(rootCmd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cc.node, err = localNode(cc.configDir, online)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cc.daemon, err = setupDaemon(cc.configDir, cc.node)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// setupDaemon sets up the daemon corresponding to given node.
|
||||
func setupDaemon(confdir string, node *core.IpfsNode) (*daemon.DaemonListener, error) {
|
||||
if node.Config.Addresses.API == "" {
|
||||
return nil, errors.New("no config.Addresses.API endpoint supplied")
|
||||
}
|
||||
|
||||
maddr, err := ma.NewMultiaddr(node.Config.Addresses.API)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dl, err := daemon.NewDaemonListener(node, maddr, confdir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go dl.Listen()
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
@ -21,4 +21,9 @@ var cmdIpfsLs = &commander.Command{
|
||||
Flag: *flag.NewFlagSet("ipfs-ls", flag.ExitOnError),
|
||||
}
|
||||
|
||||
var lsCmd = MakeCommand("ls", nil, commands.Ls)
|
||||
var lsCmd = makeCommand(command{
|
||||
name: "ls",
|
||||
args: 1,
|
||||
flags: nil,
|
||||
cmdFn: commands.Ls,
|
||||
})
|
||||
|
||||
@ -3,16 +3,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
|
||||
"github.com/jbenet/go-ipfs/daemon"
|
||||
core "github.com/jbenet/go-ipfs/core"
|
||||
ipns "github.com/jbenet/go-ipfs/fuse/ipns"
|
||||
rofs "github.com/jbenet/go-ipfs/fuse/readonly"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var cmdIpfsMount = &commander.Command{
|
||||
@ -29,42 +27,68 @@ var cmdIpfsMount = &commander.Command{
|
||||
Flag: *flag.NewFlagSet("ipfs-mount", flag.ExitOnError),
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdIpfsMount.Flag.String("f", "", "specify a mountpoint for ipfs")
|
||||
cmdIpfsMount.Flag.String("n", "", "specify a mountpoint for ipns")
|
||||
}
|
||||
|
||||
func mountCmd(c *commander.Command, inp []string) error {
|
||||
if len(inp) < 1 || len(inp[0]) == 0 {
|
||||
u.POut(c.Long)
|
||||
|
||||
cc, err := setupCmdContext(c, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cc.daemon.Close()
|
||||
|
||||
// update fsdir with flag.
|
||||
fsdir := cc.node.Config.Mounts.IPFS
|
||||
if val, ok := c.Flag.Lookup("f").Value.Get().(string); ok && val != "" {
|
||||
fsdir = val
|
||||
}
|
||||
fsdone := mountIpfs(cc.node, fsdir)
|
||||
|
||||
// get default mount points
|
||||
nsdir := cc.node.Config.Mounts.IPNS
|
||||
if val, ok := c.Flag.Lookup("n").Value.Get().(string); ok && val != "" {
|
||||
nsdir = val
|
||||
}
|
||||
nsdone := mountIpns(cc.node, nsdir, fsdir)
|
||||
|
||||
// wait till mounts are done.
|
||||
err1 := <-fsdone
|
||||
err2 := <-nsdone
|
||||
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
func mountIpfs(node *core.IpfsNode, fsdir string) <-chan error {
|
||||
done := make(chan error)
|
||||
fmt.Printf("mounting ipfs at %s\n", fsdir)
|
||||
|
||||
go func() {
|
||||
err := rofs.Mount(node, fsdir)
|
||||
done <- err
|
||||
close(done)
|
||||
}()
|
||||
|
||||
return done
|
||||
}
|
||||
|
||||
func mountIpns(node *core.IpfsNode, nsdir, fsdir string) <-chan error {
|
||||
if nsdir == "" {
|
||||
return nil
|
||||
}
|
||||
done := make(chan error)
|
||||
fmt.Printf("mounting ipns at %s\n", nsdir)
|
||||
|
||||
conf, err := getConfigDir(c.Parent)
|
||||
if err != nil {
|
||||
fmt.Println("Couldnt get config dir")
|
||||
return err
|
||||
}
|
||||
n, err := localNode(conf, true)
|
||||
if err != nil {
|
||||
fmt.Println("Local node creation failed.")
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
err := ipns.Mount(node, nsdir, fsdir)
|
||||
done <- err
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// launch the API RPC endpoint.
|
||||
if n.Config.Addresses.API == "" {
|
||||
return errors.New("no config.RPCAddress endpoint supplied")
|
||||
}
|
||||
|
||||
maddr, err := ma.NewMultiaddr(n.Config.Addresses.API)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dl, err := daemon.NewDaemonListener(n, maddr, conf)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to create daemon listener.")
|
||||
return err
|
||||
}
|
||||
go dl.Listen()
|
||||
defer dl.Close()
|
||||
|
||||
mp := inp[0]
|
||||
fmt.Printf("Mounting at %s\n", mp)
|
||||
return rofs.Mount(n, mp)
|
||||
return done
|
||||
}
|
||||
|
||||
57
cmd/ipfs/name.go
Normal file
57
cmd/ipfs/name.go
Normal file
@ -0,0 +1,57 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
flag "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
)
|
||||
|
||||
var cmdIpfsName = &commander.Command{
|
||||
UsageLine: "name [publish | resolve]",
|
||||
Short: "ipfs namespace (ipns) tool",
|
||||
Long: `ipfs name - Get/Set ipfs config values.
|
||||
|
||||
ipfs name publish [<name>] <ref> - Assign the <ref> to <name>
|
||||
ipfs name resolve [<name>] - Resolve the <ref> value of <name>
|
||||
|
||||
IPNS is a PKI namespace, where names are the hashes of public keys, and
|
||||
the private key enables publishing new (signed) values. In both publish
|
||||
and resolve, the default value of <name> is your own identity public key.
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
Publish a <ref> to your identity name:
|
||||
|
||||
> ipfs name publish QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
Publish a <ref> to another public key:
|
||||
|
||||
> ipfs name publish QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
Resolve the value of your identity:
|
||||
|
||||
> ipfs name resolve
|
||||
QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
Resolve te value of another name:
|
||||
|
||||
> ipfs name resolve QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n
|
||||
QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
`,
|
||||
Run: addCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-name", flag.ExitOnError),
|
||||
Subcommands: []*commander.Command{
|
||||
cmdIpfsPub,
|
||||
cmdIpfsResolve,
|
||||
},
|
||||
}
|
||||
|
||||
func nameCmd(c *commander.Command, args []string) error {
|
||||
fmt.Println(c.Long)
|
||||
return nil
|
||||
}
|
||||
31
cmd/ipfs/pin.go
Normal file
31
cmd/ipfs/pin.go
Normal file
@ -0,0 +1,31 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
"github.com/jbenet/go-ipfs/core/commands"
|
||||
)
|
||||
|
||||
var cmdIpfsPin = &commander.Command{
|
||||
UsageLine: "pin",
|
||||
Short: "pin an ipfs object to local storage.",
|
||||
Long: `ipfs pin <ipfs-path> - pin ipfs object to local storage.
|
||||
|
||||
Retrieves the object named by <ipfs-path> and stores it locally
|
||||
on disk.
|
||||
`,
|
||||
Run: pinCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-pin", flag.ExitOnError),
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdIpfsPin.Flag.Bool("r", false, "pin objects recursively")
|
||||
cmdIpfsPin.Flag.Int("d", 1, "recursive depth")
|
||||
}
|
||||
|
||||
var pinCmd = makeCommand(command{
|
||||
name: "pin",
|
||||
args: 1,
|
||||
flags: []string{"r", "d"},
|
||||
cmdFn: commands.Pin,
|
||||
})
|
||||
41
cmd/ipfs/publish.go
Normal file
41
cmd/ipfs/publish.go
Normal file
@ -0,0 +1,41 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
"github.com/jbenet/go-ipfs/core/commands"
|
||||
)
|
||||
|
||||
var cmdIpfsPub = &commander.Command{
|
||||
UsageLine: "publish",
|
||||
Short: "publish a <ref> to ipns.",
|
||||
Long: `ipfs publish [<name>] <ref> - publish a <ref> to ipns.
|
||||
|
||||
IPNS is a PKI namespace, where names are the hashes of public keys, and
|
||||
the private key enables publishing new (signed) values. In publish, the
|
||||
default value of <name> is your own identity public key.
|
||||
|
||||
Examples:
|
||||
|
||||
Publish a <ref> to your identity name:
|
||||
|
||||
> ipfs name publish QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
Publish a <ref> to another public key:
|
||||
|
||||
> ipfs name publish QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
`,
|
||||
Run: pubCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-publish", flag.ExitOnError),
|
||||
}
|
||||
|
||||
var pubCmd = makeCommand(command{
|
||||
name: "publish",
|
||||
args: 1,
|
||||
flags: nil,
|
||||
online: true,
|
||||
cmdFn: commands.Publish,
|
||||
})
|
||||
@ -28,4 +28,9 @@ func init() {
|
||||
cmdIpfsRefs.Flag.Bool("u", false, "unique: list each ref only once")
|
||||
}
|
||||
|
||||
var refCmd = MakeCommand("refs", []string{"r", "u"}, commands.Refs)
|
||||
var refCmd = makeCommand(command{
|
||||
name: "refs",
|
||||
args: 1,
|
||||
flags: []string{"r", "u"},
|
||||
cmdFn: commands.Refs,
|
||||
})
|
||||
|
||||
42
cmd/ipfs/resolve.go
Normal file
42
cmd/ipfs/resolve.go
Normal file
@ -0,0 +1,42 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
"github.com/jbenet/go-ipfs/core/commands"
|
||||
)
|
||||
|
||||
var cmdIpfsResolve = &commander.Command{
|
||||
UsageLine: "resolve",
|
||||
Short: "resolve an ipns name to a <ref>",
|
||||
Long: `ipfs resolve [<name>] - Resolve an ipns name to a <ref>.
|
||||
|
||||
IPNS is a PKI namespace, where names are the hashes of public keys, and
|
||||
the private key enables publishing new (signed) values. In resolve, the
|
||||
default value of <name> is your own identity public key.
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
Resolve the value of your identity:
|
||||
|
||||
> ipfs name resolve
|
||||
QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
Resolve te value of another name:
|
||||
|
||||
> ipfs name resolve QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n
|
||||
QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
|
||||
|
||||
`,
|
||||
Run: resolveCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-resolve", flag.ExitOnError),
|
||||
}
|
||||
|
||||
var resolveCmd = makeCommand(command{
|
||||
name: "resolve",
|
||||
args: 0,
|
||||
flags: nil,
|
||||
online: true,
|
||||
cmdFn: commands.Resolve,
|
||||
})
|
||||
36
cmd/ipfs/run.go
Normal file
36
cmd/ipfs/run.go
Normal file
@ -0,0 +1,36 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
)
|
||||
|
||||
var cmdIpfsRun = &commander.Command{
|
||||
UsageLine: "run",
|
||||
Short: "run local ifps node.",
|
||||
Long: `run a local ipfs node with no other interface.
|
||||
`,
|
||||
Run: runCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-run", flag.ExitOnError),
|
||||
}
|
||||
|
||||
func runCmd(c *commander.Command, inp []string) error {
|
||||
cc, err := setupCmdContext(c, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT,
|
||||
syscall.SIGTERM, syscall.SIGQUIT)
|
||||
|
||||
// wait until we get a signal to exit.
|
||||
<-sigc
|
||||
|
||||
cc.daemon.Close()
|
||||
return nil
|
||||
}
|
||||
@ -1,14 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
|
||||
"github.com/jbenet/go-ipfs/daemon"
|
||||
h "github.com/jbenet/go-ipfs/server/http"
|
||||
)
|
||||
|
||||
@ -16,58 +14,36 @@ var cmdIpfsServe = &commander.Command{
|
||||
UsageLine: "serve",
|
||||
Short: "Serve an interface to ipfs",
|
||||
Subcommands: []*commander.Command{
|
||||
cmdIpfsServeHttp,
|
||||
cmdIpfsServeHTTP,
|
||||
},
|
||||
Flag: *flag.NewFlagSet("ipfs-serve", flag.ExitOnError),
|
||||
}
|
||||
|
||||
var cmdIpfsServeHttp = &commander.Command{
|
||||
var cmdIpfsServeHTTP = &commander.Command{
|
||||
UsageLine: "http",
|
||||
Short: "Serve an HTTP API",
|
||||
Long: `ipfs serve http - Serve an http gateway into ipfs.`,
|
||||
Run: serveHttpCmd,
|
||||
Run: serveHTTPCmd,
|
||||
Flag: *flag.NewFlagSet("ipfs-serve-http", flag.ExitOnError),
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdIpfsServeHttp.Flag.String("address", "/ip4/127.0.0.1/tcp/8080", "Listen Address")
|
||||
cmdIpfsServeHTTP.Flag.String("address", "/ip4/127.0.0.1/tcp/8080", "Listen Address")
|
||||
}
|
||||
|
||||
func serveHttpCmd(c *commander.Command, _ []string) error {
|
||||
conf, err := getConfigDir(c.Parent.Parent)
|
||||
func serveHTTPCmd(c *commander.Command, _ []string) error {
|
||||
cc, err := setupCmdContext(c, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := localNode(conf, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// launch the API RPC endpoint.
|
||||
if n.Config.Addresses.API == "" {
|
||||
return errors.New("no config.RPCAddress endpoint supplied")
|
||||
}
|
||||
|
||||
maddr, err := ma.NewMultiaddr(n.Config.Addresses.API)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dl, err := daemon.NewDaemonListener(n, maddr, conf)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to create daemon listener.")
|
||||
return err
|
||||
}
|
||||
go dl.Listen()
|
||||
defer dl.Close()
|
||||
defer cc.daemon.Close()
|
||||
|
||||
address := c.Flag.Lookup("address").Value.Get().(string)
|
||||
maddr, err = ma.NewMultiaddr(address)
|
||||
maddr, err := ma.NewMultiaddr(address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Serving on %s\n", address)
|
||||
return h.Serve(maddr, n)
|
||||
return h.Serve(maddr, cc.node)
|
||||
}
|
||||
|
||||
@ -29,6 +29,12 @@ type Addresses struct {
|
||||
API string // address for the local API (RPC)
|
||||
}
|
||||
|
||||
// Mounts stores the (string) mount points
|
||||
type Mounts struct {
|
||||
IPFS string
|
||||
IPNS string
|
||||
}
|
||||
|
||||
// BootstrapPeer is a peer used to bootstrap the network.
|
||||
type BootstrapPeer struct {
|
||||
Address string
|
||||
@ -44,12 +50,20 @@ type Config struct {
|
||||
Identity Identity // local node's peer identity
|
||||
Datastore Datastore // local node's storage
|
||||
Addresses Addresses // local node's addresses
|
||||
Mounts Mounts // local node's mount points
|
||||
Bootstrap []*BootstrapPeer // local nodes's bootstrap peers
|
||||
}
|
||||
|
||||
// DefaultPathRoot is the path to the default config dir location.
|
||||
const DefaultPathRoot = "~/.go-ipfs"
|
||||
|
||||
// DefaultConfigFile is the filename of the configuration file
|
||||
const DefaultConfigFile = "config"
|
||||
|
||||
// DefaultDataStoreDirectory is the directory to store all the local IPFS data.
|
||||
const DefaultDataStoreDirectory = "datastore"
|
||||
|
||||
// EnvDir is the environment variable used to change the path root.
|
||||
const EnvDir = "IPFS_DIR"
|
||||
|
||||
// PathRoot returns the default configuration root directory
|
||||
@ -69,13 +83,11 @@ func Path(configroot, extension string) (string, error) {
|
||||
dir, err := PathRoot()
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else {
|
||||
return filepath.Join(dir, extension), nil
|
||||
}
|
||||
return filepath.Join(dir, extension), nil
|
||||
|
||||
} else {
|
||||
return filepath.Join(configroot, extension), nil
|
||||
}
|
||||
return filepath.Join(configroot, extension), nil
|
||||
}
|
||||
|
||||
// DataStorePath returns the default data store path given a configuration root
|
||||
|
||||
@ -11,6 +11,7 @@ import (
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
"github.com/jbenet/go-ipfs/importer"
|
||||
dag "github.com/jbenet/go-ipfs/merkledag"
|
||||
ft "github.com/jbenet/go-ipfs/unixfs"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
@ -36,7 +37,7 @@ func Add(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr
|
||||
}
|
||||
|
||||
// Add the file
|
||||
nd, err := AddPath(n, path, depth)
|
||||
_, err = AddPath(n, path, depth)
|
||||
if err != nil {
|
||||
if err == ErrDepthLimitExceeded && depth == 1 {
|
||||
err = errors.New("use -r to recursively add directories")
|
||||
@ -45,12 +46,13 @@ func Add(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr
|
||||
}
|
||||
|
||||
// get the key to print it
|
||||
k, err := nd.Key()
|
||||
if err != nil {
|
||||
return fmt.Errorf("addFile error: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(out, "added %s %s\n", k.Pretty(), path)
|
||||
// k, err := nd.Key()
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("addFile error: %v", err)
|
||||
// }
|
||||
//
|
||||
// Commenting out of here, because it's already in addNode below.
|
||||
// fmt.Fprintf(out, "added %s %s\n", k, path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -74,7 +76,7 @@ func AddPath(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
|
||||
}
|
||||
|
||||
func addDir(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
|
||||
tree := &dag.Node{Data: dag.FolderPBData()}
|
||||
tree := &dag.Node{Data: ft.FolderPBData()}
|
||||
|
||||
files, err := ioutil.ReadDir(fpath)
|
||||
if err != nil {
|
||||
@ -103,6 +105,16 @@ func addFile(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k, err := root.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("Adding file: %s = %s\n", fpath, k)
|
||||
for _, l := range root.Links {
|
||||
log.Info("SubBlock: %s\n", l.Hash.B58String())
|
||||
}
|
||||
|
||||
return root, addNode(n, root, fpath)
|
||||
}
|
||||
|
||||
@ -119,8 +131,8 @@ func addNode(n *core.IpfsNode, nd *dag.Node, fpath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
u.POut("added %s %s\n", k.Pretty(), fpath)
|
||||
u.POut("added %s %s\n", k, fpath)
|
||||
|
||||
// ensure we keep it. atm no-op
|
||||
return n.PinDagNode(nd)
|
||||
return n.PinDagNodeRecursively(nd, -1)
|
||||
}
|
||||
|
||||
@ -5,7 +5,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
mdag "github.com/jbenet/go-ipfs/merkledag"
|
||||
uio "github.com/jbenet/go-ipfs/unixfs/io"
|
||||
)
|
||||
|
||||
func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
|
||||
@ -15,7 +15,7 @@ func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr
|
||||
return fmt.Errorf("catFile error: %v", err)
|
||||
}
|
||||
|
||||
read, err := mdag.NewDagReader(dagnode, n.DAG)
|
||||
read, err := uio.NewDagReader(dagnode, n.DAG)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cat error: %v", err)
|
||||
}
|
||||
|
||||
@ -4,6 +4,9 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("commands")
|
||||
|
||||
type CmdFunc func(*core.IpfsNode, []string, map[string]interface{}, io.Writer) error
|
||||
|
||||
@ -8,13 +8,28 @@ import (
|
||||
)
|
||||
|
||||
func Pin(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
|
||||
|
||||
// set recursive flag
|
||||
recursive, _ := opts["r"].(bool) // false if cast fails.
|
||||
|
||||
// if recursive, set depth flag
|
||||
depth := 1 // default (non recursive)
|
||||
if d, ok := opts["d"].(int); recursive && ok {
|
||||
depth = d
|
||||
}
|
||||
if depth < -1 {
|
||||
return fmt.Errorf("ipfs pin: called with invalid depth: %v", depth)
|
||||
}
|
||||
|
||||
fmt.Printf("recursive, depth: %v, %v\n", recursive, depth)
|
||||
|
||||
for _, fn := range args {
|
||||
dagnode, err := n.Resolver.ResolvePath(fn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pin error: %v", err)
|
||||
}
|
||||
|
||||
err = n.PinDagNode(dagnode)
|
||||
err = n.PinDagNodeRecursively(dagnode, depth)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pin: %v", err)
|
||||
}
|
||||
|
||||
53
core/commands/publish.go
Normal file
53
core/commands/publish.go
Normal file
@ -0,0 +1,53 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
|
||||
nsys "github.com/jbenet/go-ipfs/namesys"
|
||||
)
|
||||
|
||||
func Publish(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
|
||||
log.Debug("Begin Publish")
|
||||
|
||||
if n.Identity == nil {
|
||||
return errors.New("Identity not loaded!")
|
||||
}
|
||||
|
||||
// name := ""
|
||||
ref := ""
|
||||
|
||||
switch len(args) {
|
||||
case 2:
|
||||
// name = args[0]
|
||||
ref = args[1]
|
||||
return errors.New("keychains not yet implemented")
|
||||
case 1:
|
||||
// name = n.Identity.ID.String()
|
||||
ref = args[0]
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Publish expects 1 or 2 args; got %d.", len(args))
|
||||
}
|
||||
|
||||
// later, n.Keychain.Get(name).PrivKey
|
||||
k := n.Identity.PrivKey
|
||||
|
||||
pub := nsys.NewRoutingPublisher(n.Routing)
|
||||
err := pub.Publish(k, ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := k.GetPublic().Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, "published name %s to %s\n", u.Key(hash), ref)
|
||||
|
||||
return nil
|
||||
}
|
||||
35
core/commands/resolve.go
Normal file
35
core/commands/resolve.go
Normal file
@ -0,0 +1,35 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
)
|
||||
|
||||
func Resolve(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
|
||||
|
||||
name := ""
|
||||
|
||||
switch len(args) {
|
||||
case 1:
|
||||
name = args[0]
|
||||
case 0:
|
||||
if n.Identity == nil {
|
||||
return errors.New("Identity not loaded!")
|
||||
}
|
||||
name = n.Identity.ID.String()
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Publish expects 1 or 2 args; got %d.", len(args))
|
||||
}
|
||||
|
||||
res, err := n.Namesys.Resolve(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintf(out, "%s\n", res)
|
||||
return nil
|
||||
}
|
||||
22
core/core.go
22
core/core.go
@ -16,6 +16,7 @@ import (
|
||||
exchange "github.com/jbenet/go-ipfs/exchange"
|
||||
bitswap "github.com/jbenet/go-ipfs/exchange/bitswap"
|
||||
merkledag "github.com/jbenet/go-ipfs/merkledag"
|
||||
namesys "github.com/jbenet/go-ipfs/namesys"
|
||||
inet "github.com/jbenet/go-ipfs/net"
|
||||
mux "github.com/jbenet/go-ipfs/net/mux"
|
||||
netservice "github.com/jbenet/go-ipfs/net/service"
|
||||
@ -26,6 +27,8 @@ import (
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("core")
|
||||
|
||||
// IpfsNode is IPFS Core module. It represents an IPFS instance.
|
||||
type IpfsNode struct {
|
||||
|
||||
@ -39,7 +42,7 @@ type IpfsNode struct {
|
||||
Peerstore peer.Peerstore
|
||||
|
||||
// the local datastore
|
||||
Datastore ds.Datastore
|
||||
Datastore ds.ThreadSafeDatastore
|
||||
|
||||
// the network message stream
|
||||
Network inet.Network
|
||||
@ -60,7 +63,7 @@ type IpfsNode struct {
|
||||
Resolver *path.Resolver
|
||||
|
||||
// the name system, resolves paths to hashes
|
||||
// Namesys *namesys.Namesys
|
||||
Namesys namesys.NameSystem
|
||||
}
|
||||
|
||||
// NewIpfsNode constructs a new IpfsNode based on the given config.
|
||||
@ -142,6 +145,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) {
|
||||
}
|
||||
|
||||
dag := &merkledag.DAGService{Blocks: bs}
|
||||
ns := namesys.NewNameSystem(route)
|
||||
|
||||
success = true
|
||||
return &IpfsNode{
|
||||
@ -154,6 +158,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) {
|
||||
Exchange: exchangeSession,
|
||||
Identity: local,
|
||||
Routing: route,
|
||||
Namesys: ns,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -167,14 +172,14 @@ func initIdentity(cfg *config.Config, online bool) (*peer.Peer, error) {
|
||||
}
|
||||
|
||||
// address is optional
|
||||
var addresses []*ma.Multiaddr
|
||||
var addresses []ma.Multiaddr
|
||||
if len(cfg.Addresses.Swarm) > 0 {
|
||||
maddr, err := ma.NewMultiaddr(cfg.Addresses.Swarm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addresses = []*ma.Multiaddr{maddr}
|
||||
addresses = []ma.Multiaddr{maddr}
|
||||
}
|
||||
|
||||
var (
|
||||
@ -232,8 +237,13 @@ func initConnections(ctx context.Context, cfg *config.Config, pstore peer.Peerst
|
||||
}
|
||||
}
|
||||
|
||||
// PinDagNode ensures a given node is stored persistently locally.
|
||||
// PinDagNode ensures a given node is stored persistently locally
|
||||
func (n *IpfsNode) PinDagNode(nd *merkledag.Node) error {
|
||||
u.DOut("Pinning node. Currently No-Op\n")
|
||||
return n.PinDagNodeRecursively(nd, 1)
|
||||
}
|
||||
|
||||
// PinDagNodeRecursively ensures a given node is stored persistently locally
|
||||
func (n *IpfsNode) PinDagNodeRecursively(nd *merkledag.Node, depth int) error {
|
||||
u.DOut("Pinning node recursively. Currently No-Op\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4,11 +4,16 @@ import (
|
||||
"fmt"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
fsds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs"
|
||||
ktds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform"
|
||||
lds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb"
|
||||
syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync"
|
||||
|
||||
config "github.com/jbenet/go-ipfs/config"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
func makeDatastore(cfg config.Datastore) (ds.Datastore, error) {
|
||||
func makeDatastore(cfg config.Datastore) (ds.ThreadSafeDatastore, error) {
|
||||
if len(cfg.Type) == 0 {
|
||||
return nil, fmt.Errorf("config datastore.type required")
|
||||
}
|
||||
@ -16,14 +21,24 @@ func makeDatastore(cfg config.Datastore) (ds.Datastore, error) {
|
||||
switch cfg.Type {
|
||||
case "leveldb":
|
||||
return makeLevelDBDatastore(cfg)
|
||||
|
||||
case "memory":
|
||||
return ds.NewMapDatastore(), nil
|
||||
return syncds.MutexWrap(ds.NewMapDatastore()), nil
|
||||
|
||||
case "fs":
|
||||
log.Warning("using fs.Datastore at .datastore for testing.")
|
||||
d, err := fsds.NewDatastore(".datastore") // for testing!!
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ktd := ktds.WrapDatastore(d, u.DsKeyB58Encode)
|
||||
return syncds.MutexWrap(ktd), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unknown datastore type: %s", cfg.Type)
|
||||
}
|
||||
|
||||
func makeLevelDBDatastore(cfg config.Datastore) (ds.Datastore, error) {
|
||||
func makeLevelDBDatastore(cfg config.Datastore) (ds.ThreadSafeDatastore, error) {
|
||||
if len(cfg.Path) == 0 {
|
||||
return nil, fmt.Errorf("config datastore.path required for leveldb")
|
||||
}
|
||||
|
||||
52
core/mock.go
Normal file
52
core/mock.go
Normal file
@ -0,0 +1,52 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync"
|
||||
bs "github.com/jbenet/go-ipfs/blockservice"
|
||||
ci "github.com/jbenet/go-ipfs/crypto"
|
||||
mdag "github.com/jbenet/go-ipfs/merkledag"
|
||||
nsys "github.com/jbenet/go-ipfs/namesys"
|
||||
path "github.com/jbenet/go-ipfs/path"
|
||||
"github.com/jbenet/go-ipfs/peer"
|
||||
mdht "github.com/jbenet/go-ipfs/routing/mock"
|
||||
)
|
||||
|
||||
func NewMockNode() (*IpfsNode, error) {
|
||||
nd := new(IpfsNode)
|
||||
|
||||
//Generate Identity
|
||||
nd.Identity = &peer.Peer{ID: []byte("TESTING")}
|
||||
pk, sk, err := ci.GenerateKeyPair(ci.RSA, 1024)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nd.Identity.PrivKey = pk
|
||||
nd.Identity.PubKey = sk
|
||||
|
||||
// Temp Datastore
|
||||
dstore := ds.NewMapDatastore()
|
||||
nd.Datastore = syncds.MutexWrap(dstore)
|
||||
|
||||
// Routing
|
||||
dht := mdht.NewMockRouter(nd.Identity, nd.Datastore)
|
||||
nd.Routing = dht
|
||||
|
||||
// Bitswap
|
||||
//??
|
||||
|
||||
bserv, err := bs.NewBlockService(nd.Datastore, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nd.DAG = &mdag.DAGService{bserv}
|
||||
|
||||
// Namespace resolver
|
||||
nd.Namesys = nsys.NewNameSystem(dht)
|
||||
|
||||
// Path resolver
|
||||
nd.Resolver = &path.Resolver{nd.DAG}
|
||||
|
||||
return nd, nil
|
||||
}
|
||||
@ -3,6 +3,7 @@ package crypto
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"crypto/elliptic"
|
||||
"crypto/hmac"
|
||||
@ -12,9 +13,10 @@ import (
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"hash"
|
||||
"math/big"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var ErrBadKeyType = errors.New("invalid or unsupported key type")
|
||||
@ -27,6 +29,9 @@ type Key interface {
|
||||
// Bytes returns a serialized, storeable representation of this key
|
||||
Bytes() ([]byte, error)
|
||||
|
||||
// Hash returns the hash of this key
|
||||
Hash() ([]byte, error)
|
||||
|
||||
// Equals checks whether two PubKeys are the same
|
||||
Equals(Key) bool
|
||||
}
|
||||
@ -91,25 +96,16 @@ func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var pubKey bytes.Buffer
|
||||
pubKey.Write(x.Bytes())
|
||||
pubKey.Write(y.Bytes())
|
||||
pubKey := elliptic.Marshal(curve, x, y)
|
||||
u.PErr("GenerateEKeyPair %d\n", len(pubKey))
|
||||
|
||||
done := func(theirPub []byte) ([]byte, error) {
|
||||
// Verify and unpack node's public key.
|
||||
curveSize := curve.Params().BitSize
|
||||
|
||||
if len(theirPub) != (curveSize / 4) {
|
||||
return nil, errors.New("Malformed public key.")
|
||||
x, y := elliptic.Unmarshal(curve, theirPub)
|
||||
if x == nil {
|
||||
return nil, fmt.Errorf("Malformed public key: %d %v", len(theirPub), theirPub)
|
||||
}
|
||||
|
||||
bound := (curveSize / 8)
|
||||
x := big.NewInt(0)
|
||||
y := big.NewInt(0)
|
||||
|
||||
x.SetBytes(theirPub[0:bound])
|
||||
y.SetBytes(theirPub[bound : bound*2])
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("Invalid public key.")
|
||||
}
|
||||
@ -120,7 +116,7 @@ func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) {
|
||||
return secret.Bytes(), nil
|
||||
}
|
||||
|
||||
return pubKey.Bytes(), done, nil
|
||||
return pubKey, done, nil
|
||||
}
|
||||
|
||||
// Generates a set of keys for each party by stretching the shared key.
|
||||
@ -246,3 +242,12 @@ func KeyEqual(k1, k2 Key) bool {
|
||||
b2, err2 := k2.Bytes()
|
||||
return bytes.Equal(b1, b2) && err1 == err2
|
||||
}
|
||||
|
||||
// KeyHash hashes a key.
|
||||
func KeyHash(k Key) ([]byte, error) {
|
||||
kb, err := k.Bytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.Hash(kb), nil
|
||||
}
|
||||
|
||||
@ -92,3 +92,7 @@ func (pk testkey) Bytes() ([]byte, error) {
|
||||
func (pk testkey) Equals(k Key) bool {
|
||||
return KeyEqual(pk, k)
|
||||
}
|
||||
|
||||
func (pk testkey) Hash() ([]byte, error) {
|
||||
return KeyHash(pk)
|
||||
}
|
||||
|
||||
@ -46,6 +46,10 @@ func (pk *RsaPublicKey) Equals(k Key) bool {
|
||||
return KeyEqual(pk, k)
|
||||
}
|
||||
|
||||
func (pk *RsaPublicKey) Hash() ([]byte, error) {
|
||||
return KeyHash(pk)
|
||||
}
|
||||
|
||||
func (sk *RsaPrivateKey) GenSecret() []byte {
|
||||
buf := make([]byte, 16)
|
||||
rand.Read(buf)
|
||||
@ -75,6 +79,10 @@ func (sk *RsaPrivateKey) Equals(k Key) bool {
|
||||
return KeyEqual(sk, k)
|
||||
}
|
||||
|
||||
func (sk *RsaPrivateKey) Hash() ([]byte, error) {
|
||||
return KeyHash(sk)
|
||||
}
|
||||
|
||||
func UnmarshalRsaPrivateKey(b []byte) (*RsaPrivateKey, error) {
|
||||
sk, err := x509.ParsePKCS1PrivateKey(b)
|
||||
if err != nil {
|
||||
|
||||
@ -49,7 +49,7 @@ func (s *SecurePipe) handshake() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// u.DOut("handshake: %s <--> %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty())
|
||||
// u.DOut("handshake: %s <--> %s\n", s.local, s.remote)
|
||||
myPubKey, err := s.local.PubKey.Bytes()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -101,7 +101,7 @@ func (s *SecurePipe) handshake() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.DOut("[%s] Remote Peer Identified as %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty())
|
||||
u.DOut("%s Remote Peer Identified as %s\n", s.local, s.remote)
|
||||
|
||||
exchange, err := selectBest(SupportedExchanges, proposeResp.GetExchanges())
|
||||
if err != nil {
|
||||
@ -119,7 +119,7 @@ func (s *SecurePipe) handshake() error {
|
||||
}
|
||||
|
||||
// u.POut("Selected %s %s %s\n", exchange, cipherType, hashType)
|
||||
epubkey, done, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey
|
||||
epubkey, genSharedKey, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey
|
||||
|
||||
var handshake bytes.Buffer // Gather corpus to sign.
|
||||
handshake.Write(encoded)
|
||||
@ -163,7 +163,7 @@ func (s *SecurePipe) handshake() error {
|
||||
theirHandshake.Write(encoded)
|
||||
theirHandshake.Write(exchangeResp.GetEpubkey())
|
||||
|
||||
// u.POut("Remote Peer Identified as %s\n", s.remote.ID.Pretty())
|
||||
// u.POut("Remote Peer Identified as %s\n", s.remote)
|
||||
ok, err := s.remote.PubKey.Verify(theirHandshake.Bytes(), exchangeResp.GetSignature())
|
||||
if err != nil {
|
||||
return err
|
||||
@ -173,7 +173,7 @@ func (s *SecurePipe) handshake() error {
|
||||
return errors.New("Bad signature!")
|
||||
}
|
||||
|
||||
secret, err := done(exchangeResp.GetEpubkey())
|
||||
secret, err := genSharedKey(exchangeResp.GetEpubkey())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -205,7 +205,7 @@ func (s *SecurePipe) handshake() error {
|
||||
return errors.New("Negotiation failed.")
|
||||
}
|
||||
|
||||
u.DOut("[%s] handshake: Got node id: %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty())
|
||||
u.DOut("%s handshake: Got node id: %s\n", s.local, s.remote)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -229,10 +229,11 @@ func (s *SecurePipe) handleSecureIn(hashType string, tIV, tCKey, tMKey []byte) {
|
||||
for {
|
||||
data, ok := <-s.insecure.In
|
||||
if !ok {
|
||||
close(s.Duplex.In)
|
||||
return
|
||||
}
|
||||
|
||||
// u.DOut("[peer %s] secure in [from = %s] %d\n", s.local.ID.Pretty(), s.remote.ID.Pretty(), len(data))
|
||||
// u.DOut("[peer %s] secure in [from = %s] %d\n", s.local, s.remote, len(data))
|
||||
if len(data) <= macSize {
|
||||
continue
|
||||
}
|
||||
@ -280,7 +281,7 @@ func (s *SecurePipe) handleSecureOut(hashType string, mIV, mCKey, mMKey []byte)
|
||||
copy(buff[len(data):], myMac.Sum(nil))
|
||||
myMac.Reset()
|
||||
|
||||
// u.DOut("[peer %s] secure out [to = %s] %d\n", s.local.ID.Pretty(), s.remote.ID.Pretty(), len(buff))
|
||||
// u.DOut("[peer %s] secure out [to = %s] %d\n", s.local, s.remote, len(buff))
|
||||
s.insecure.Out <- buff
|
||||
}
|
||||
}
|
||||
@ -291,25 +292,15 @@ func IDFromPubKey(pk ci.PubKey) (peer.ID, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash, err := u.Hash(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := u.Hash(b)
|
||||
return peer.ID(hash), nil
|
||||
}
|
||||
|
||||
// Determines which algorithm to use. Note: f(a, b) = f(b, a)
|
||||
func selectBest(myPrefs, theirPrefs string) (string, error) {
|
||||
// Person with greatest hash gets first choice.
|
||||
myHash, err := u.Hash([]byte(myPrefs))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
theirHash, err := u.Hash([]byte(theirPrefs))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
myHash := u.Hash([]byte(myPrefs))
|
||||
theirHash := u.Hash([]byte(theirPrefs))
|
||||
|
||||
cmp := bytes.Compare(myHash, theirHash)
|
||||
var firstChoiceArr, secChoiceArr []string
|
||||
@ -367,7 +358,7 @@ func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (*peer.Peer, error)
|
||||
// let's verify ID
|
||||
if !npeer.ID.Equal(rid) {
|
||||
e := "Expected peer.ID does not match sent pubkey's hash: %v - %v"
|
||||
return nil, fmt.Errorf(e, npeer.ID.Pretty(), rid.Pretty())
|
||||
return nil, fmt.Errorf(e, npeer, rid)
|
||||
}
|
||||
|
||||
if npeer.PubKey == nil {
|
||||
@ -380,7 +371,7 @@ func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (*peer.Peer, error)
|
||||
// this shouldn't ever happen, given we hashed, etc, but it could mean
|
||||
// expected code (or protocol) invariants violated.
|
||||
if !npeer.PubKey.Equals(rpk) {
|
||||
return nil, fmt.Errorf("WARNING: PubKey mismatch: %v", npeer.ID.Pretty())
|
||||
return nil, fmt.Errorf("WARNING: PubKey mismatch: %v", npeer)
|
||||
}
|
||||
return npeer, nil
|
||||
}
|
||||
|
||||
@ -82,6 +82,5 @@ func (s *SecurePipe) Close() error {
|
||||
|
||||
s.cancel()
|
||||
s.cancel = nil
|
||||
close(s.In)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -9,7 +9,6 @@ import (
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
|
||||
core "github.com/jbenet/go-ipfs/core"
|
||||
"github.com/jbenet/go-ipfs/core/commands"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
@ -18,7 +17,7 @@ import (
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.MustGetLogger("daemon")
|
||||
var log = u.Logger("daemon")
|
||||
|
||||
// LockFile is the filename of the daemon lock, relative to config dir
|
||||
const LockFile = "daemon.lock"
|
||||
@ -40,7 +39,7 @@ type Command struct {
|
||||
Opts map[string]interface{}
|
||||
}
|
||||
|
||||
func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir string) (*DaemonListener, error) {
|
||||
func NewDaemonListener(ipfsnode *core.IpfsNode, addr ma.Multiaddr, confdir string) (*DaemonListener, error) {
|
||||
var err error
|
||||
confdir, err = u.TildeExpansion(confdir)
|
||||
if err != nil {
|
||||
@ -52,7 +51,7 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir stri
|
||||
return nil, err
|
||||
}
|
||||
|
||||
network, host, err := addr.DialArgs()
|
||||
network, host, err := ma.DialArgs(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -63,7 +62,7 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir stri
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = ofi.Write([]byte(host))
|
||||
_, err = ofi.Write([]byte(addr.String()))
|
||||
if err != nil {
|
||||
log.Warning("Could not write to rpcaddress file: %s", err)
|
||||
return nil, err
|
||||
@ -133,6 +132,10 @@ func (dl *DaemonListener) handleConnection(conn net.Conn) {
|
||||
err = commands.Ls(dl.node, command.Args, command.Opts, conn)
|
||||
case "pin":
|
||||
err = commands.Pin(dl.node, command.Args, command.Opts, conn)
|
||||
case "publish":
|
||||
err = commands.Publish(dl.node, command.Args, command.Opts, conn)
|
||||
case "resolve":
|
||||
err = commands.Resolve(dl.node, command.Args, command.Opts, conn)
|
||||
default:
|
||||
err = fmt.Errorf("Invalid Command: '%s'", command.Command)
|
||||
}
|
||||
|
||||
@ -60,19 +60,20 @@ func SendCommand(command *Command, confdir string) error {
|
||||
return ErrDaemonNotRunning
|
||||
}
|
||||
|
||||
log.Info("Daemon is running! %s", err)
|
||||
log.Info("Daemon is running! [reason = %s]", err)
|
||||
|
||||
server, err := getDaemonAddr(confdir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("Daemon address: %s", server)
|
||||
maddr, err := ma.NewMultiaddr(server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
network, host, err := maddr.DialArgs()
|
||||
network, host, err := ma.DialArgs(maddr)
|
||||
|
||||
conn, err := net.Dial(network, host)
|
||||
if err != nil {
|
||||
|
||||
@ -61,7 +61,7 @@ type bitswap struct {
|
||||
//
|
||||
// TODO ensure only one active request per key
|
||||
func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) {
|
||||
u.DOut("Get Block %v\n", k.Pretty())
|
||||
u.DOut("Get Block %v\n", k)
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(parent)
|
||||
bs.wantlist.Add(k)
|
||||
@ -110,7 +110,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error)
|
||||
// HasBlock announces the existance of a block to bitswap, potentially sending
|
||||
// it to peers (Partners) whose WantLists include it.
|
||||
func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error {
|
||||
u.DOut("Has Block %v\n", blk.Key().Pretty())
|
||||
u.DOut("Has Block %v\n", blk.Key())
|
||||
bs.wantlist.Remove(blk.Key())
|
||||
bs.sendToPeersThatWant(ctx, blk)
|
||||
return bs.routing.Provide(ctx, blk.Key())
|
||||
@ -119,7 +119,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error {
|
||||
// TODO(brian): handle errors
|
||||
func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) (
|
||||
*peer.Peer, bsmsg.BitSwapMessage) {
|
||||
u.DOut("ReceiveMessage from %v\n", p.Key().Pretty())
|
||||
u.DOut("ReceiveMessage from %v\n", p.Key())
|
||||
|
||||
if p == nil {
|
||||
// TODO propagate the error upward
|
||||
@ -134,7 +134,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bs
|
||||
|
||||
for _, block := range incoming.Blocks() {
|
||||
// TODO verify blocks?
|
||||
if err := bs.blockstore.Put(block); err != nil {
|
||||
if err := bs.blockstore.Put(&block); err != nil {
|
||||
continue // FIXME(brian): err ignored
|
||||
}
|
||||
go bs.notifications.Publish(block)
|
||||
@ -173,10 +173,10 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag
|
||||
}
|
||||
|
||||
func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) {
|
||||
u.DOut("Sending %v to peers that want it\n", block.Key().Pretty())
|
||||
u.DOut("Sending %v to peers that want it\n", block.Key())
|
||||
for _, p := range bs.strategy.Peers() {
|
||||
if bs.strategy.BlockIsWantedByPeer(block.Key(), p) {
|
||||
u.DOut("%v wants %v\n", p.Key().Pretty(), block.Key().Pretty())
|
||||
u.DOut("%v wants %v\n", p, block.Key())
|
||||
if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) {
|
||||
message := bsmsg.New()
|
||||
message.AppendBlock(block)
|
||||
|
||||
@ -9,7 +9,7 @@ import (
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
|
||||
"github.com/jbenet/go-ipfs/blocks"
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
bstore "github.com/jbenet/go-ipfs/blockstore"
|
||||
exchange "github.com/jbenet/go-ipfs/exchange"
|
||||
notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications"
|
||||
@ -18,7 +18,6 @@ import (
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
mock "github.com/jbenet/go-ipfs/routing/mock"
|
||||
util "github.com/jbenet/go-ipfs/util"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
func TestGetBlockTimeout(t *testing.T) {
|
||||
@ -30,7 +29,7 @@ func TestGetBlockTimeout(t *testing.T) {
|
||||
self := g.Next()
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond)
|
||||
block := testutil.NewBlockOrFail(t, "block")
|
||||
block := blocks.NewBlock([]byte("block"))
|
||||
_, err := self.exchange.Block(ctx, block.Key())
|
||||
|
||||
if err != context.DeadlineExceeded {
|
||||
@ -44,7 +43,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) {
|
||||
rs := mock.VirtualRoutingServer()
|
||||
g := NewSessionGenerator(net, rs)
|
||||
|
||||
block := testutil.NewBlockOrFail(t, "block")
|
||||
block := blocks.NewBlock([]byte("block"))
|
||||
rs.Announce(&peer.Peer{}, block.Key()) // but not on network
|
||||
|
||||
solo := g.Next()
|
||||
@ -63,7 +62,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
|
||||
|
||||
net := tn.VirtualNetwork()
|
||||
rs := mock.VirtualRoutingServer()
|
||||
block := testutil.NewBlockOrFail(t, "block")
|
||||
block := blocks.NewBlock([]byte("block"))
|
||||
g := NewSessionGenerator(net, rs)
|
||||
|
||||
hasBlock := g.Next()
|
||||
@ -71,7 +70,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
|
||||
if err := hasBlock.blockstore.Put(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil {
|
||||
if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -93,7 +92,7 @@ func TestSwarm(t *testing.T) {
|
||||
net := tn.VirtualNetwork()
|
||||
rs := mock.VirtualRoutingServer()
|
||||
sg := NewSessionGenerator(net, rs)
|
||||
bg := NewBlockGenerator(t)
|
||||
bg := NewBlockGenerator()
|
||||
|
||||
t.Log("Create a ton of instances, and just a few blocks")
|
||||
|
||||
@ -107,7 +106,7 @@ func TestSwarm(t *testing.T) {
|
||||
|
||||
first := instances[0]
|
||||
for _, b := range blocks {
|
||||
first.blockstore.Put(*b)
|
||||
first.blockstore.Put(b)
|
||||
first.exchange.HasBlock(context.Background(), *b)
|
||||
rs.Announce(first.peer, b.Key())
|
||||
}
|
||||
@ -154,55 +153,55 @@ func TestSendToWantingPeer(t *testing.T) {
|
||||
net := tn.VirtualNetwork()
|
||||
rs := mock.VirtualRoutingServer()
|
||||
sg := NewSessionGenerator(net, rs)
|
||||
bg := NewBlockGenerator(t)
|
||||
bg := NewBlockGenerator()
|
||||
|
||||
me := sg.Next()
|
||||
w := sg.Next()
|
||||
o := sg.Next()
|
||||
|
||||
t.Logf("Session %v\n", me.peer.Key().Pretty())
|
||||
t.Logf("Session %v\n", w.peer.Key().Pretty())
|
||||
t.Logf("Session %v\n", o.peer.Key().Pretty())
|
||||
t.Logf("Session %v\n", me.peer)
|
||||
t.Logf("Session %v\n", w.peer)
|
||||
t.Logf("Session %v\n", o.peer)
|
||||
|
||||
alpha := bg.Next()
|
||||
|
||||
const timeout = 1 * time.Millisecond // FIXME don't depend on time
|
||||
|
||||
t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer.Key().Pretty(), alpha.Key().Pretty())
|
||||
t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer, alpha.Key())
|
||||
ctx, _ := context.WithTimeout(context.Background(), timeout)
|
||||
_, err := w.exchange.Block(ctx, alpha.Key())
|
||||
if err == nil {
|
||||
t.Fatalf("Expected %v to NOT be available", alpha.Key().Pretty())
|
||||
t.Fatalf("Expected %v to NOT be available", alpha.Key())
|
||||
}
|
||||
|
||||
beta := bg.Next()
|
||||
t.Logf("Peer %v announes availability of %v\n", w.peer.Key().Pretty(), beta.Key().Pretty())
|
||||
t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key())
|
||||
ctx, _ = context.WithTimeout(context.Background(), timeout)
|
||||
if err := w.blockstore.Put(beta); err != nil {
|
||||
if err := w.blockstore.Put(&beta); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w.exchange.HasBlock(ctx, beta)
|
||||
|
||||
t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer.Key().Pretty(), beta.Key().Pretty(), w.peer.Key().Pretty(), alpha.Key().Pretty())
|
||||
t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key())
|
||||
ctx, _ = context.WithTimeout(context.Background(), timeout)
|
||||
if _, err := me.exchange.Block(ctx, beta.Key()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("%v announces availability of %v\n", o.peer.Key().Pretty(), alpha.Key().Pretty())
|
||||
t.Logf("%v announces availability of %v\n", o.peer, alpha.Key())
|
||||
ctx, _ = context.WithTimeout(context.Background(), timeout)
|
||||
if err := o.blockstore.Put(alpha); err != nil {
|
||||
if err := o.blockstore.Put(&alpha); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
o.exchange.HasBlock(ctx, alpha)
|
||||
|
||||
t.Logf("%v requests %v\n", me.peer.Key().Pretty(), alpha.Key().Pretty())
|
||||
t.Logf("%v requests %v\n", me.peer, alpha.Key())
|
||||
ctx, _ = context.WithTimeout(context.Background(), timeout)
|
||||
if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("%v should now have %v\n", w.peer.Key().Pretty(), alpha.Key().Pretty())
|
||||
t.Logf("%v should now have %v\n", w.peer, alpha.Key())
|
||||
block, err := w.blockstore.Get(alpha.Key())
|
||||
if err != nil {
|
||||
t.Fatal("Should not have received an error")
|
||||
@ -212,20 +211,17 @@ func TestSendToWantingPeer(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func NewBlockGenerator(t *testing.T) BlockGenerator {
|
||||
return BlockGenerator{
|
||||
T: t,
|
||||
}
|
||||
func NewBlockGenerator() BlockGenerator {
|
||||
return BlockGenerator{}
|
||||
}
|
||||
|
||||
type BlockGenerator struct {
|
||||
*testing.T // b/c block generation can fail
|
||||
seq int
|
||||
seq int
|
||||
}
|
||||
|
||||
func (bg *BlockGenerator) Next() blocks.Block {
|
||||
bg.seq++
|
||||
return testutil.NewBlockOrFail(bg.T, string(bg.seq))
|
||||
return *blocks.NewBlock([]byte(string(bg.seq)))
|
||||
}
|
||||
|
||||
func (bg *BlockGenerator) Blocks(n int) []*blocks.Block {
|
||||
|
||||
@ -32,19 +32,16 @@ func New() *message {
|
||||
return new(message)
|
||||
}
|
||||
|
||||
func newMessageFromProto(pbm PBMessage) (BitSwapMessage, error) {
|
||||
func newMessageFromProto(pbm PBMessage) BitSwapMessage {
|
||||
m := New()
|
||||
for _, s := range pbm.GetWantlist() {
|
||||
m.AppendWanted(u.Key(s))
|
||||
}
|
||||
for _, d := range pbm.GetBlocks() {
|
||||
b, err := blocks.NewBlock(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := blocks.NewBlock(d)
|
||||
m.AppendBlock(*b)
|
||||
}
|
||||
return m, nil
|
||||
return m
|
||||
}
|
||||
|
||||
// TODO(brian): convert these into keys
|
||||
@ -70,10 +67,7 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) {
|
||||
if err := proto.Unmarshal(nmsg.Data(), pb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m, err := newMessageFromProto(*pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := newMessageFromProto(*pb)
|
||||
return m, nil
|
||||
}
|
||||
|
||||
|
||||
@ -4,9 +4,9 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/jbenet/go-ipfs/blocks"
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
func TestAppendWanted(t *testing.T) {
|
||||
@ -26,10 +26,7 @@ func TestNewMessageFromProto(t *testing.T) {
|
||||
if !contains(protoMessage.Wantlist, str) {
|
||||
t.Fail()
|
||||
}
|
||||
m, err := newMessageFromProto(*protoMessage)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
m := newMessageFromProto(*protoMessage)
|
||||
if !contains(m.ToProto().GetWantlist(), str) {
|
||||
t.Fail()
|
||||
}
|
||||
@ -43,8 +40,8 @@ func TestAppendBlock(t *testing.T) {
|
||||
|
||||
m := New()
|
||||
for _, str := range strs {
|
||||
block := testutil.NewBlockOrFail(t, str)
|
||||
m.AppendBlock(block)
|
||||
block := blocks.NewBlock([]byte(str))
|
||||
m.AppendBlock(*block)
|
||||
}
|
||||
|
||||
// assert strings are in proto message
|
||||
@ -134,10 +131,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) {
|
||||
func TestToAndFromNetMessage(t *testing.T) {
|
||||
|
||||
original := New()
|
||||
original.AppendBlock(testutil.NewBlockOrFail(t, "W"))
|
||||
original.AppendBlock(testutil.NewBlockOrFail(t, "E"))
|
||||
original.AppendBlock(testutil.NewBlockOrFail(t, "F"))
|
||||
original.AppendBlock(testutil.NewBlockOrFail(t, "M"))
|
||||
original.AppendBlock(*blocks.NewBlock([]byte("W")))
|
||||
original.AppendBlock(*blocks.NewBlock([]byte("E")))
|
||||
original.AppendBlock(*blocks.NewBlock([]byte("F")))
|
||||
original.AppendBlock(*blocks.NewBlock([]byte("M")))
|
||||
|
||||
p := &peer.Peer{ID: []byte("X")}
|
||||
netmsg, err := original.ToNet(p)
|
||||
|
||||
@ -6,25 +6,23 @@ import (
|
||||
"time"
|
||||
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
)
|
||||
|
||||
func TestPublishSubscribe(t *testing.T) {
|
||||
blockSent := testutil.NewBlockOrFail(t, "Greetings from The Interval")
|
||||
blockSent := blocks.NewBlock([]byte("Greetings from The Interval"))
|
||||
|
||||
n := New()
|
||||
defer n.Shutdown()
|
||||
ch := n.Subscribe(context.Background(), blockSent.Key())
|
||||
|
||||
n.Publish(blockSent)
|
||||
n.Publish(*blockSent)
|
||||
blockRecvd, ok := <-ch
|
||||
if !ok {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
assertBlocksEqual(t, blockRecvd, blockSent)
|
||||
assertBlocksEqual(t, blockRecvd, *blockSent)
|
||||
|
||||
}
|
||||
|
||||
@ -35,7 +33,7 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) {
|
||||
|
||||
n := New()
|
||||
defer n.Shutdown()
|
||||
block := testutil.NewBlockOrFail(t, "A Missed Connection")
|
||||
block := blocks.NewBlock([]byte("A Missed Connection"))
|
||||
blockChannel := n.Subscribe(fastExpiringCtx, block.Key())
|
||||
|
||||
assertBlockChannelNil(t, blockChannel)
|
||||
|
||||
@ -4,9 +4,9 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
message "github.com/jbenet/go-ipfs/exchange/bitswap/message"
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
type peerAndStrategist struct {
|
||||
@ -30,7 +30,7 @@ func TestConsistentAccounting(t *testing.T) {
|
||||
|
||||
m := message.New()
|
||||
content := []string{"this", "is", "message", "i"}
|
||||
m.AppendBlock(testutil.NewBlockOrFail(t, strings.Join(content, " ")))
|
||||
m.AppendBlock(*blocks.NewBlock([]byte(strings.Join(content, " "))))
|
||||
|
||||
sender.MessageSent(receiver.Peer, m)
|
||||
receiver.MessageReceived(sender.Peer, m)
|
||||
@ -57,7 +57,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) {
|
||||
beggar := newPeerAndStrategist("can't be chooser")
|
||||
chooser := newPeerAndStrategist("chooses JIF")
|
||||
|
||||
block := testutil.NewBlockOrFail(t, "data wanted by beggar")
|
||||
block := blocks.NewBlock([]byte("data wanted by beggar"))
|
||||
|
||||
messageFromBeggarToChooser := message.New()
|
||||
messageFromBeggarToChooser.AppendWanted(block.Key())
|
||||
|
||||
@ -5,10 +5,10 @@ import (
|
||||
"testing"
|
||||
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
|
||||
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
func TestSendRequestToCooperativePeer(t *testing.T) {
|
||||
@ -33,7 +33,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
|
||||
// TODO test contents of incoming message
|
||||
|
||||
m := bsmsg.New()
|
||||
m.AppendBlock(testutil.NewBlockOrFail(t, expectedStr))
|
||||
m.AppendBlock(*blocks.NewBlock([]byte(expectedStr)))
|
||||
|
||||
return from, m
|
||||
}))
|
||||
@ -41,7 +41,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
|
||||
t.Log("Build a message and send a synchronous request to recipient")
|
||||
|
||||
message := bsmsg.New()
|
||||
message.AppendBlock(testutil.NewBlockOrFail(t, "data"))
|
||||
message.AppendBlock(*blocks.NewBlock([]byte("data")))
|
||||
response, err := initiator.SendRequest(
|
||||
context.Background(), &peer.Peer{ID: idOfRecipient}, message)
|
||||
if err != nil {
|
||||
@ -77,7 +77,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
|
||||
*peer.Peer, bsmsg.BitSwapMessage) {
|
||||
|
||||
msgToWaiter := bsmsg.New()
|
||||
msgToWaiter.AppendBlock(testutil.NewBlockOrFail(t, expectedStr))
|
||||
msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr)))
|
||||
|
||||
return fromWaiter, msgToWaiter
|
||||
}))
|
||||
@ -105,7 +105,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
|
||||
}))
|
||||
|
||||
messageSentAsync := bsmsg.New()
|
||||
messageSentAsync.AppendBlock(testutil.NewBlockOrFail(t, "data"))
|
||||
messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data")))
|
||||
errSending := waiter.SendMessage(
|
||||
context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync)
|
||||
if errSending != nil {
|
||||
|
||||
@ -5,8 +5,8 @@ import (
|
||||
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
testutil "github.com/jbenet/go-ipfs/util/testutil"
|
||||
)
|
||||
|
||||
func TestBlockReturnsErr(t *testing.T) {
|
||||
@ -20,8 +20,8 @@ func TestBlockReturnsErr(t *testing.T) {
|
||||
|
||||
func TestHasBlockReturnsNil(t *testing.T) {
|
||||
off := NewOfflineExchange()
|
||||
block := testutil.NewBlockOrFail(t, "data")
|
||||
err := off.HasBlock(context.Background(), block)
|
||||
block := blocks.NewBlock([]byte("data"))
|
||||
err := off.HasBlock(context.Background(), *block)
|
||||
if err != nil {
|
||||
t.Fatal("")
|
||||
}
|
||||
|
||||
337
fuse/ipns/ipns_test.go
Normal file
337
fuse/ipns/ipns_test.go
Normal file
@ -0,0 +1,337 @@
|
||||
package ipns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
fstest "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil"
|
||||
core "github.com/jbenet/go-ipfs/core"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
func maybeSkipFuseTests(t *testing.T) bool {
|
||||
v := "TEST_NO_FUSE"
|
||||
n := strings.ToLower(os.Getenv(v))
|
||||
skip := n != "" && n != "false" && n != "f"
|
||||
|
||||
if skip {
|
||||
t.Skipf("Skipping FUSE tests (%s=%s)", v, n)
|
||||
}
|
||||
return skip
|
||||
}
|
||||
|
||||
func randBytes(size int) []byte {
|
||||
b := make([]byte, size)
|
||||
rand.Read(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func writeFile(t *testing.T, size int, path string) []byte {
|
||||
return writeFileData(t, randBytes(size), path)
|
||||
}
|
||||
|
||||
func writeFileData(t *testing.T, data []byte, path string) []byte {
|
||||
fi, err := os.Create(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n, err := fi.Write(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if n != len(data) {
|
||||
t.Fatal("Didnt write proper amount!")
|
||||
}
|
||||
|
||||
err = fi.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) {
|
||||
maybeSkipFuseTests(t)
|
||||
|
||||
var err error
|
||||
if node == nil {
|
||||
node, err = core.NewMockNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
fs, err := NewIpns(node, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mnt, err := fstest.MountedT(t, fs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return node, mnt
|
||||
}
|
||||
|
||||
// Test writing a file and reading it back
|
||||
func TestIpnsBasicIO(t *testing.T) {
|
||||
_, mnt := setupIpnsTest(t, nil)
|
||||
defer mnt.Close()
|
||||
|
||||
fname := mnt.Dir + "/local/testfile"
|
||||
data := writeFile(t, 12345, fname)
|
||||
|
||||
rbuf, err := ioutil.ReadFile(fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(rbuf, data) {
|
||||
t.Fatal("Incorrect Read!")
|
||||
}
|
||||
}
|
||||
|
||||
// Test to make sure file changes persist over mounts of ipns
|
||||
func TestFilePersistence(t *testing.T) {
|
||||
node, mnt := setupIpnsTest(t, nil)
|
||||
|
||||
fname := "/local/atestfile"
|
||||
data := writeFile(t, 127, mnt.Dir+fname)
|
||||
|
||||
// Wait for publish: TODO: make publish happen faster in tests
|
||||
time.Sleep(time.Millisecond * 40)
|
||||
|
||||
mnt.Close()
|
||||
|
||||
node, mnt = setupIpnsTest(t, node)
|
||||
defer mnt.Close()
|
||||
|
||||
rbuf, err := ioutil.ReadFile(mnt.Dir + fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(rbuf, data) {
|
||||
t.Fatalf("File data changed between mounts! sizes differ: %d != %d", len(data), len(rbuf))
|
||||
}
|
||||
}
|
||||
|
||||
// Test to make sure the filesystem reports file sizes correctly
|
||||
func TestFileSizeReporting(t *testing.T) {
|
||||
_, mnt := setupIpnsTest(t, nil)
|
||||
defer mnt.Close()
|
||||
|
||||
fname := mnt.Dir + "/local/sizecheck"
|
||||
data := writeFile(t, 5555, fname)
|
||||
|
||||
finfo, err := os.Stat(fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if finfo.Size() != int64(len(data)) {
|
||||
t.Fatal("Read incorrect size from stat!")
|
||||
}
|
||||
}
|
||||
|
||||
// Test to make sure you cant create multiple entries with the same name
|
||||
func TestDoubleEntryFailure(t *testing.T) {
|
||||
_, mnt := setupIpnsTest(t, nil)
|
||||
defer mnt.Close()
|
||||
|
||||
dname := mnt.Dir + "/local/thisisadir"
|
||||
err := os.Mkdir(dname, 0777)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = os.Mkdir(dname, 0777)
|
||||
if err == nil {
|
||||
t.Fatal("Should have gotten error one creating new directory.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendFile(t *testing.T) {
|
||||
_, mnt := setupIpnsTest(t, nil)
|
||||
defer mnt.Close()
|
||||
|
||||
fname := mnt.Dir + "/local/file"
|
||||
data := writeFile(t, 1300, fname)
|
||||
|
||||
fi, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND, 0666)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
nudata := randBytes(500)
|
||||
|
||||
n, err := fi.Write(nudata)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = fi.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if n != len(nudata) {
|
||||
t.Fatal("Failed to write enough bytes.")
|
||||
}
|
||||
|
||||
data = append(data, nudata...)
|
||||
|
||||
rbuf, err := ioutil.ReadFile(fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(rbuf, data) {
|
||||
t.Fatal("Data inconsistent!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFastRepublish(t *testing.T) {
|
||||
|
||||
// make timeout noticeable.
|
||||
osrt := shortRepublishTimeout
|
||||
shortRepublishTimeout = time.Millisecond * 100
|
||||
|
||||
olrt := longRepublishTimeout
|
||||
longRepublishTimeout = time.Second
|
||||
|
||||
node, mnt := setupIpnsTest(t, nil)
|
||||
|
||||
h, err := node.Identity.PrivKey.GetPublic().Hash()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pubkeyHash := u.Key(h).Pretty()
|
||||
|
||||
// set them back
|
||||
defer func() {
|
||||
shortRepublishTimeout = osrt
|
||||
longRepublishTimeout = olrt
|
||||
mnt.Close()
|
||||
}()
|
||||
|
||||
closed := make(chan struct{})
|
||||
dataA := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
|
||||
dataB := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
|
||||
|
||||
fname := mnt.Dir + "/local/file"
|
||||
|
||||
// get first resolved hash
|
||||
log.Debug("publishing first hash")
|
||||
writeFileData(t, dataA, fname) // random
|
||||
<-time.After(shortRepublishTimeout * 11 / 10)
|
||||
log.Debug("resolving first hash")
|
||||
resolvedHash, err := node.Namesys.Resolve(pubkeyHash)
|
||||
if err != nil {
|
||||
t.Fatal("resolve err:", pubkeyHash, err)
|
||||
}
|
||||
|
||||
// constantly keep writing to the file
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-closed:
|
||||
return
|
||||
|
||||
case <-time.After(shortRepublishTimeout * 8 / 10):
|
||||
writeFileData(t, dataB, fname)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
hasPublished := func() bool {
|
||||
res, err := node.Namesys.Resolve(pubkeyHash)
|
||||
if err != nil {
|
||||
t.Fatal("resolve err: %v", err)
|
||||
}
|
||||
return res != resolvedHash
|
||||
}
|
||||
|
||||
// test things
|
||||
|
||||
// at this point, should not have written dataA and not have written dataB
|
||||
rbuf, err := ioutil.ReadFile(fname)
|
||||
if err != nil || !bytes.Equal(rbuf, dataA) {
|
||||
t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
|
||||
}
|
||||
|
||||
if hasPublished() {
|
||||
t.Fatal("published (wrote)")
|
||||
}
|
||||
|
||||
<-time.After(shortRepublishTimeout * 11 / 10)
|
||||
|
||||
// at this point, should have written written dataB, but not published it
|
||||
rbuf, err = ioutil.ReadFile(fname)
|
||||
if err != nil || !bytes.Equal(rbuf, dataB) {
|
||||
t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
|
||||
}
|
||||
|
||||
if hasPublished() {
|
||||
t.Fatal("published (wrote)")
|
||||
}
|
||||
|
||||
<-time.After(longRepublishTimeout * 11 / 10)
|
||||
|
||||
// at this point, should have written written dataB, and published it
|
||||
rbuf, err = ioutil.ReadFile(fname)
|
||||
if err != nil || !bytes.Equal(rbuf, dataB) {
|
||||
t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
|
||||
}
|
||||
|
||||
if !hasPublished() {
|
||||
t.Fatal("not published")
|
||||
}
|
||||
|
||||
close(closed)
|
||||
}
|
||||
|
||||
// Test writing a medium sized file one byte at a time
|
||||
func TestMultiWrite(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
link := "https://github.com/jbenet/go-ipfs/issues/147"
|
||||
t.Skipf("Skipping as is broken in OSX. See %s", link)
|
||||
}
|
||||
|
||||
_, mnt := setupIpnsTest(t, nil)
|
||||
defer mnt.Close()
|
||||
|
||||
fpath := mnt.Dir + "/local/file"
|
||||
fi, err := os.Create(fpath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
data := randBytes(1001)
|
||||
for i := 0; i < len(data); i++ {
|
||||
n, err := fi.Write(data[i : i+1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
|
||||
}
|
||||
}
|
||||
fi.Close()
|
||||
|
||||
rbuf, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(rbuf, data) {
|
||||
t.Fatal("File on disk did not match bytes written")
|
||||
}
|
||||
}
|
||||
568
fuse/ipns/ipns_unix.go
Normal file
568
fuse/ipns/ipns_unix.go
Normal file
@ -0,0 +1,568 @@
|
||||
package ipns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
|
||||
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
ci "github.com/jbenet/go-ipfs/crypto"
|
||||
"github.com/jbenet/go-ipfs/importer/chunk"
|
||||
mdag "github.com/jbenet/go-ipfs/merkledag"
|
||||
ft "github.com/jbenet/go-ipfs/unixfs"
|
||||
uio "github.com/jbenet/go-ipfs/unixfs/io"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("ipns")
|
||||
|
||||
var (
|
||||
shortRepublishTimeout = time.Millisecond * 5
|
||||
longRepublishTimeout = time.Millisecond * 500
|
||||
)
|
||||
|
||||
// FileSystem is the readwrite IPNS Fuse Filesystem.
|
||||
type FileSystem struct {
|
||||
Ipfs *core.IpfsNode
|
||||
RootNode *Root
|
||||
}
|
||||
|
||||
// NewFileSystem constructs new fs using given core.IpfsNode instance.
|
||||
func NewIpns(ipfs *core.IpfsNode, ipfspath string) (*FileSystem, error) {
|
||||
root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey}, ipfspath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileSystem{Ipfs: ipfs, RootNode: root}, nil
|
||||
}
|
||||
|
||||
func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, error) {
|
||||
root := new(Root)
|
||||
root.LocalDirs = make(map[string]*Node)
|
||||
root.Ipfs = n
|
||||
abspath, err := filepath.Abs(ipfsroot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root.IpfsRoot = abspath
|
||||
|
||||
root.Keys = keys
|
||||
|
||||
if len(keys) == 0 {
|
||||
log.Warning("No keys given for ipns root creation")
|
||||
} else {
|
||||
k := keys[0]
|
||||
pub := k.GetPublic()
|
||||
hash, err := pub.Hash()
|
||||
if err != nil {
|
||||
log.Error("Read Root Error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
root.LocalLink = &Link{u.Key(hash).Pretty()}
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
hash, err := k.GetPublic().Hash()
|
||||
if err != nil {
|
||||
log.Error("failed to hash public key.")
|
||||
continue
|
||||
}
|
||||
name := u.Key(hash).Pretty()
|
||||
nd := new(Node)
|
||||
nd.Ipfs = n
|
||||
nd.key = k
|
||||
nd.repub = NewRepublisher(nd, shortRepublishTimeout, longRepublishTimeout)
|
||||
|
||||
go nd.repub.Run()
|
||||
|
||||
pointsTo, err := n.Namesys.Resolve(name)
|
||||
if err != nil {
|
||||
log.Warning("Could not resolve value for local ipns entry, providing empty dir")
|
||||
nd.Nd = &mdag.Node{Data: ft.FolderPBData()}
|
||||
root.LocalDirs[name] = nd
|
||||
continue
|
||||
}
|
||||
|
||||
if !u.IsValidHash(pointsTo) {
|
||||
log.Critical("Got back bad data from namesys resolve! [%s]", pointsTo)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
node, err := n.Resolver.ResolvePath(pointsTo)
|
||||
if err != nil {
|
||||
log.Warning("Failed to resolve value from ipns entry in ipfs")
|
||||
continue
|
||||
}
|
||||
|
||||
nd.Nd = node
|
||||
root.LocalDirs[name] = nd
|
||||
}
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Root constructs the Root of the filesystem, a Root object.
|
||||
func (f FileSystem) Root() (fs.Node, fuse.Error) {
|
||||
return f.RootNode, nil
|
||||
}
|
||||
|
||||
// Root is the root object of the filesystem tree.
|
||||
type Root struct {
|
||||
Ipfs *core.IpfsNode
|
||||
Keys []ci.PrivKey
|
||||
|
||||
// Used for symlinking into ipfs
|
||||
IpfsRoot string
|
||||
LocalDirs map[string]*Node
|
||||
|
||||
LocalLink *Link
|
||||
}
|
||||
|
||||
// Attr returns file attributes.
|
||||
func (*Root) Attr() fuse.Attr {
|
||||
return fuse.Attr{Mode: os.ModeDir | 0111} // -rw+x
|
||||
}
|
||||
|
||||
// Lookup performs a lookup under this node.
|
||||
func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
log.Debug("ipns: Root Lookup: '%s'", name)
|
||||
switch name {
|
||||
case "mach_kernel", ".hidden", "._.":
|
||||
// Just quiet some log noise on OS X.
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
if name == "local" {
|
||||
if s.LocalLink == nil {
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
return s.LocalLink, nil
|
||||
}
|
||||
|
||||
nd, ok := s.LocalDirs[name]
|
||||
if ok {
|
||||
return nd, nil
|
||||
}
|
||||
|
||||
log.Debug("ipns: Falling back to resolution for [%s].", name)
|
||||
resolved, err := s.Ipfs.Namesys.Resolve(name)
|
||||
if err != nil {
|
||||
log.Warning("ipns: namesys resolve error: %s", err)
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
return &Link{s.IpfsRoot + "/" + resolved}, nil
|
||||
}
|
||||
|
||||
// ReadDir reads a particular directory. Disallowed for root.
|
||||
func (r *Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
|
||||
log.Debug("Read Root.")
|
||||
listing := []fuse.Dirent{
|
||||
fuse.Dirent{
|
||||
Name: "local",
|
||||
Type: fuse.DT_Link,
|
||||
},
|
||||
}
|
||||
for _, k := range r.Keys {
|
||||
pub := k.GetPublic()
|
||||
hash, err := pub.Hash()
|
||||
if err != nil {
|
||||
log.Error("Read Root Error: %s", err)
|
||||
continue
|
||||
}
|
||||
ent := fuse.Dirent{
|
||||
Name: u.Key(hash).Pretty(),
|
||||
Type: fuse.DT_Dir,
|
||||
}
|
||||
listing = append(listing, ent)
|
||||
}
|
||||
return listing, nil
|
||||
}
|
||||
|
||||
// Node is the core object representing a filesystem tree node.
|
||||
type Node struct {
|
||||
root *Root
|
||||
nsRoot *Node
|
||||
parent *Node
|
||||
|
||||
repub *Republisher
|
||||
|
||||
// This nodes name in its parent dir.
|
||||
// NOTE: this strategy wont work well if we allow hard links
|
||||
// (im all for murdering the thought of hard links)
|
||||
name string
|
||||
|
||||
// Private keys held by nodes at the root of a keyspace
|
||||
// WARNING(security): the PrivKey interface is currently insecure
|
||||
// (holds the raw key). It will be secured later.
|
||||
key ci.PrivKey
|
||||
|
||||
Ipfs *core.IpfsNode
|
||||
Nd *mdag.Node
|
||||
dagMod *uio.DagModifier
|
||||
cached *ft.PBData
|
||||
}
|
||||
|
||||
func (s *Node) loadData() error {
|
||||
s.cached = new(ft.PBData)
|
||||
return proto.Unmarshal(s.Nd.Data, s.cached)
|
||||
}
|
||||
|
||||
// Attr returns the attributes of a given node.
|
||||
func (s *Node) Attr() fuse.Attr {
|
||||
if s.cached == nil {
|
||||
err := s.loadData()
|
||||
if err != nil {
|
||||
log.Error("Error loading PBData for file: '%s'", s.name)
|
||||
}
|
||||
}
|
||||
switch s.cached.GetType() {
|
||||
case ft.PBData_Directory:
|
||||
return fuse.Attr{Mode: os.ModeDir | 0555}
|
||||
case ft.PBData_File, ft.PBData_Raw:
|
||||
size, err := ft.DataSize(s.Nd.Data)
|
||||
if err != nil {
|
||||
log.Error("Error getting size of file: %s", err)
|
||||
size = 0
|
||||
}
|
||||
return fuse.Attr{
|
||||
Mode: 0666,
|
||||
Size: size,
|
||||
Blocks: uint64(len(s.Nd.Links)),
|
||||
}
|
||||
default:
|
||||
log.Error("Invalid data type.")
|
||||
return fuse.Attr{}
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup performs a lookup under this node.
|
||||
func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
log.Debug("ipns: node[%s] Lookup '%s'", s.name, name)
|
||||
nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
|
||||
if err != nil {
|
||||
// todo: make this error more versatile.
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
return s.makeChild(name, nd), nil
|
||||
}
|
||||
|
||||
func (n *Node) makeChild(name string, node *mdag.Node) *Node {
|
||||
child := &Node{
|
||||
Ipfs: n.Ipfs,
|
||||
Nd: node,
|
||||
name: name,
|
||||
nsRoot: n.nsRoot,
|
||||
parent: n,
|
||||
}
|
||||
|
||||
// Always ensure that each child knows where the root is
|
||||
if n.nsRoot == nil {
|
||||
child.nsRoot = n
|
||||
} else {
|
||||
child.nsRoot = n.nsRoot
|
||||
}
|
||||
|
||||
return child
|
||||
}
|
||||
|
||||
// ReadDir reads the link structure as directory entries
|
||||
func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
|
||||
log.Debug("Node ReadDir")
|
||||
entries := make([]fuse.Dirent, len(s.Nd.Links))
|
||||
for i, link := range s.Nd.Links {
|
||||
n := link.Name
|
||||
if len(n) == 0 {
|
||||
n = link.Hash.B58String()
|
||||
}
|
||||
entries[i] = fuse.Dirent{Name: n, Type: fuse.DT_File}
|
||||
}
|
||||
|
||||
if len(entries) > 0 {
|
||||
return entries, nil
|
||||
}
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
// ReadAll reads the object data as file data
|
||||
func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
|
||||
log.Debug("ipns: ReadAll [%s]", s.name)
|
||||
r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// this is a terrible function... 'ReadAll'?
|
||||
// what if i have a 6TB file? GG RAM.
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Error("[%s] Readall error: %s", s.name, err)
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
|
||||
log.Debug("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data))
|
||||
|
||||
if n.dagMod == nil {
|
||||
// Create a DagModifier to allow us to change the existing dag node
|
||||
dmod, err := uio.NewDagModifier(n.Nd, n.Ipfs.DAG, chunk.DefaultSplitter)
|
||||
if err != nil {
|
||||
log.Error("Error creating dag modifier: %s", err)
|
||||
return err
|
||||
}
|
||||
n.dagMod = dmod
|
||||
}
|
||||
wrote, err := n.dagMod.WriteAt(req.Data, uint64(req.Offset))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Size = wrote
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
|
||||
log.Debug("Got flush request [%s]!", n.name)
|
||||
|
||||
// If a write has happened
|
||||
if n.dagMod != nil {
|
||||
newNode, err := n.dagMod.GetNode()
|
||||
if err != nil {
|
||||
log.Error("Error getting dag node from dagMod: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if n.parent != nil {
|
||||
log.Debug("updating self in parent!")
|
||||
err := n.parent.update(n.name, newNode)
|
||||
if err != nil {
|
||||
log.Critical("error in updating ipns dag tree: %s", err)
|
||||
// return fuse.ETHISISPRETTYBAD
|
||||
return err
|
||||
}
|
||||
}
|
||||
n.Nd = newNode
|
||||
|
||||
/*/TEMP
|
||||
dr, err := mdag.NewDagReader(n.Nd, n.Ipfs.DAG)
|
||||
if err != nil {
|
||||
log.Critical("Verification read failed.")
|
||||
}
|
||||
b, err := ioutil.ReadAll(dr)
|
||||
if err != nil {
|
||||
log.Critical("Verification read failed.")
|
||||
}
|
||||
fmt.Println("VERIFICATION READ")
|
||||
fmt.Printf("READ %d BYTES\n", len(b))
|
||||
fmt.Println(string(b))
|
||||
fmt.Println(b)
|
||||
//*/
|
||||
|
||||
n.dagMod = nil
|
||||
|
||||
n.wasChanged()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Signal that a node in this tree was changed so the root can republish
|
||||
func (n *Node) wasChanged() {
|
||||
root := n.nsRoot
|
||||
if root == nil {
|
||||
root = n
|
||||
}
|
||||
|
||||
root.repub.Publish <- struct{}{}
|
||||
}
|
||||
|
||||
func (n *Node) republishRoot() error {
|
||||
log.Debug("Republish root")
|
||||
|
||||
// We should already be the root, this is just a sanity check
|
||||
var root *Node
|
||||
if n.nsRoot != nil {
|
||||
root = n.nsRoot
|
||||
} else {
|
||||
root = n
|
||||
}
|
||||
|
||||
// Add any nodes that may be new to the DAG service
|
||||
err := n.Ipfs.DAG.AddRecursive(root.Nd)
|
||||
if err != nil {
|
||||
log.Critical("ipns: Dag Add Error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ndkey, err := root.Nd.Key()
|
||||
if err != nil {
|
||||
log.Error("getKey error: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Debug("Publishing changes!")
|
||||
|
||||
err = n.Ipfs.Namesys.Publish(root.key, ndkey.Pretty())
|
||||
if err != nil {
|
||||
log.Error("ipns: Publish Failed: %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error {
|
||||
log.Debug("Got fsync request!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
log.Debug("Got mkdir request!")
|
||||
dagnd := &mdag.Node{Data: ft.FolderPBData()}
|
||||
nnode := n.Nd.Copy()
|
||||
nnode.AddNodeLink(req.Name, dagnd)
|
||||
|
||||
child := &Node{
|
||||
Ipfs: n.Ipfs,
|
||||
Nd: dagnd,
|
||||
name: req.Name,
|
||||
}
|
||||
|
||||
if n.nsRoot == nil {
|
||||
child.nsRoot = n
|
||||
} else {
|
||||
child.nsRoot = n.nsRoot
|
||||
}
|
||||
|
||||
if n.parent != nil {
|
||||
err := n.parent.update(n.name, nnode)
|
||||
if err != nil {
|
||||
log.Critical("Error updating node: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
n.Nd = nnode
|
||||
|
||||
n.wasChanged()
|
||||
|
||||
return child, nil
|
||||
}
|
||||
|
||||
func (n *Node) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) {
|
||||
//log.Debug("[%s] Received open request! flags = %s", n.name, req.Flags.String())
|
||||
//TODO: check open flags and truncate if necessary
|
||||
if req.Flags&fuse.OpenTruncate != 0 {
|
||||
log.Warning("Need to truncate file!")
|
||||
n.cached = nil
|
||||
n.Nd = &mdag.Node{Data: ft.FilePBData(nil, 0)}
|
||||
} else if req.Flags&fuse.OpenAppend != 0 {
|
||||
log.Warning("Need to append to file!")
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (n *Node) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
log.Debug("Got mknod request!")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {
|
||||
log.Debug("Got create request: %s", req.Name)
|
||||
|
||||
// New 'empty' file
|
||||
nd := &mdag.Node{Data: ft.FilePBData(nil, 0)}
|
||||
child := n.makeChild(req.Name, nd)
|
||||
|
||||
nnode := n.Nd.Copy()
|
||||
|
||||
err := nnode.AddNodeLink(req.Name, nd)
|
||||
if err != nil {
|
||||
log.Error("Error adding child to node: %s", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
if n.parent != nil {
|
||||
err := n.parent.update(n.name, nnode)
|
||||
if err != nil {
|
||||
log.Critical("Error updating node: %s", err)
|
||||
// Can we panic, please?
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
n.Nd = nnode
|
||||
n.wasChanged()
|
||||
|
||||
return child, child, nil
|
||||
}
|
||||
|
||||
func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
|
||||
log.Debug("[%s] Got Remove request: %s", n.name, req.Name)
|
||||
nnode := n.Nd.Copy()
|
||||
err := nnode.RemoveNodeLink(req.Name)
|
||||
if err != nil {
|
||||
log.Error("Remove: No such file.")
|
||||
return fuse.ENOENT
|
||||
}
|
||||
|
||||
if n.parent != nil {
|
||||
err := n.parent.update(n.name, nnode)
|
||||
if err != nil {
|
||||
log.Critical("Error updating node: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
n.Nd = nnode
|
||||
n.wasChanged()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error {
|
||||
log.Debug("Got Rename request '%s' -> '%s'", req.OldName, req.NewName)
|
||||
var mdn *mdag.Node
|
||||
for _, l := range n.Nd.Links {
|
||||
if l.Name == req.OldName {
|
||||
mdn = l.Node
|
||||
}
|
||||
}
|
||||
if mdn == nil {
|
||||
log.Critical("nil Link found on rename!")
|
||||
return fuse.ENOENT
|
||||
}
|
||||
n.Nd.RemoveNodeLink(req.OldName)
|
||||
|
||||
switch newDir := newDir.(type) {
|
||||
case *Node:
|
||||
err := newDir.Nd.AddNodeLink(req.NewName, mdn)
|
||||
if err != nil {
|
||||
log.Error("Error adding node to new dir on rename: %s", err)
|
||||
return err
|
||||
}
|
||||
default:
|
||||
log.Critical("Unknown node type for rename target dir!")
|
||||
return errors.New("Unknown fs node type!")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Updates the child of this node, specified by name to the given newnode
|
||||
func (n *Node) update(name string, newnode *mdag.Node) error {
|
||||
log.Debug("update '%s' in '%s'", name, n.name)
|
||||
nnode := n.Nd.Copy()
|
||||
err := nnode.RemoveNodeLink(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nnode.AddNodeLink(name, newnode)
|
||||
|
||||
if n.parent != nil {
|
||||
err := n.parent.update(n.name, nnode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
n.Nd = nnode
|
||||
return nil
|
||||
}
|
||||
24
fuse/ipns/link_unix.go
Normal file
24
fuse/ipns/link_unix.go
Normal file
@ -0,0 +1,24 @@
|
||||
package ipns
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
|
||||
)
|
||||
|
||||
type Link struct {
|
||||
Target string
|
||||
}
|
||||
|
||||
func (l *Link) Attr() fuse.Attr {
|
||||
log.Debug("Link attr.")
|
||||
return fuse.Attr{
|
||||
Mode: os.ModeSymlink | 0555,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Link) Readlink(req *fuse.ReadlinkRequest, intr fs.Intr) (string, fuse.Error) {
|
||||
log.Debug("ReadLink: %s", l.Target)
|
||||
return l.Target, nil
|
||||
}
|
||||
91
fuse/ipns/mount_unix.go
Normal file
91
fuse/ipns/mount_unix.go
Normal file
@ -0,0 +1,91 @@
|
||||
package ipns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
|
||||
"github.com/jbenet/go-ipfs/core"
|
||||
)
|
||||
|
||||
// Mount mounts an IpfsNode instance at a particular path. It
|
||||
// serves until the process receives exit signals (to Unmount).
|
||||
func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
|
||||
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT,
|
||||
syscall.SIGTERM, syscall.SIGQUIT)
|
||||
|
||||
go func() {
|
||||
<-sigc
|
||||
for {
|
||||
err := Unmount(fpath)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
ipfs.Network.Close()
|
||||
}()
|
||||
|
||||
c, err := fuse.Mount(fpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
fsys, err := NewIpns(ipfs, ipfspath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = fs.Serve(c, fsys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if the mount process has an error to report
|
||||
<-c.Ready
|
||||
if err := c.MountError; err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmount attempts to unmount the provided FUSE mount point, forcibly
// if necessary. It gives up after one second.
func Unmount(point string) error {
	fmt.Printf("Unmounting %s...\n", point)

	// platform-specific forced-unmount fallback command
	var cmd *exec.Cmd
	switch runtime.GOOS {
	case "darwin":
		cmd = exec.Command("diskutil", "umount", "force", point)
	case "linux":
		cmd = exec.Command("fusermount", "-u", point)
	default:
		return fmt.Errorf("unmount: unimplemented")
	}

	errc := make(chan error, 1)
	go func() {
		// try a plain umount first
		if err := exec.Command("umount", point).Run(); err == nil {
			errc <- err
			// BUGFIX: return was missing here, so a successful plain
			// umount still ran the forced fallback and left this
			// goroutine blocked forever on the second send.
			return
		}
		// retry to unmount with the fallback cmd
		errc <- cmd.Run()
	}()

	select {
	case <-time.After(1 * time.Second):
		return fmt.Errorf("umount timeout")
	case err := <-errc:
		return err
	}
}
|
||||
42
fuse/ipns/repub_unix.go
Normal file
42
fuse/ipns/repub_unix.go
Normal file
@ -0,0 +1,42 @@
|
||||
package ipns
|
||||
|
||||
import "time"
|
||||
|
||||
// Republisher debounces publish requests for an ipns node: a burst of
// signals on Publish results in a single republish, at most
// TimeoutShort after the last signal and at most TimeoutLong after the
// first (see Run).
type Republisher struct {
	// TimeoutLong is the maximum delay between a change and a publish.
	TimeoutLong time.Duration
	// TimeoutShort is the quiet period waited after the most recent change.
	TimeoutShort time.Duration
	// Publish receives one signal per change that should be republished.
	Publish chan struct{}
	// node is the ipns node whose root gets republished.
	node *Node
}
|
||||
|
||||
func NewRepublisher(n *Node, tshort, tlong time.Duration) *Republisher {
|
||||
return &Republisher{
|
||||
TimeoutShort: tshort,
|
||||
TimeoutLong: tlong,
|
||||
Publish: make(chan struct{}),
|
||||
node: n,
|
||||
}
|
||||
}
|
||||
|
||||
func (np *Republisher) Run() {
|
||||
for _ = range np.Publish {
|
||||
quick := time.After(np.TimeoutShort)
|
||||
longer := time.After(np.TimeoutLong)
|
||||
|
||||
wait:
|
||||
select {
|
||||
case <-quick:
|
||||
case <-longer:
|
||||
case <-np.Publish:
|
||||
quick = time.After(np.TimeoutShort)
|
||||
goto wait
|
||||
}
|
||||
|
||||
log.Info("Publishing Changes!")
|
||||
err := np.node.republishRoot()
|
||||
if err != nil {
|
||||
log.Critical("republishRoot error: %s", err)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
29
fuse/ipns/writerat.go
Normal file
29
fuse/ipns/writerat.go
Normal file
@ -0,0 +1,29 @@
|
||||
package ipns
|
||||
|
||||
import "io"
|
||||
|
||||
// WriteAtBuf is an io.WriterAt backed by a growable in-memory buffer
// whose accumulated contents can be read back with Bytes.
type WriteAtBuf interface {
	io.WriterAt
	Bytes() []byte
}

// writerAt is the in-memory WriteAtBuf implementation.
type writerAt struct {
	buf []byte
}

// NewWriterAtFromBytes wraps b in a WriteAtBuf; writes past the end of
// b grow the buffer in place.
func NewWriterAtFromBytes(b []byte) WriteAtBuf {
	return &writerAt{buf: b}
}

// WriteAt copies p into the buffer at offset off, growing the buffer
// (zero-filled) when the write extends past the current end. It always
// reports a full write and never returns an error.
// TODO: make this better in the future, this is just a quick hack for now
func (wa *writerAt) WriteAt(p []byte, off int64) (int, error) {
	end := int(off) + len(p)
	if end > len(wa.buf) {
		// extend with zeros up to the end of this write
		wa.buf = append(wa.buf, make([]byte, end-len(wa.buf))...)
	}
	copy(wa.buf[off:], p)
	return len(p), nil
}

// Bytes returns the underlying buffer.
func (wa *writerAt) Bytes() []byte {
	return wa.buf
}
|
||||
@ -14,13 +14,19 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
|
||||
core "github.com/jbenet/go-ipfs/core"
|
||||
mdag "github.com/jbenet/go-ipfs/merkledag"
|
||||
ft "github.com/jbenet/go-ipfs/unixfs"
|
||||
uio "github.com/jbenet/go-ipfs/unixfs/io"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("ipfs")
|
||||
|
||||
// FileSystem is the readonly Ipfs Fuse Filesystem.
|
||||
type FileSystem struct {
|
||||
Ipfs *core.IpfsNode
|
||||
@ -48,7 +54,7 @@ func (*Root) Attr() fuse.Attr {
|
||||
|
||||
// Lookup performs a lookup under this node.
|
||||
func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
u.DOut("Root Lookup: '%s'\n", name)
|
||||
log.Debug("Root Lookup: '%s'", name)
|
||||
switch name {
|
||||
case "mach_kernel", ".hidden", "._.":
|
||||
// Just quiet some log noise on OS X.
|
||||
@ -66,31 +72,48 @@ func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
|
||||
// ReadDir reads a particular directory. Disallowed for root.
|
||||
func (*Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
|
||||
u.DOut("Read Root.\n")
|
||||
log.Debug("Read Root.")
|
||||
return nil, fuse.EPERM
|
||||
}
|
||||
|
||||
// Node is the core object representing a filesystem tree node.
|
||||
type Node struct {
|
||||
Ipfs *core.IpfsNode
|
||||
Nd *mdag.Node
|
||||
fd *mdag.DagReader
|
||||
Ipfs *core.IpfsNode
|
||||
Nd *mdag.Node
|
||||
fd *uio.DagReader
|
||||
cached *ft.PBData
|
||||
}
|
||||
|
||||
func (s *Node) loadData() error {
|
||||
s.cached = new(ft.PBData)
|
||||
return proto.Unmarshal(s.Nd.Data, s.cached)
|
||||
}
|
||||
|
||||
// Attr returns the attributes of a given node.
|
||||
func (s *Node) Attr() fuse.Attr {
|
||||
u.DOut("Node attr.\n")
|
||||
if len(s.Nd.Links) > 0 {
|
||||
return fuse.Attr{Mode: os.ModeDir | 0555}
|
||||
log.Debug("Node attr.")
|
||||
if s.cached == nil {
|
||||
s.loadData()
|
||||
}
|
||||
switch s.cached.GetType() {
|
||||
case ft.PBData_Directory:
|
||||
return fuse.Attr{Mode: os.ModeDir | 0555}
|
||||
case ft.PBData_File, ft.PBData_Raw:
|
||||
size, _ := s.Nd.Size()
|
||||
return fuse.Attr{
|
||||
Mode: 0444,
|
||||
Size: uint64(size),
|
||||
Blocks: uint64(len(s.Nd.Links)),
|
||||
}
|
||||
default:
|
||||
u.PErr("Invalid data type.")
|
||||
return fuse.Attr{}
|
||||
}
|
||||
|
||||
size, _ := s.Nd.Size()
|
||||
return fuse.Attr{Mode: 0444, Size: uint64(size)}
|
||||
}
|
||||
|
||||
// Lookup performs a lookup under this node.
|
||||
func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
u.DOut("Lookup '%s'\n", name)
|
||||
log.Debug("Lookup '%s'", name)
|
||||
nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
|
||||
if err != nil {
|
||||
// todo: make this error more versatile.
|
||||
@ -102,7 +125,7 @@ func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
|
||||
|
||||
// ReadDir reads the link structure as directory entries
|
||||
func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
|
||||
u.DOut("Node ReadDir\n")
|
||||
log.Debug("Node ReadDir")
|
||||
entries := make([]fuse.Dirent, len(s.Nd.Links))
|
||||
for i, link := range s.Nd.Links {
|
||||
n := link.Name
|
||||
@ -121,7 +144,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
|
||||
// ReadAll reads the object data as file data
|
||||
func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
|
||||
u.DOut("Read node.\n")
|
||||
r, err := mdag.NewDagReader(s.Nd, s.Ipfs.DAG)
|
||||
r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -147,6 +170,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
ipfs.Network.Close()
|
||||
}()
|
||||
|
||||
c, err := fuse.Mount(fpath)
|
||||
@ -171,7 +195,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
|
||||
// Unmount attempts to unmount the provided FUSE mount point, forcibly
|
||||
// if necessary.
|
||||
func Unmount(point string) error {
|
||||
fmt.Printf("Unmounting %s...\n", point)
|
||||
log.Info("Unmounting %s...", point)
|
||||
|
||||
var cmd *exec.Cmd
|
||||
switch runtime.GOOS {
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
package importer
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@ -92,43 +92,3 @@ func (mr *MaybeRabin) Split(r io.Reader) chan []byte {
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
/*
|
||||
func WhyrusleepingCantImplementRabin(r io.Reader) chan []byte {
|
||||
out := make(chan []byte, 4)
|
||||
go func() {
|
||||
buf := bufio.NewReader(r)
|
||||
blkbuf := new(bytes.Buffer)
|
||||
window := make([]byte, 16)
|
||||
var val uint64
|
||||
prime := uint64(61)
|
||||
|
||||
get := func(i int) uint64 {
|
||||
return uint64(window[i%len(window)])
|
||||
}
|
||||
|
||||
set := func(i int, val byte) {
|
||||
window[i%len(window)] = val
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
curb, err := buf.ReadByte()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
set(i, curb)
|
||||
blkbuf.WriteByte(curb)
|
||||
|
||||
hash := md5.Sum(window)
|
||||
if hash[0] == 0 && hash[1] == 0 {
|
||||
out <- blkbuf.Bytes()
|
||||
blkbuf.Reset()
|
||||
}
|
||||
}
|
||||
out <- blkbuf.Bytes()
|
||||
close(out)
|
||||
}()
|
||||
|
||||
return out
|
||||
}
|
||||
*/
|
||||
@ -1,13 +1,17 @@
|
||||
package importer
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
"github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = util.Logger("chunk")
|
||||
|
||||
var DefaultSplitter = &SizeSplitter{1024 * 512}
|
||||
|
||||
type BlockSplitter interface {
|
||||
Split(io.Reader) chan []byte
|
||||
Split(r io.Reader) chan []byte
|
||||
}
|
||||
|
||||
type SizeSplitter struct {
|
||||
@ -28,7 +32,7 @@ func (ss *SizeSplitter) Split(r io.Reader) chan []byte {
|
||||
}
|
||||
return
|
||||
}
|
||||
u.PErr("block split error: %v\n", err)
|
||||
log.Error("Block split error: %s", err)
|
||||
return
|
||||
}
|
||||
if nread < ss.Size {
|
||||
@ -5,9 +5,14 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/jbenet/go-ipfs/importer/chunk"
|
||||
dag "github.com/jbenet/go-ipfs/merkledag"
|
||||
ft "github.com/jbenet/go-ipfs/unixfs"
|
||||
"github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = util.Logger("importer")
|
||||
|
||||
// BlockSizeLimit specifies the maximum size an imported block can have.
|
||||
var BlockSizeLimit = int64(1048576) // 1 MB
|
||||
|
||||
@ -20,22 +25,31 @@ var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded")
|
||||
// NewDagFromReader constructs a Merkle DAG from the given io.Reader.
|
||||
// size required for block construction.
|
||||
func NewDagFromReader(r io.Reader) (*dag.Node, error) {
|
||||
return NewDagFromReaderWithSplitter(r, &SizeSplitter{1024 * 512})
|
||||
return NewDagFromReaderWithSplitter(r, chunk.DefaultSplitter)
|
||||
}
|
||||
|
||||
func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) {
|
||||
func NewDagFromReaderWithSplitter(r io.Reader, spl chunk.BlockSplitter) (*dag.Node, error) {
|
||||
blkChan := spl.Split(r)
|
||||
first := <-blkChan
|
||||
root := &dag.Node{Data: dag.FilePBData(first)}
|
||||
root := &dag.Node{}
|
||||
|
||||
mbf := new(ft.MultiBlock)
|
||||
for blk := range blkChan {
|
||||
child := &dag.Node{Data: dag.WrapData(blk)}
|
||||
mbf.AddBlockSize(uint64(len(blk)))
|
||||
child := &dag.Node{Data: ft.WrapData(blk)}
|
||||
err := root.AddNodeLink("", child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
mbf.Data = first
|
||||
data, err := mbf.GetBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root.Data = data
|
||||
return root, nil
|
||||
}
|
||||
|
||||
|
||||
@ -9,9 +9,13 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
dag "github.com/jbenet/go-ipfs/merkledag"
|
||||
"github.com/jbenet/go-ipfs/importer/chunk"
|
||||
uio "github.com/jbenet/go-ipfs/unixfs/io"
|
||||
)
|
||||
|
||||
// NOTE:
|
||||
// These tests tests a combination of unixfs/io/dagreader and importer/chunk.
|
||||
// Maybe split them up somehow?
|
||||
func TestBuildDag(t *testing.T) {
|
||||
td := os.TempDir()
|
||||
fi, err := os.Create(td + "/tmpfi")
|
||||
@ -34,24 +38,30 @@ func TestBuildDag(t *testing.T) {
|
||||
|
||||
//Test where calls to read are smaller than the chunk size
|
||||
func TestSizeBasedSplit(t *testing.T) {
|
||||
bs := &SizeSplitter{512}
|
||||
bs := &chunk.SizeSplitter{512}
|
||||
testFileConsistency(t, bs, 32*512)
|
||||
bs = &SizeSplitter{4096}
|
||||
bs = &chunk.SizeSplitter{4096}
|
||||
testFileConsistency(t, bs, 32*4096)
|
||||
|
||||
// Uneven offset
|
||||
testFileConsistency(t, bs, 31*4095)
|
||||
}
|
||||
|
||||
func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) {
|
||||
func dup(b []byte) []byte {
|
||||
o := make([]byte, len(b))
|
||||
copy(o, b)
|
||||
return o
|
||||
}
|
||||
|
||||
func testFileConsistency(t *testing.T, bs chunk.BlockSplitter, nbytes int) {
|
||||
buf := new(bytes.Buffer)
|
||||
io.CopyN(buf, rand.Reader, int64(nbytes))
|
||||
should := buf.Bytes()
|
||||
should := dup(buf.Bytes())
|
||||
nd, err := NewDagFromReaderWithSplitter(buf, bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := dag.NewDagReader(nd, nil)
|
||||
r, err := uio.NewDagReader(nd, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -80,14 +90,14 @@ func arrComp(a, b []byte) error {
|
||||
}
|
||||
|
||||
func TestMaybeRabinConsistency(t *testing.T) {
|
||||
testFileConsistency(t, NewMaybeRabin(4096), 256*4096)
|
||||
testFileConsistency(t, chunk.NewMaybeRabin(4096), 256*4096)
|
||||
}
|
||||
|
||||
func TestRabinBlockSize(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
nbytes := 1024 * 1024
|
||||
io.CopyN(buf, rand.Reader, int64(nbytes))
|
||||
rab := NewMaybeRabin(4096)
|
||||
rab := chunk.NewMaybeRabin(4096)
|
||||
blkch := rab.Split(buf)
|
||||
|
||||
var blocks [][]byte
|
||||
|
||||
@ -1,11 +1,8 @@
|
||||
|
||||
all: node.pb.go data.pb.go
|
||||
all: node.pb.go
|
||||
|
||||
node.pb.go: node.proto
|
||||
protoc --gogo_out=. --proto_path=../../../../:/usr/local/opt/protobuf/include:. $<
|
||||
|
||||
data.pb.go: data.proto
|
||||
protoc --go_out=. data.proto
|
||||
|
||||
clean:
|
||||
rm node.pb.go
|
||||
|
||||
@ -3,6 +3,8 @@ package merkledag
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
|
||||
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
|
||||
)
|
||||
|
||||
@ -76,6 +78,7 @@ func (n *Node) Encoded(force bool) ([]byte, error) {
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
n.cached = u.Hash(n.encoded)
|
||||
}
|
||||
|
||||
return n.encoded, nil
|
||||
|
||||
@ -3,14 +3,14 @@ package merkledag
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
|
||||
|
||||
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
|
||||
blocks "github.com/jbenet/go-ipfs/blocks"
|
||||
bserv "github.com/jbenet/go-ipfs/blockservice"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
var log = u.Logger("merkledag")
|
||||
|
||||
// NodeMap maps u.Keys to Nodes.
|
||||
// We cannot use []byte/Multihash for keys :(
|
||||
// so have to convert Multihash bytes to string (u.Key)
|
||||
@ -24,6 +24,8 @@ type Node struct {
|
||||
|
||||
// cache encoded/marshaled value
|
||||
encoded []byte
|
||||
|
||||
cached mh.Multihash
|
||||
}
|
||||
|
||||
// Link represents an IPFS Merkle DAG Link between Nodes.
|
||||
@ -41,27 +43,70 @@ type Link struct {
|
||||
Node *Node
|
||||
}
|
||||
|
||||
// AddNodeLink adds a link to another node.
|
||||
func (n *Node) AddNodeLink(name string, that *Node) error {
|
||||
s, err := that.Size()
|
||||
func MakeLink(n *Node) (*Link, error) {
|
||||
s, err := n.Size()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h, err := that.Multihash()
|
||||
h, err := n.Multihash()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.Links = append(n.Links, &Link{
|
||||
Name: name,
|
||||
return &Link{
|
||||
Size: s,
|
||||
Hash: h,
|
||||
Node: that,
|
||||
})
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AddNodeLink adds a link to another node.
|
||||
func (n *Node) AddNodeLink(name string, that *Node) error {
|
||||
lnk, err := MakeLink(that)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lnk.Name = name
|
||||
lnk.Node = that
|
||||
|
||||
n.Links = append(n.Links, lnk)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddNodeLinkClean adds a link to another node, without keeping a
// reference to the child node.
func (n *Node) AddNodeLinkClean(name string, that *Node) error {
	// MakeLink records that's size and hash but not the node itself
	lnk, err := MakeLink(that)
	if err != nil {
		return err
	}
	lnk.Name = name

	n.Links = append(n.Links, lnk)
	return nil
}
|
||||
|
||||
func (n *Node) RemoveNodeLink(name string) error {
|
||||
for i, l := range n.Links {
|
||||
if l.Name == name {
|
||||
n.Links = append(n.Links[:i], n.Links[i+1:]...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return u.ErrNotFound
|
||||
}
|
||||
|
||||
// Copy returns a copy of the node.
|
||||
// NOTE: does not make copies of Node objects in the links.
|
||||
func (n *Node) Copy() *Node {
|
||||
nnode := new(Node)
|
||||
nnode.Data = make([]byte, len(n.Data))
|
||||
copy(nnode.Data, n.Data)
|
||||
|
||||
nnode.Links = make([]*Link, len(n.Links))
|
||||
copy(nnode.Links, n.Links)
|
||||
return nnode
|
||||
}
|
||||
|
||||
// Size returns the total size of the data addressed by node,
|
||||
// including the total sizes of references.
|
||||
func (n *Node) Size() (uint64, error) {
|
||||
@ -79,12 +124,12 @@ func (n *Node) Size() (uint64, error) {
|
||||
|
||||
// Multihash hashes the encoded data of this node.
|
||||
func (n *Node) Multihash() (mh.Multihash, error) {
|
||||
b, err := n.Encoded(false)
|
||||
_, err := n.Encoded(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return u.Hash(b)
|
||||
return n.cached, nil
|
||||
}
|
||||
|
||||
// Key returns the Multihash as a key, for maps.
|
||||
@ -105,7 +150,7 @@ type DAGService struct {
|
||||
// Add adds a node to the DAGService, storing the block in the BlockService
|
||||
func (n *DAGService) Add(nd *Node) (u.Key, error) {
|
||||
k, _ := nd.Key()
|
||||
u.DOut("DagService Add [%s]\n", k.Pretty())
|
||||
log.Debug("DagService Add [%s]", k)
|
||||
if n == nil {
|
||||
return "", fmt.Errorf("DAGService is nil")
|
||||
}
|
||||
@ -115,7 +160,9 @@ func (n *DAGService) Add(nd *Node) (u.Key, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
b, err := blocks.NewBlock(d)
|
||||
b := new(blocks.Block)
|
||||
b.Data = d
|
||||
b.Multihash, err = nd.Multihash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -126,16 +173,16 @@ func (n *DAGService) Add(nd *Node) (u.Key, error) {
|
||||
func (n *DAGService) AddRecursive(nd *Node) error {
|
||||
_, err := n.Add(nd)
|
||||
if err != nil {
|
||||
log.Info("AddRecursive Error: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, link := range nd.Links {
|
||||
if link.Node == nil {
|
||||
panic("Why does this node have a nil link?\n")
|
||||
}
|
||||
err := n.AddRecursive(link.Node)
|
||||
if err != nil {
|
||||
return err
|
||||
if link.Node != nil {
|
||||
err := n.AddRecursive(link.Node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -155,45 +202,3 @@ func (n *DAGService) Get(k u.Key) (*Node, error) {
|
||||
|
||||
return Decoded(b.Data)
|
||||
}
|
||||
|
||||
func FilePBData(data []byte) []byte {
|
||||
pbfile := new(PBData)
|
||||
typ := PBData_File
|
||||
pbfile.Type = &typ
|
||||
pbfile.Data = data
|
||||
|
||||
data, err := proto.Marshal(pbfile)
|
||||
if err != nil {
|
||||
//this really shouldnt happen, i promise
|
||||
panic(err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func FolderPBData() []byte {
|
||||
pbfile := new(PBData)
|
||||
typ := PBData_Directory
|
||||
pbfile.Type = &typ
|
||||
|
||||
data, err := proto.Marshal(pbfile)
|
||||
if err != nil {
|
||||
//this really shouldnt happen, i promise
|
||||
panic(err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func WrapData(b []byte) []byte {
|
||||
pbdata := new(PBData)
|
||||
typ := PBData_Raw
|
||||
pbdata.Data = b
|
||||
pbdata.Type = &typ
|
||||
|
||||
out, err := proto.Marshal(pbdata)
|
||||
if err != nil {
|
||||
// This shouldnt happen. seriously.
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
@ -2,8 +2,9 @@ package merkledag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
"testing"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
func TestNode(t *testing.T) {
|
||||
|
||||
48
namesys/dns.go
Normal file
48
namesys/dns.go
Normal file
@ -0,0 +1,48 @@
|
||||
package namesys
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
|
||||
isd "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-is-domain"
|
||||
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
// DNSResolver implements a Resolver on DNS domains: names are resolved
// by scanning the domain's TXT records for a b58-encoded multihash
// (see Resolve).
type DNSResolver struct {
	// TODO: maybe some sort of caching?
	// cache would need a timeout
}
|
||||
|
||||
// CanResolve implements Resolver
|
||||
func (r *DNSResolver) CanResolve(name string) bool {
|
||||
return isd.IsDomain(name)
|
||||
}
|
||||
|
||||
// Resolve implements Resolver
|
||||
// TXT records for a given domain name should contain a b58
|
||||
// encoded multihash.
|
||||
func (r *DNSResolver) Resolve(name string) (string, error) {
|
||||
log.Info("DNSResolver resolving %v", name)
|
||||
txt, err := net.LookupTXT(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, t := range txt {
|
||||
chk := b58.Decode(t)
|
||||
if len(chk) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := mh.Cast(chk)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
return "", u.ErrNotFound
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user