Merge pull request #466 from jbenet/peer-restrict

The Peer is Dead. Long live the ID
This commit is contained in:
Juan Batiz-Benet 2014-12-23 09:44:27 -08:00
commit 32589ad4bb
125 changed files with 3623 additions and 3311 deletions

2
Godeps/Godeps.json generated
View File

@ -141,7 +141,7 @@
},
{
"ImportPath": "github.com/jbenet/go-peerstream",
"Rev": "5023d0d6b3efeb50c2c30535d011bdcb2351e212"
"Rev": "1c71a3e04eeef9297a12ecdff75a0b28ffa8bf90"
},
{
"ImportPath": "github.com/jbenet/go-random",

View File

@ -159,7 +159,6 @@ func (s *Swarm) addConn(netConn net.Conn, server bool) (*Conn, error) {
// first, check if we already have it...
for c := range s.conns {
if c.netConn == netConn {
s.connLock.Unlock()
return c, nil
}
}
@ -167,7 +166,6 @@ func (s *Swarm) addConn(netConn net.Conn, server bool) (*Conn, error) {
// create a new spdystream connection
ssConn, err := ss.NewConnection(netConn, server)
if err != nil {
s.connLock.Unlock()
return nil, err
}

View File

@ -0,0 +1,77 @@
package main
import (
"bufio"
"fmt"
"net"
"os"
"time"
ps "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
)
// die reports err on stderr and terminates the process with a non-zero
// exit status. Used by this example for any unrecoverable setup error.
func die(err error) {
	// FIX: err was missing from the argument list, so "%s" printed
	// "%!s(MISSING)" instead of the actual error.
	fmt.Fprintf(os.Stderr, "error: %s\n", err)
	os.Exit(1)
}
// main wires up a small go-peerstream demo: it creates a Swarm, listens
// and dials itself over TCP on localhost:8001, then repeatedly opens
// outbound streams while a handler consumes them, pacing on stdin input.
func main() {
	// create a new Swarm
	swarm := ps.NewSwarm()
	defer swarm.Close()

	// tell swarm what to do with new incoming streams.
	// EchoHandler just echoes back anything they write.
	// NOTE(review): this handler is replaced by the closure installed
	// below before any connection is made, so EchoHandler never runs.
	swarm.SetStreamHandler(ps.EchoHandler)
	l, err := net.Listen("tcp", "localhost:8001")
	if err != nil {
		die(err)
	}
	// hand the listener to the swarm so it accepts inbound connections.
	if _, err := swarm.AddListener(l); err != nil {
		die(err)
	}
	// dial ourselves to create the single connection used below.
	nc, err := net.Dial("tcp", "localhost:8001")
	if err != nil {
		die(err)
	}
	c, err := swarm.AddConn(nc)
	if err != nil {
		die(err)
	}
	nRcvStream := 0
	// bio gates the receive side: each inbound stream waits for one line
	// of stdin before closing. NOTE(review): bio and nRcvStream are shared
	// by all handler invocations without synchronization — presumably the
	// swarm invokes handlers concurrently, so this is racy; fine for a
	// demo, verify before reuse.
	bio := bufio.NewReader(os.Stdin)
	swarm.SetStreamHandler(func(s *ps.Stream) {
		log("handling new stream %d", nRcvStream)
		nRcvStream++
		line, err := bio.ReadString('\n')
		if err != nil {
			die(err)
		}
		_ = line
		// line = "read: " + line
		// s.Write([]byte(line))
		s.Close()
	})
	nSndStream := 0
	// sender loop: open a new stream every 200ms, forever (exit via die
	// on error or process kill).
	for {
		<-time.After(200 * time.Millisecond)
		s, err := swarm.NewStreamWithConn(c)
		if err != nil {
			die(err)
		}
		log("sender got new stream %d", nSndStream)
		nSndStream++
		s.Wait()
	}
}
// log formats the message per fmt rules and writes it to stderr with a
// trailing newline.
func log(s string, ifs ...interface{}) {
	msg := fmt.Sprintf(s+"\n", ifs...)
	fmt.Fprint(os.Stderr, msg)
}

View File

@ -110,28 +110,34 @@ func (s *Swarm) SelectConn() SelectConn {
// Conns returns all the connections associated with this Swarm.
func (s *Swarm) Conns() []*Conn {
s.connLock.RLock()
conns := make([]*Conn, 0, len(s.conns))
for c := range s.conns {
conns = append(conns, c)
}
s.connLock.RUnlock()
return conns
}
// Listeners returns all the listeners associated with this Swarm.
func (s *Swarm) Listeners() []*Listener {
s.listenerLock.RLock()
out := make([]*Listener, 0, len(s.listeners))
for c := range s.listeners {
out = append(out, c)
}
s.listenerLock.RUnlock()
return out
}
// Streams returns all the streams associated with this Swarm.
func (s *Swarm) Streams() []*Stream {
s.streamLock.RLock()
out := make([]*Stream, 0, len(s.streams))
for c := range s.streams {
out = append(out, c)
}
s.streamLock.RUnlock()
return out
}

View File

@ -18,8 +18,6 @@ test: test_go test_sharness
test_expensive: test_go_expensive test_sharness_expensive
test_docker:
cd ./src/github.com/jbenet/go-ipfs
docker build -t zaqwsx_ipfs-test-img .
cd dockertest/ && make
test_go:

View File

@ -41,7 +41,6 @@ func New(bs blockstore.Blockstore, rem exchange.Interface) (*BlockService, error
// TODO pass a context into this if the remote.HasBlock is going to remain here.
func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
k := b.Key()
log.Debugf("blockservice: storing [%s] in datastore", k)
err := s.Blockstore.Put(b)
if err != nil {
return k, err

View File

@ -11,9 +11,8 @@ import (
// Mocks returns |n| connected mock Blockservices
func Mocks(t *testing.T, n int) []*BlockService {
net := tn.VirtualNetwork(delay.Fixed(0))
rs := mockrouting.NewServer()
sg := bitswap.NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
sg := bitswap.NewSessionGenerator(net)
instances := sg.Instances(n)

View File

@ -266,7 +266,7 @@ func identityConfig(nbits int) (config.Identity, error) {
}
ident.PrivKey = base64.StdEncoding.EncodeToString(skbytes)
id, err := peer.IDFromPubKey(pk)
id, err := peer.IDFromPublicKey(pk)
if err != nil {
return ident, err
}

View File

@ -109,24 +109,28 @@ func main() {
}
}
func setupPeer(a args) (peer.Peer, peer.Peerstore, error) {
func setupPeer(a args) (peer.ID, peer.Peerstore, error) {
if a.keybits < 1024 {
return nil, nil, errors.New("Bitsize less than 1024 is considered unsafe.")
return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.")
}
out("generating key pair...")
sk, pk, err := ci.GenerateKeyPair(ci.RSA, a.keybits)
if err != nil {
return nil, nil, err
return "", nil, err
}
p, err := peer.IDFromPublicKey(pk)
if err != nil {
return "", nil, err
}
ps := peer.NewPeerstore()
peer, err := ps.WithKeyPair(sk, pk)
if err != nil {
return nil, nil, err
}
out("local peer id: %s", peer.ID())
return peer, ps, nil
ps.AddPrivKey(p, sk)
ps.AddPubKey(p, pk)
out("local peer id: %s", p)
return p, ps, nil
}
func connect(args args) error {
@ -149,12 +153,13 @@ func connect(args args) error {
rwc := &logRW{n: "conn", rw: conn}
// OK, let's setup the channel.
sg := secio.SessionGenerator{Local: p, Peerstore: ps}
sk := ps.PrivKey(p)
sg := secio.SessionGenerator{LocalID: p, PrivateKey: sk}
sess, err := sg.NewSession(nil, rwc)
if err != nil {
return err
}
out("remote peer id: %s", sess.RemotePeer().ID())
out("remote peer id: %s", sess.RemotePeer())
netcat(sess.ReadWriter().(io.ReadWriteCloser))
return nil
}

View File

@ -2,12 +2,11 @@
package config
import (
"crypto"
"crypto/x509"
"encoding/base64"
"os"
"path/filepath"
ic "github.com/jbenet/go-ipfs/crypto"
u "github.com/jbenet/go-ipfs/util"
"github.com/jbenet/go-ipfs/util/debugerror"
)
@ -132,7 +131,7 @@ func Filename(configroot string) (string, error) {
}
// DecodePrivateKey is a helper to decode the users PrivateKey
func (i *Identity) DecodePrivateKey(passphrase string) (crypto.PrivateKey, error) {
func (i *Identity) DecodePrivateKey(passphrase string) (ic.PrivKey, error) {
pkb, err := base64.StdEncoding.DecodeString(i.PrivKey)
if err != nil {
return nil, err
@ -140,7 +139,7 @@ func (i *Identity) DecodePrivateKey(passphrase string) (crypto.PrivateKey, error
// currently storing key unencrypted. in the future we need to encrypt it.
// TODO(security)
return x509.ParsePKCS1PrivateKey(pkb)
return ic.UnmarshalPrivateKey(pkb)
}
// Load reads given file and returns the read config, or error.

View File

@ -1,17 +1,21 @@
package core
import (
"errors"
"fmt"
"math/rand"
"sync"
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
config "github.com/jbenet/go-ipfs/config"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
dht "github.com/jbenet/go-ipfs/routing/dht"
lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables"
math2 "github.com/jbenet/go-ipfs/util/math2"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
const (
@ -50,34 +54,59 @@ func bootstrap(ctx context.Context,
connectedPeers := n.Peers()
if len(connectedPeers) >= recoveryThreshold {
log.Event(ctx, "bootstrapSkip", n.LocalPeer())
log.Debugf("%s bootstrap skipped -- connected to %d (> %d) nodes",
n.LocalPeer(), len(connectedPeers), recoveryThreshold)
return nil
}
numCxnsToCreate := recoveryThreshold - len(connectedPeers)
var bootstrapPeers []peer.Peer
log.Event(ctx, "bootstrapStart", n.LocalPeer())
log.Debugf("%s bootstrapping to %d more nodes", n.LocalPeer(), numCxnsToCreate)
var bootstrapPeers []peer.PeerInfo
for _, bootstrap := range boots {
p, err := toPeer(ps, bootstrap)
p, err := toPeer(bootstrap)
if err != nil {
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
return err
}
bootstrapPeers = append(bootstrapPeers, p)
}
var notConnected []peer.Peer
var notConnected []peer.PeerInfo
for _, p := range bootstrapPeers {
if n.Connectedness(p) != inet.Connected {
if n.Connectedness(p.ID) != inet.Connected {
notConnected = append(notConnected, p)
}
}
if len(notConnected) < 1 {
s := "must bootstrap to %d more nodes, but already connected to all candidates"
err := fmt.Errorf(s, numCxnsToCreate)
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
return err
}
var randomSubset = randomSubsetOfPeers(notConnected, numCxnsToCreate)
if err := connect(ctx, r, randomSubset); err != nil {
log.Debugf("%s bootstrapping to %d nodes: %s", n.LocalPeer(), numCxnsToCreate, randomSubset)
if err := connect(ctx, ps, r, randomSubset); err != nil {
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
return err
}
return nil
}
func connect(ctx context.Context, r *dht.IpfsDHT, peers []peer.Peer) error {
func connect(ctx context.Context, ps peer.Peerstore, r *dht.IpfsDHT, peers []peer.PeerInfo) error {
if len(peers) < 1 {
return errors.New("bootstrap set empty")
}
var wg sync.WaitGroup
for _, p := range peers {
@ -86,42 +115,45 @@ func connect(ctx context.Context, r *dht.IpfsDHT, peers []peer.Peer) error {
// fail/abort due to an expiring context.
wg.Add(1)
go func(p peer.Peer) {
go func(p peer.PeerInfo) {
defer wg.Done()
err := r.Connect(ctx, p)
log.Event(ctx, "bootstrapDial", r.LocalPeer(), p.ID)
log.Debugf("%s bootstrapping to %s", r.LocalPeer(), p.ID)
ps.AddAddresses(p.ID, p.Addrs)
err := r.Connect(ctx, p.ID)
if err != nil {
log.Event(ctx, "bootstrapFailed", p)
log.Criticalf("failed to bootstrap with %v", p)
log.Event(ctx, "bootstrapFailed", p.ID)
log.Criticalf("failed to bootstrap with %v", p.ID)
return
}
log.Event(ctx, "bootstrapSuccess", p)
log.Infof("bootstrapped with %v", p)
log.Event(ctx, "bootstrapSuccess", p.ID)
log.Infof("bootstrapped with %v", p.ID)
}(p)
}
wg.Wait()
return nil
}
func toPeer(ps peer.Peerstore, bootstrap *config.BootstrapPeer) (peer.Peer, error) {
id, err := peer.DecodePrettyID(bootstrap.PeerID)
func toPeer(bootstrap *config.BootstrapPeer) (p peer.PeerInfo, err error) {
id, err := peer.IDB58Decode(bootstrap.PeerID)
if err != nil {
return nil, err
}
p, err := ps.FindOrCreate(id)
if err != nil {
return nil, err
return
}
maddr, err := ma.NewMultiaddr(bootstrap.Address)
if err != nil {
return nil, err
return
}
p.AddAddress(maddr)
return p, nil
p = peer.PeerInfo{
ID: id,
Addrs: []ma.Multiaddr{maddr},
}
return
}
func randomSubsetOfPeers(in []peer.Peer, max int) []peer.Peer {
func randomSubsetOfPeers(in []peer.PeerInfo, max int) []peer.PeerInfo {
n := math2.IntMin(max, len(in))
var out []peer.Peer
var out []peer.PeerInfo
for _, val := range rand.Perm(n) {
out = append(out, in[val])
}

View File

@ -8,10 +8,15 @@ import (
)
func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
var ps []peer.Peer
var ps []peer.PeerInfo
sizeofSlice := 100
for i := 0; i < sizeofSlice; i++ {
ps = append(ps, testutil.RandPeer())
pid, err := testutil.RandPeerID()
if err != nil {
t.Fatal(err)
}
ps = append(ps, peer.PeerInfo{ID: pid})
}
out := randomSubsetOfPeers(ps, 2*sizeofSlice)
if len(out) != len(ps) {

View File

@ -11,6 +11,7 @@ import (
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
cmds "github.com/jbenet/go-ipfs/commands"
ic "github.com/jbenet/go-ipfs/crypto"
"github.com/jbenet/go-ipfs/peer"
kb "github.com/jbenet/go-ipfs/routing/kbucket"
u "github.com/jbenet/go-ipfs/util"
@ -49,7 +50,7 @@ if no peer is specified, prints out local peers info.
}
if len(req.Arguments()) == 0 {
return printPeer(node.Identity)
return printPeer(node.Peerstore, node.Identity)
}
pid := req.Arguments()[0]
@ -72,7 +73,7 @@ if no peer is specified, prints out local peers info.
if err != nil {
return nil, err
}
return printPeer(p)
return printPeer(node.Peerstore, p.ID)
},
Marshalers: cmds.MarshalerMap{
cmds.Text: func(res cmds.Response) ([]byte, error) {
@ -87,27 +88,36 @@ if no peer is specified, prints out local peers info.
Type: &IdOutput{},
}
func printPeer(p peer.Peer) (interface{}, error) {
if p == nil {
func printPeer(ps peer.Peerstore, p peer.ID) (interface{}, error) {
if p == "" {
return nil, errors.New("Attempted to print nil peer!")
}
info := new(IdOutput)
info.ID = p.ID().String()
if p.PubKey() != nil {
pkb, err := p.PubKey().Bytes()
info := new(IdOutput)
info.ID = p.Pretty()
if pk := ps.PubKey(p); pk != nil {
pkb, err := ic.MarshalPublicKey(pk)
if err != nil {
return nil, err
}
info.PublicKey = base64.StdEncoding.EncodeToString(pkb)
}
for _, a := range p.Addresses() {
for _, a := range ps.Addresses(p) {
info.Addresses = append(info.Addresses, a.String())
}
agent, protocol := p.GetVersions()
info.AgentVersion = agent
info.ProtocolVersion = protocol
if v, err := ps.Get(p, "ProtocolVersion"); err == nil {
if vs, ok := v.(string); ok {
info.AgentVersion = vs
}
}
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
info.ProtocolVersion = vs
}
}
return info, nil
}

View File

@ -57,7 +57,7 @@ Publish a <ref> to another public key:
return nil, errNotOnline
}
if n.Identity == nil {
if n.Identity == "" {
return nil, errors.New("Identity not loaded!")
}
@ -75,8 +75,7 @@ Publish a <ref> to another public key:
}
// TODO n.Keychain.Get(name).PrivKey
k := n.Identity.PrivKey()
return publish(n, k, ref)
return publish(n, n.PrivateKey, ref)
},
Marshalers: cmds.MarshalerMap{
cmds.Text: func(res cmds.Response) ([]byte, error) {

View File

@ -52,10 +52,10 @@ Resolve te value of another name:
}
if len(req.Arguments()) == 0 {
if n.Identity == nil {
if n.Identity == "" {
return nil, errors.New("Identity not loaded!")
}
name = n.Identity.ID().String()
name = n.Identity.Pretty()
} else {
name = req.Arguments()[0]

View File

@ -58,7 +58,7 @@ ipfs swarm peers lists the set of peers this node is connected to.
conns := n.Network.Conns()
addrs := make([]string, len(conns))
for i, c := range conns {
pid := c.RemotePeer().ID()
pid := c.RemotePeer()
addr := c.RemoteMultiaddr()
addrs[i] = fmt.Sprintf("%s/%s", addr, pid)
}
@ -106,7 +106,7 @@ ipfs swarm connect /ip4/104.131.131.82/tcp/4001/QmaCpDMGvV2BGHeYERUEnRQAwe3N8Szb
output := make([]string, len(peers))
for i, p := range peers {
output[i] = "connect " + p.ID().String()
output[i] = "connect " + p.Pretty()
err := n.Network.DialPeer(ctx, p)
if err != nil {
@ -149,7 +149,7 @@ func splitAddresses(addrs []string) (maddrs []ma.Multiaddr, pids []peer.ID, err
if err != nil {
return nil, nil, cmds.ClientError("invalid peer address: " + err.Error())
}
id, err := peer.DecodePrettyID(path.Base(addr))
id, err := peer.IDB58Decode(path.Base(addr))
if err != nil {
return nil, nil, err
}
@ -161,21 +161,14 @@ func splitAddresses(addrs []string) (maddrs []ma.Multiaddr, pids []peer.ID, err
// peersWithAddresses is a function that takes in a slice of string peer addresses
// (multiaddr + peerid) and returns a slice of properly constructed peers
func peersWithAddresses(ps peer.Peerstore, addrs []string) ([]peer.Peer, error) {
func peersWithAddresses(ps peer.Peerstore, addrs []string) ([]peer.ID, error) {
maddrs, pids, err := splitAddresses(addrs)
if err != nil {
return nil, err
}
peers := make([]peer.Peer, len(pids))
for i, pid := range pids {
p, err := ps.FindOrCreate(pid)
if err != nil {
return nil, err
}
p.AddAddress(maddrs[i])
peers[i] = p
for i, p := range pids {
ps.AddAddress(p, maddrs[i])
}
return peers, nil
return pids, nil
}

View File

@ -1,7 +1,6 @@
package core
import (
"encoding/base64"
"fmt"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@ -12,6 +11,7 @@ import (
bstore "github.com/jbenet/go-ipfs/blocks/blockstore"
bserv "github.com/jbenet/go-ipfs/blockservice"
config "github.com/jbenet/go-ipfs/config"
ic "github.com/jbenet/go-ipfs/crypto"
diag "github.com/jbenet/go-ipfs/diagnostics"
exchange "github.com/jbenet/go-ipfs/exchange"
bitswap "github.com/jbenet/go-ipfs/exchange/bitswap"
@ -21,7 +21,6 @@ import (
merkledag "github.com/jbenet/go-ipfs/merkledag"
namesys "github.com/jbenet/go-ipfs/namesys"
inet "github.com/jbenet/go-ipfs/net"
handshake "github.com/jbenet/go-ipfs/net/handshake"
path "github.com/jbenet/go-ipfs/path"
peer "github.com/jbenet/go-ipfs/peer"
pin "github.com/jbenet/go-ipfs/pin"
@ -42,7 +41,8 @@ type IpfsNode struct {
// Self
Config *config.Config // the node's configuration
Identity peer.Peer // the local node's identity
Identity peer.ID // the local node's identity
PrivateKey ic.PrivKey // the local node's private Key
onlineMode bool // alternatively, offline
// Local node
@ -97,13 +97,18 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
return nil, debugerror.Wrap(err)
}
// setup peerstore + local peer identity
n.Peerstore = peer.NewPeerstore()
n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online)
// setup local peer identity
n.Identity, n.PrivateKey, err = initIdentity(&n.Config.Identity, online)
if err != nil {
return nil, debugerror.Wrap(err)
}
// setup Peerstore
n.Peerstore = peer.NewPeerstore()
if n.PrivateKey != nil {
n.Peerstore.AddPrivKey(n.Identity, n.PrivateKey)
}
blockstore, err := bstore.WriteCached(bstore.NewBlockstore(n.Datastore), kSizeBlockstoreWriteCache)
n.Exchange = offline.Exchange(blockstore)
@ -122,11 +127,21 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
}
n.AddChildGroup(n.Network.CtxGroup())
// explicitly set these as our listen addrs.
// (why not do it inside inet.NewNetwork? because this way we can
// listen on addresses without necessarily advertising those publicly.)
addrs, err := n.Network.InterfaceListenAddresses()
if err != nil {
return nil, debugerror.Wrap(err)
}
n.Peerstore.AddAddresses(n.Identity, addrs)
// setup diagnostics service
n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network)
// setup routing service
dhtRouting := dht.NewDHT(ctx, n.Identity, n.Peerstore, n.Network, n.Datastore)
dhtRouting := dht.NewDHT(ctx, n.Identity, n.Network, n.Datastore)
dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord
// TODO(brian): perform this inside NewDHT factory method
@ -135,9 +150,9 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
// setup exchange service
const alwaysSendToPeer = true // use YesManStrategy
bitswapNetwork := bsnet.NewFromIpfsNetwork(n.Network)
bitswapNetwork := bsnet.NewFromIpfsNetwork(n.Network, n.Routing)
n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, n.Routing, blockstore, alwaysSendToPeer)
n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, blockstore, alwaysSendToPeer)
// TODO consider moving connection supervision into the Network. We've
// discussed improvements to this Node constructor. One improvement
@ -178,42 +193,47 @@ func (n *IpfsNode) OnlineMode() bool {
return n.onlineMode
}
func initIdentity(cfg *config.Identity, peers peer.Peerstore, online bool) (peer.Peer, error) {
func initIdentity(cfg *config.Identity, online bool) (peer.ID, ic.PrivKey, error) {
if cfg.PeerID == "" {
return nil, debugerror.New("Identity was not set in config (was ipfs init run?)")
return "", nil, debugerror.New("Identity was not set in config (was ipfs init run?)")
}
if len(cfg.PeerID) == 0 {
return nil, debugerror.New("No peer ID in config! (was ipfs init run?)")
return "", nil, debugerror.New("No peer ID in config! (was ipfs init run?)")
}
// get peer from peerstore (so it is constructed there)
id := peer.ID(b58.Decode(cfg.PeerID))
self, err := peers.FindOrCreate(id)
if err != nil {
return nil, err
}
self.SetType(peer.Local)
self, err = peers.Add(self)
if err != nil {
return nil, err
}
self.SetVersions(handshake.ClientVersion, handshake.IpfsVersion.String())
// when not online, don't need to parse private keys (yet)
if online {
skb, err := base64.StdEncoding.DecodeString(cfg.PrivKey)
if err != nil {
return nil, err
}
if err := self.LoadAndVerifyKeyPair(skb); err != nil {
return nil, err
}
if !online {
return id, nil, nil
}
return self, nil
sk, err := loadPrivateKey(cfg, id)
if err != nil {
return "", nil, err
}
return id, sk, nil
}
// loadPrivateKey decodes the private key stored in the identity config
// and verifies it actually derives the expected peer ID before handing
// it back.
func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) {
	key, err := cfg.DecodePrivateKey("passphrase todo!")
	if err != nil {
		return nil, err
	}

	derived, err := peer.IDFromPrivateKey(key)
	if err != nil {
		return nil, err
	}
	if derived != id {
		return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, derived)
	}

	return key, nil
}
func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {

View File

@ -3,9 +3,8 @@ package core
import (
"testing"
config "github.com/jbenet/go-ipfs/config"
"github.com/jbenet/go-ipfs/peer"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
config "github.com/jbenet/go-ipfs/config"
)
func TestInitialization(t *testing.T) {
@ -60,22 +59,6 @@ func TestInitialization(t *testing.T) {
}
}
func TestPeerIsLocal(t *testing.T) {
t.Log("Ensure that peer is Local after initializing identity")
online := false
peers := peer.NewPeerstore()
cfg := testIdentity
p, err := initIdentity(&cfg, peers, online)
if err != nil {
t.Fatal(err)
}
if p.GetType() != peer.Local {
t.Fail()
}
}
var testIdentity = config.Identity{
PeerID: "QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt",
PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXsTY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnCQoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpW
SN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=",

View File

@ -25,24 +25,23 @@ func NewMockNode() (*IpfsNode, error) {
return nil, err
}
p, err := peer.IDFromPublicKey(pk)
if err != nil {
return nil, err
}
nd.Identity = p
nd.PrivateKey = sk
nd.Peerstore = peer.NewPeerstore()
p, err := nd.Peerstore.WithKeyPair(sk, pk)
if err != nil {
return nil, err
}
nd.Identity, err = nd.Peerstore.Add(p)
if err != nil {
return nil, err
}
nd.Peerstore.AddPrivKey(p, sk)
nd.Peerstore.AddPubKey(p, pk)
// Temp Datastore
dstore := ds.NewMapDatastore()
nd.Datastore = ds2.CloserWrap(syncds.MutexWrap(dstore))
// Routing
dht := mdht.NewServer().ClientWithDatastore(nd.Identity, nd.Datastore)
dht := mdht.NewServer().ClientWithDatastore(peer.PeerInfo{ID: p}, nd.Datastore)
nd.Routing = dht
// Bitswap

View File

@ -5,6 +5,7 @@ package crypto
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
@ -82,7 +83,7 @@ func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) {
return nil, nil, err
}
pk := &priv.PublicKey
return &RsaPrivateKey{priv}, &RsaPublicKey{pk}, nil
return &RsaPrivateKey{sk: priv}, &RsaPublicKey{pk}, nil
default:
return nil, nil, ErrBadKeyType
}
@ -239,6 +240,20 @@ func UnmarshalPublicKey(data []byte) (PubKey, error) {
}
}
// MarshalPublicKey converts a public key object into a protobuf serialized
// public key.
func MarshalPublicKey(k PubKey) ([]byte, error) {
	// FIX: guard the assertion — a bare k.(*RsaPublicKey) panics on any
	// other PubKey implementation; report ErrBadKeyType instead.
	rk, ok := k.(*RsaPublicKey)
	if !ok {
		return nil, ErrBadKeyType
	}
	b, err := MarshalRsaPublicKey(rk)
	if err != nil {
		return nil, err
	}
	pmes := new(pb.PublicKey)
	typ := pb.KeyType_RSA // for now only type.
	pmes.Type = &typ
	pmes.Data = b
	return proto.Marshal(pmes)
}
// UnmarshalPrivateKey converts a protobuf serialized private key into its
// representative object
func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
@ -256,6 +271,26 @@ func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
}
}
// MarshalPrivateKey converts a key object into its protobuf serialized form.
func MarshalPrivateKey(k PrivKey) ([]byte, error) {
	// FIX: guard the assertion — a bare k.(*RsaPrivateKey) panics on any
	// other PrivKey implementation; report ErrBadKeyType instead.
	rk, ok := k.(*RsaPrivateKey)
	if !ok {
		return nil, ErrBadKeyType
	}
	b := MarshalRsaPrivateKey(rk)
	pmes := new(pb.PrivateKey)
	typ := pb.KeyType_RSA // for now only type.
	pmes.Type = &typ
	pmes.Data = b
	return proto.Marshal(pmes)
}
// ConfigDecodeKey decodes a base64 string (as stored in the config file)
// into raw key bytes. It does not unmarshal the key; pass the result to
// UnmarshalPrivateKey/UnmarshalPublicKey for that.
func ConfigDecodeKey(b string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(b)
}
// ConfigEncodeKey encodes raw key bytes to base64 for storage in the
// config file. It does not marshal the key; marshal first with
// MarshalPrivateKey/MarshalPublicKey.
func ConfigEncodeKey(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}
// KeyEqual checks whether two
func KeyEqual(k1, k2 Key) bool {
if k1 == k2 {

View File

@ -1,6 +1,9 @@
package crypto
import "testing"
import (
"bytes"
"testing"
)
func TestRsaKeys(t *testing.T) {
sk, pk, err := GenerateKeyPair(RSA, 512)
@ -33,26 +36,44 @@ func testKeySignature(t *testing.T, sk PrivKey) {
}
func testKeyEncoding(t *testing.T, sk PrivKey) {
skb, err := sk.Bytes()
skbm, err := MarshalPrivateKey(sk)
if err != nil {
t.Fatal(err)
}
_, err = UnmarshalPrivateKey(skb)
sk2, err := UnmarshalPrivateKey(skbm)
if err != nil {
t.Fatal(err)
}
skbm2, err := MarshalPrivateKey(sk2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(skbm, skbm2) {
t.Error("skb -> marshal -> unmarshal -> skb failed.\n", skbm, "\n", skbm2)
}
pk := sk.GetPublic()
pkb, err := pk.Bytes()
pkbm, err := MarshalPublicKey(pk)
if err != nil {
t.Fatal(err)
}
_, err = UnmarshalPublicKey(pkb)
_, err = UnmarshalPublicKey(pkbm)
if err != nil {
t.Fatal(err)
}
pkbm2, err := MarshalPublicKey(pk)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(pkbm, pkbm2) {
t.Error("skb -> marshal -> unmarshal -> skb failed.\n", pkbm, "\n", pkbm2)
}
}
func testKeyEquals(t *testing.T, k Key) {

View File

@ -14,7 +14,8 @@ import (
)
type RsaPrivateKey struct {
k *rsa.PrivateKey
sk *rsa.PrivateKey
pk *rsa.PublicKey
}
type RsaPublicKey struct {
@ -64,19 +65,22 @@ func (sk *RsaPrivateKey) GenSecret() []byte {
func (sk *RsaPrivateKey) Sign(message []byte) ([]byte, error) {
hashed := sha256.Sum256(message)
return rsa.SignPKCS1v15(rand.Reader, sk.k, crypto.SHA256, hashed[:])
return rsa.SignPKCS1v15(rand.Reader, sk.sk, crypto.SHA256, hashed[:])
}
func (sk *RsaPrivateKey) GetPublic() PubKey {
return &RsaPublicKey{&sk.k.PublicKey}
if sk.pk == nil {
sk.pk = &sk.sk.PublicKey
}
return &RsaPublicKey{sk.pk}
}
func (sk *RsaPrivateKey) Decrypt(b []byte) ([]byte, error) {
return rsa.DecryptPKCS1v15(rand.Reader, sk.k, b)
return rsa.DecryptPKCS1v15(rand.Reader, sk.sk, b)
}
func (sk *RsaPrivateKey) Bytes() ([]byte, error) {
b := x509.MarshalPKCS1PrivateKey(sk.k)
b := x509.MarshalPKCS1PrivateKey(sk.sk)
pbmes := new(pb.PrivateKey)
typ := pb.KeyType_RSA
pbmes.Type = &typ
@ -98,7 +102,11 @@ func UnmarshalRsaPrivateKey(b []byte) (*RsaPrivateKey, error) {
if err != nil {
return nil, err
}
return &RsaPrivateKey{sk}, nil
return &RsaPrivateKey{sk: sk}, nil
}
func MarshalRsaPrivateKey(k *RsaPrivateKey) []byte {
return x509.MarshalPKCS1PrivateKey(k.sk)
}
func UnmarshalRsaPublicKey(b []byte) (*RsaPublicKey, error) {
@ -112,3 +120,7 @@ func UnmarshalRsaPublicKey(b []byte) (*RsaPublicKey, error) {
}
return &RsaPublicKey{pk}, nil
}
func MarshalRsaPublicKey(k *RsaPublicKey) ([]byte, error) {
return x509.MarshalPKIXPublicKey(k.k)
}

View File

@ -4,6 +4,8 @@ package secio
import (
"io"
ci "github.com/jbenet/go-ipfs/crypto"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
@ -12,8 +14,8 @@ import (
// SessionGenerator constructs secure communication sessions for a peer.
type SessionGenerator struct {
Local peer.Peer
Peerstore peer.Peerstore
LocalID peer.ID
PrivateKey ci.PrivKey
}
// NewSession takes an insecure io.ReadWriter, performs a TLS-like
@ -23,12 +25,15 @@ type SessionGenerator struct {
func (sg *SessionGenerator) NewSession(ctx context.Context,
insecure io.ReadWriter) (Session, error) {
ss, err := newSecureSession(sg.LocalID, sg.PrivateKey)
if err != nil {
return nil, err
}
if ctx == nil {
ctx = context.Background()
}
ctx, cancel := context.WithCancel(ctx)
ss := newSecureSession(sg.Local, sg.Peerstore)
if err := ss.handshake(ctx, insecure); err != nil {
cancel()
return nil, err
@ -42,10 +47,17 @@ type Session interface {
ReadWriter() msgio.ReadWriteCloser
// LocalPeer retrieves the local peer.
LocalPeer() peer.Peer
LocalPeer() peer.ID
// LocalPrivateKey retrieves the local private key
LocalPrivateKey() ci.PrivKey
// RemotePeer retrieves the remote peer.
RemotePeer() peer.Peer
RemotePeer() peer.ID
// RemotePublicKey retrieves the remote's public key
// which was received during the handshake.
RemotePublicKey() ci.PubKey
// Close closes the secure session
Close() error
@ -57,15 +69,25 @@ func (s *secureSession) ReadWriter() msgio.ReadWriteCloser {
}
// LocalPeer retrieves the local peer.
func (s *secureSession) LocalPeer() peer.Peer {
func (s *secureSession) LocalPeer() peer.ID {
return s.localPeer
}
// LocalPrivateKey retrieves the local peer's PrivateKey
func (s *secureSession) LocalPrivateKey() ci.PrivKey {
return s.localKey
}
// RemotePeer retrieves the remote peer.
func (s *secureSession) RemotePeer() peer.Peer {
func (s *secureSession) RemotePeer() peer.ID {
return s.remotePeer
}
// RemotePublicKey retrieves the remote peer's public key.
func (s *secureSession) RemotePublicKey() ci.PubKey {
return s.remote.permanentPubKey
}
// Close closes the secure session
func (s *secureSession) Close() error {
return s.secure.Close()

View File

@ -11,7 +11,7 @@ import (
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
ci "github.com/jbenet/go-ipfs/crypto"
pb "github.com/jbenet/go-ipfs/crypto/spipe/internal/pb"
pb "github.com/jbenet/go-ipfs/crypto/secio/internal/pb"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
@ -36,9 +36,9 @@ type secureSession struct {
insecure io.ReadWriter
insecureM msgio.ReadWriter
peers peer.Peerstore
localPeer peer.Peer
remotePeer peer.Peer
localKey ci.PrivKey
localPeer peer.ID
remotePeer peer.ID
local encParams
remote encParams
@ -46,8 +46,19 @@ type secureSession struct {
sharedSecret []byte
}
func newSecureSession(local peer.Peer, peers peer.Peerstore) *secureSession {
return &secureSession{peers: peers, localPeer: local}
func newSecureSession(local peer.ID, key ci.PrivKey) (*secureSession, error) {
s := &secureSession{localPeer: local, localKey: key}
switch {
case s.localPeer == "":
return nil, errors.New("no local id provided")
case s.localKey == nil:
return nil, errors.New("no local private key provided")
case !s.localPeer.MatchesPrivateKey(s.localKey):
return nil, fmt.Errorf("peer.ID does not match PrivateKey")
}
return s, nil
}
// handshake performs initial communication over insecure channel to share
@ -71,7 +82,7 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
log.Debugf("handshake: %s <--start--> %s", s.localPeer, s.remotePeer)
log.Event(ctx, "secureHandshakeStart", s.localPeer)
s.local.permanentPubKey = s.localPeer.PubKey()
s.local.permanentPubKey = s.localKey.GetPublic()
myPubKeyBytes, err := s.local.permanentPubKey.Bytes()
if err != nil {
return err
@ -84,6 +95,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
proposeOut.Ciphers = &SupportedCiphers
proposeOut.Hashes = &SupportedHashes
// log.Debugf("1.0 Propose: nonce:%s exchanges:%s ciphers:%s hashes:%s",
// nonceOut, SupportedExchanges, SupportedCiphers, SupportedHashes)
// Send Propose packet (respects ctx)
proposeOutBytes, err := writeMsgCtx(ctx, s.insecureM, proposeOut)
if err != nil {
@ -97,6 +111,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
return err
}
// log.Debugf("1.0.1 Propose recv: nonce:%s exchanges:%s ciphers:%s hashes:%s",
// proposeIn.GetRand(), proposeIn.GetExchanges(), proposeIn.GetCiphers(), proposeIn.GetHashes())
// =============================================================================
// step 1.1 Identify -- get identity from their key
@ -106,12 +123,13 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
return err
}
// get or construct peer
s.remotePeer, err = getOrConstructPeer(s.peers, s.remote.permanentPubKey)
// get peer id
s.remotePeer, err = peer.IDFromPublicKey(s.remote.permanentPubKey)
if err != nil {
return err
}
// log.Debugf("%s Remote Peer Identified as %s", s.localPeer, s.remotePeer)
log.Debugf("1.1 Identify: %s Remote Peer Identified as %s", s.localPeer, s.remotePeer)
// =============================================================================
// step 1.2 Selection -- select/agree on best encryption parameters
@ -141,6 +159,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
s.remote.cipherT = s.local.cipherT
s.remote.hashT = s.local.hashT
// log.Debugf("1.2 selection: exchange:%s cipher:%s hash:%s",
// s.local.curveT, s.local.cipherT, s.local.hashT)
// =============================================================================
// step 2. Exchange -- exchange (signed) ephemeral keys. verify signatures.
@ -155,9 +176,10 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
selectionOut.Write(s.local.ephemeralPubKey)
selectionOutBytes := selectionOut.Bytes()
// log.Debugf("2.0 exchange: %v", selectionOutBytes)
exchangeOut := new(pb.Exchange)
exchangeOut.Epubkey = s.local.ephemeralPubKey
exchangeOut.Signature, err = s.localPeer.PrivKey().Sign(selectionOutBytes)
exchangeOut.Signature, err = s.localKey.Sign(selectionOutBytes)
if err != nil {
return err
}
@ -184,16 +206,21 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
selectionIn.Write(proposeOutBytes)
selectionIn.Write(s.remote.ephemeralPubKey)
selectionInBytes := selectionIn.Bytes()
// log.Debugf("2.0.1 exchange recv: %v", selectionInBytes)
// u.POut("Remote Peer Identified as %s\n", s.remote)
sigOK, err := s.remotePeer.PubKey().Verify(selectionInBytes, exchangeIn.GetSignature())
sigOK, err := s.remote.permanentPubKey.Verify(selectionInBytes, exchangeIn.GetSignature())
if err != nil {
// log.Error("2.1 Verify: failed: %s", err)
return err
}
if !sigOK {
return errors.New("Bad signature!")
err := errors.New("Bad signature!")
// log.Error("2.1 Verify: failed: %s", err)
return err
}
// log.Debugf("2.1 Verify: signature verified.")
// =============================================================================
// step 2.2. Keys -- generate keys for mac + encryption
@ -223,6 +250,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
s.local.keys = k1
s.remote.keys = k2
// log.Debug("2.2 keys:\n\tshared: %v\n\tk1: %v\n\tk2: %v",
// s.sharedSecret, s.local.keys, s.remote.keys)
// =============================================================================
// step 2.3. MAC + Cipher -- prepare MAC + cipher
@ -234,6 +264,8 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
return err
}
// log.Debug("2.3 mac + cipher.")
// =============================================================================
// step 3. Finish -- send expected message (the nonces), verify encryption works
@ -242,6 +274,7 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
r := NewETMReader(s.insecure, s.remote.cipher, s.remote.mac)
s.secure = msgio.Combine(w, r).(msgio.ReadWriteCloser)
// log.Debug("3.0 finish. sending: %v", proposeIn.GetRand())
// send their Nonce.
if _, err := s.secure.Write(proposeIn.GetRand()); err != nil {
return fmt.Errorf("Failed to write Finish nonce: %s", err)
@ -252,6 +285,8 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
if _, err := io.ReadFull(s.secure, nonceOut2); err != nil {
return fmt.Errorf("Failed to read Finish nonce: %s", err)
}
// log.Debug("3.0 finish.\n\texpect: %v\n\tactual: %v", nonceOut, nonceOut2)
if !bytes.Equal(nonceOut, nonceOut2) {
return fmt.Errorf("Failed to read our encrypted nonce: %s != %s", nonceOut2, nonceOut)
}
@ -261,25 +296,3 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e
log.Event(ctx, "secureHandshakeFinish", s.localPeer, s.remotePeer)
return nil
}
// getOrConstructPeer attempts to fetch a peer from a peerstore.
// if succeeds, verify ID and PubKey match.
// else, construct it.
func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (peer.Peer, error) {
rid, err := peer.IDFromPubKey(rpk)
if err != nil {
return nil, err
}
npeer, err := peers.FindOrCreate(rid)
if err != nil {
return nil, err // unexpected error happened.
}
// public key verification happens in Peer.VerifyAndSetPubKey
if err := npeer.VerifyAndSetPubKey(rpk); err != nil {
return nil, err // pubkey mismatch or other problem
}
return npeer, nil
}

View File

@ -1,389 +0,0 @@
// package spipe handles establishing secure communication between two peers.
package spipe
import (
"bytes"
"errors"
"fmt"
"strings"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"hash"
bfish "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish"
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
ci "github.com/jbenet/go-ipfs/crypto"
pb "github.com/jbenet/go-ipfs/crypto/spipe/internal/pb"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
)
var log = u.Logger("handshake")
// List of supported ECDH curves
var SupportedExchanges = "P-256,P-224,P-384,P-521"
// List of supported Ciphers
var SupportedCiphers = "AES-256,AES-128,Blowfish"
// List of supported Hashes
var SupportedHashes = "SHA256,SHA512"
// ErrUnsupportedKeyType is returned when a private key cast/type switch fails.
var ErrUnsupportedKeyType = errors.New("unsupported key type")
// ErrClosed signals the closing of a connection.
var ErrClosed = errors.New("connection closed")
// handshake performs initial communication over the insecure channel to
// share keys, IDs, and initiate communication, securing the channel after.
//
// Both sides run the same steps:
//  1. exchange Propose packets (nonce, public key, supported algorithms)
//  2. select algorithms, exchange signed ephemeral keys, derive shared keys
//  3. switch to the encrypted channel and verify with a "Finished" message
func (s *SecurePipe) handshake() error {
	// Generate and send Hello packet.
	// Hello = (rand, PublicKey, Supported)
	nonce := make([]byte, 16)
	_, err := rand.Read(nonce)
	if err != nil {
		return err
	}

	log.Debugf("handshake: %s <--> %s", s.local, s.remote)
	myPubKey, err := s.local.PubKey().Bytes()
	if err != nil {
		return err
	}

	proposeMsg := new(pb.Propose)
	proposeMsg.Rand = nonce
	proposeMsg.Pubkey = myPubKey
	proposeMsg.Exchanges = &SupportedExchanges
	proposeMsg.Ciphers = &SupportedCiphers
	proposeMsg.Hashes = &SupportedHashes

	encoded, err := proto.Marshal(proposeMsg)
	if err != nil {
		return err
	}

	// Send our Propose packet
	select {
	case s.insecure.Out <- encoded:
	case <-s.ctx.Done():
		return ErrClosed
	}

	// Parse their Propose packet and generate an Exchange packet.
	// Exchange = (EphemeralPubKey, Signature)
	var resp []byte
	select {
	case <-s.ctx.Done():
		return ErrClosed
	case resp = <-s.insecure.In:
	}

	proposeResp := new(pb.Propose)
	err = proto.Unmarshal(resp, proposeResp)
	if err != nil {
		return err
	}

	// get remote identity
	remotePubKey, err := ci.UnmarshalPublicKey(proposeResp.GetPubkey())
	if err != nil {
		return err
	}

	// get or construct peer
	s.remote, err = getOrConstructPeer(s.peers, remotePubKey)
	if err != nil {
		return err
	}
	log.Debugf("%s Remote Peer Identified as %s", s.local, s.remote)

	// Agree on one algorithm per category. SelectBest is symmetric, so both
	// sides arrive at the same choice without another round trip.
	exchange, err := SelectBest(SupportedExchanges, proposeResp.GetExchanges())
	if err != nil {
		return err
	}

	cipherType, err := SelectBest(SupportedCiphers, proposeResp.GetCiphers())
	if err != nil {
		return err
	}

	hashType, err := SelectBest(SupportedHashes, proposeResp.GetHashes())
	if err != nil {
		return err
	}

	epubkey, genSharedKey, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey
	if err != nil {
		// BUGFIX: this error was previously ignored, which would have
		// signed and sent a nil ephemeral key on keygen failure.
		return err
	}

	var handshake bytes.Buffer // Gather corpus to sign.
	handshake.Write(encoded)
	handshake.Write(resp)
	handshake.Write(epubkey)

	exPacket := new(pb.Exchange)
	exPacket.Epubkey = epubkey
	exPacket.Signature, err = s.local.PrivKey().Sign(handshake.Bytes())
	if err != nil {
		return err
	}

	exEncoded, err := proto.Marshal(exPacket)
	if err != nil {
		// BUGFIX: this error was previously ignored; a nil packet would
		// have been sent to the remote side.
		return err
	}

	// send out Exchange packet
	select {
	case s.insecure.Out <- exEncoded:
	case <-s.ctx.Done():
		return ErrClosed
	}

	// Parse their Exchange packet and generate a Finish packet.
	// Finish = E('Finish')
	var resp1 []byte
	select {
	case <-s.ctx.Done():
		return ErrClosed
	case resp1 = <-s.insecure.In:
	}

	exchangeResp := new(pb.Exchange)
	err = proto.Unmarshal(resp1, exchangeResp)
	if err != nil {
		return err
	}

	// Verify their signature over (their-propose || our-propose || their-epubkey).
	var theirHandshake bytes.Buffer
	theirHandshake.Write(resp)
	theirHandshake.Write(encoded)
	theirHandshake.Write(exchangeResp.GetEpubkey())

	ok, err := s.remote.PubKey().Verify(theirHandshake.Bytes(), exchangeResp.GetSignature())
	if err != nil {
		return err
	}

	if !ok {
		return errors.New("Bad signature!")
	}

	secret, err := genSharedKey(exchangeResp.GetEpubkey())
	if err != nil {
		return err
	}

	// Derive two key sets from the shared secret; the public-key comparison
	// deterministically decides which set each side uses for sending.
	k1, k2 := ci.KeyStretcher(cipherType, hashType, secret)

	cmp := bytes.Compare(myPubKey, proposeResp.GetPubkey())
	switch cmp {
	case 1:
	case -1:
		k1, k2 = k2, k1 // swap
	case 0: // really shouldnt happen.
		copy(k2.IV, k1.IV)
		copy(k2.MacKey, k1.MacKey)
		copy(k2.CipherKey, k1.CipherKey)
	}

	go s.handleSecureIn(hashType, cipherType, k2.IV, k2.CipherKey, k2.MacKey)
	go s.handleSecureOut(hashType, cipherType, k1.IV, k1.CipherKey, k1.MacKey)

	finished := []byte("Finished")

	// send finished msg
	select {
	case <-s.ctx.Done():
		return ErrClosed
	case s.Out <- finished:
	}

	// recv finished msg
	var resp2 []byte
	select {
	case <-s.ctx.Done():
		return ErrClosed
	case resp2 = <-s.In:
	}

	if bytes.Compare(resp2, finished) != 0 {
		return fmt.Errorf("Negotiation failed, got: %s", resp2)
	}

	log.Debugf("%s handshake: Got node id: %s", s.local, s.remote)
	return nil
}
func makeMac(hashType string, key []byte) (hash.Hash, int) {
switch hashType {
case "SHA1":
return hmac.New(sha1.New, key), sha1.Size
case "SHA512":
return hmac.New(sha512.New, key), sha512.Size
default:
return hmac.New(sha256.New, key), sha256.Size
}
}
// makeCipher constructs the block cipher selected during negotiation.
// For AES the key length (16 vs 32 bytes) distinguishes AES-128 from
// AES-256. An unknown cipherType yields an error.
func makeCipher(cipherType string, CKey []byte) (cipher.Block, error) {
	if cipherType == "Blowfish" {
		return bfish.NewCipher(CKey)
	}
	if cipherType == "AES-128" || cipherType == "AES-256" {
		return aes.NewCipher(CKey)
	}
	return nil, fmt.Errorf("Unrecognized cipher string: %s", cipherType)
}
// handleSecureIn decrypts and authenticates incoming traffic: it reads
// ciphertext frames from s.insecure.In, verifies the trailing HMAC, then
// CTR-decrypts the payload onto s.Duplex.In. It runs until the context is
// done or the insecure channel closes, at which point it closes s.Duplex.In.
// The t-prefixed args ("their") are the remote side's IV, cipher key, MAC key.
func (s *SecurePipe) handleSecureIn(hashType, cipherType string, tIV, tCKey, tMKey []byte) {
	theirBlock, err := makeCipher(cipherType, tCKey)
	if err != nil {
		log.Criticalf("Invalid Cipher: %s", err)
		s.cancel()
		return
	}
	theirCipher := cipher.NewCTR(theirBlock, tIV)

	theirMac, macSize := makeMac(hashType, tMKey)

	for {
		var data []byte
		ok := true

		select {
		case <-s.ctx.Done():
			ok = false // return out
		case data, ok = <-s.insecure.In:
		}

		if !ok {
			close(s.Duplex.In)
			return
		}

		// A frame is ciphertext || mac; anything not longer than the MAC
		// carries no payload and is dropped.
		if len(data) <= macSize {
			continue
		}
		mark := len(data) - macSize
		theirMac.Write(data[0:mark])
		expected := theirMac.Sum(nil)
		theirMac.Reset()

		// NOTE(review): frames with a bad MAC are silently dropped rather
		// than terminating the session.
		hmacOk := hmac.Equal(data[mark:], expected)
		if !hmacOk {
			continue
		}

		// Decrypt in place, then forward only the plaintext portion.
		theirCipher.XORKeyStream(data, data[0:mark])
		s.Duplex.In <- data[:mark]
	}
}
// handleSecureOut encrypts and authenticates outgoing traffic: it reads
// plaintext from s.Out, CTR-encrypts it, appends an HMAC computed over the
// ciphertext (encrypt-then-MAC), and writes the frame to s.insecure.Out.
// It runs until the context is done or s.Out closes, at which point it
// closes s.insecure.Out. The m-prefixed args ("mine") are the local side's
// IV, cipher key, and MAC key.
func (s *SecurePipe) handleSecureOut(hashType, cipherType string, mIV, mCKey, mMKey []byte) {
	myBlock, err := makeCipher(cipherType, mCKey)
	if err != nil {
		log.Criticalf("Invalid Cipher: %s", err)
		s.cancel()
		return
	}
	myCipher := cipher.NewCTR(myBlock, mIV)

	myMac, macSize := makeMac(hashType, mMKey)

	for {
		var data []byte
		ok := true

		select {
		case <-s.ctx.Done():
			ok = false // return out
		case data, ok = <-s.Out:
		}

		if !ok {
			close(s.insecure.Out)
			return
		}

		// Empty messages would produce MAC-only frames; skip them.
		if len(data) == 0 {
			continue
		}

		// frame layout: [ ciphertext (len(data)) | mac (macSize) ]
		buff := make([]byte, len(data)+macSize)

		myCipher.XORKeyStream(buff, data)

		myMac.Write(buff[0:len(data)])
		copy(buff[len(data):], myMac.Sum(nil))
		myMac.Reset()

		s.insecure.Out <- buff
	}
}
// SelectBest determines which algorithm to use. It is symmetric:
// SelectBest(a, b) == SelectBest(b, a), so both peers agree on the same
// algorithm without an extra round trip.
func SelectBest(myPrefs, theirPrefs string) (string, error) {
	// The peer whose serialized preference list hashes larger gets
	// first choice.
	order := bytes.Compare(u.Hash([]byte(myPrefs)), u.Hash([]byte(theirPrefs)))
	if order == 0 {
		// Identical preference lists: just take the top pick.
		return strings.Split(myPrefs, ",")[0], nil
	}

	first, second := myPrefs, theirPrefs
	if order < 0 {
		first, second = theirPrefs, myPrefs
	}
	firstChoices := strings.Split(first, ",")

	// Return the earliest entry of the lower-priority list that the
	// higher-priority list also contains.
	for _, candidate := range strings.Split(second, ",") {
		for _, choice := range firstChoices {
			if choice == candidate {
				return choice, nil
			}
		}
	}

	return "", errors.New("No algorithms in common!")
}
// getOrConstructPeer attempts to fetch a peer from a peerstore.
// If it succeeds, it verifies that the given public key matches the
// peer's ID and sets it; otherwise a new peer entry is constructed.
func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (peer.Peer, error) {
	id, err := peer.IDFromPubKey(rpk)
	if err != nil {
		return nil, err
	}

	p, err := peers.FindOrCreate(id)
	if err != nil {
		return nil, err // unexpected error happened.
	}

	// public key verification happens in Peer.VerifyAndSetPubKey, so a
	// mismatched key can never be attached to the peer.
	if err = p.VerifyAndSetPubKey(rpk); err != nil {
		return nil, err // pubkey mismatch or other problem
	}

	return p, nil
}

View File

@ -1,78 +0,0 @@
package spipe
import (
"errors"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
peer "github.com/jbenet/go-ipfs/peer"
pipes "github.com/jbenet/go-ipfs/util/pipes"
)
// SecurePipe objects represent a bi-directional message channel.
type SecurePipe struct {
	pipes.Duplex                  // plaintext side, exposed to the application
	insecure pipes.Duplex         // raw (unencrypted) transport channels
	local    peer.Peer            // our identity
	remote   peer.Peer            // remote identity, learned during the handshake
	peers    peer.Peerstore       // used to look up / create the remote peer

	params params

	ctx    context.Context        // governs the pipe's lifetime
	cancel context.CancelFunc     // invoked by Close (and on cipher errors)
}

// options in a secure pipe
type params struct {
}
// NewSecurePipe constructs a pipe with channels of a given buffer size.
// The handshake runs synchronously before returning; on failure the pipe
// is closed and an error returned. The remote peer is known only after
// the handshake completes.
func NewSecurePipe(ctx context.Context, bufsize int, local peer.Peer,
	peers peer.Peerstore, insecure pipes.Duplex) (*SecurePipe, error) {

	ctx, cancel := context.WithCancel(ctx)

	sp := &SecurePipe{
		Duplex: pipes.Duplex{
			In:  make(chan []byte, bufsize),
			Out: make(chan []byte, bufsize),
		},
		local: local,
		peers: peers,

		insecure: insecure,

		ctx:    ctx,
		cancel: cancel,
	}

	if err := sp.handshake(); err != nil {
		sp.Close()
		return nil, err
	}

	return sp, nil
}
// LocalPeer retrieves the local peer.
func (s *SecurePipe) LocalPeer() peer.Peer {
	return s.local
}
// RemotePeer retrieves the remote peer.
func (s *SecurePipe) RemotePeer() peer.Peer {
	return s.remote
}
// Close closes the secure pipe by cancelling its context, which stops the
// secure in/out goroutines. A second call (or a call after the context was
// otherwise cancelled) returns an error instead of cancelling again.
func (s *SecurePipe) Close() error {
	select {
	case <-s.ctx.Done():
		return errors.New("already closed")
	default:
	}

	s.cancel()
	return nil
}

View File

@ -1,161 +0,0 @@
package spipe
import (
"testing"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ci "github.com/jbenet/go-ipfs/crypto"
"github.com/jbenet/go-ipfs/peer"
"github.com/jbenet/go-ipfs/util"
"github.com/jbenet/go-ipfs/util/pipes"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
// getPeer generates a fresh 1024-bit RSA keypair and wraps it in a test
// peer, failing the test/benchmark on any error.
func getPeer(tb testing.TB) peer.Peer {
	privk, pubk, err := ci.GenerateKeyPair(ci.RSA, 1024)
	if err != nil {
		tb.Fatal(err)
	}
	p, err := testutil.NewPeerWithKeyPair(privk, pubk)
	if err != nil {
		tb.Fatal(err)
	}
	return p
}
// bindDuplexNoCopy shuttles messages between two duplex pipes in both
// directions without copying the message buffers. It blocks until a.Out
// closes; the b.Out -> a.In direction runs in its own goroutine.
func bindDuplexNoCopy(a, b pipes.Duplex) {
	go func() {
		for m := range b.Out {
			a.In <- m
		}
	}()
	for m := range a.Out {
		b.In <- m
	}
}
// globuf is scratch space shared by all copies made in bindDuplexWithCopy.
// NOTE(review): both directions (and any concurrent binders) copy into this
// one buffer; overlapping traffic would clobber data — presumably the
// benchmarks only ever drive one direction at a time. Confirm before reuse.
var globuf = make([]byte, 4*1024*1024)

// bindDuplexWithCopy shuttles messages between two duplex pipes, copying
// each message into globuf first so the receiver never aliases the
// sender's buffer. It blocks until a.Out closes.
func bindDuplexWithCopy(a, b pipes.Duplex) {
	// dup copies byt into the shared scratch buffer and returns the view.
	dup := func(byt []byte) []byte {
		n := globuf[:len(byt)]
		copy(n, byt)
		return n
	}
	go func() {
		for m := range b.Out {
			a.In <- dup(m)
		}
	}()
	for m := range a.Out {
		b.In <- dup(m)
	}
}
// BenchmarkDataEncryptDefault measures SecurePipe throughput with the full
// default algorithm negotiation lists.
// NOTE(review): these benchmarks mutate package-level globals
// (SupportedExchanges/Ciphers/Hashes) and so must not run concurrently.
func BenchmarkDataEncryptDefault(b *testing.B) {
	SupportedExchanges = "P-256,P-224,P-384,P-521"
	SupportedCiphers = "AES-256,AES-128"
	SupportedHashes = "SHA256,SHA512,SHA1"
	runEncryptBenchmark(b)
}

// BenchmarkDataEncryptLite measures throughput with the cheapest
// supported algorithms (P-256 / AES-128 / SHA1).
func BenchmarkDataEncryptLite(b *testing.B) {
	SupportedExchanges = "P-256"
	SupportedCiphers = "AES-128"
	SupportedHashes = "SHA1"
	runEncryptBenchmark(b)
}

// BenchmarkDataEncryptBlowfish measures throughput using Blowfish
// instead of AES.
func BenchmarkDataEncryptBlowfish(b *testing.B) {
	SupportedExchanges = "P-256"
	SupportedCiphers = "Blowfish"
	SupportedHashes = "SHA1"
	runEncryptBenchmark(b)
}
// runEncryptBenchmark measures end-to-end SecurePipe throughput: it
// handshakes two pipes over an in-memory duplex, then times pushing
// 512KiB messages from one side through encryption/decryption to the other.
func runEncryptBenchmark(b *testing.B) {
	pstore := peer.NewPeerstore()
	ctx := context.TODO()
	bufsize := 1024 * 1024

	pa := getPeer(b)
	pb := getPeer(b)

	duplexa := pipes.NewDuplex(16)
	duplexb := pipes.NewDuplex(16)

	go bindDuplexNoCopy(duplexa, duplexb)

	var spb *SecurePipe
	errs := make(chan error, 1)
	done := make(chan struct{})
	go func() {
		var err error
		spb, err = NewSecurePipe(ctx, bufsize, pb, pstore, duplexb)
		if err != nil {
			// BUGFIX: b.Fatal (FailNow) must not be called from a
			// goroutine other than the one running the benchmark, per
			// the testing package docs. Hand the error back instead.
			errs <- err
			return
		}
		done <- struct{}{}
	}()

	spa, err := NewSecurePipe(ctx, bufsize, pa, pstore, duplexa)
	if err != nil {
		b.Fatal(err)
	}
	select {
	case err := <-errs:
		b.Fatal(err)
	case <-done:
	}

	go func() {
		for range spa.In {
			// Throw it all away,
			// all of your hopes and dreams
			// piped out to /dev/null...
			done <- struct{}{}
		}
	}()

	data := make([]byte, 1024*512)
	util.NewTimeSeededRand().Read(data)

	// Begin actual benchmarking
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		b.SetBytes(int64(len(data)))
		spb.Out <- data
		<-done
	}
}
// BenchmarkDataTransfer measures the raw (unencrypted, copy-only) duplex
// transfer rate with 512KiB messages, as a baseline for the encryption
// benchmarks above.
func BenchmarkDataTransfer(b *testing.B) {
	duplexa := pipes.NewDuplex(16)
	duplexb := pipes.NewDuplex(16)

	go bindDuplexWithCopy(duplexa, duplexb)

	done := make(chan struct{})
	go func() {
		// idiom fix: `for range` replaces the non-idiomatic `for _ = range`.
		for range duplexa.In {
			// Throw it all away,
			// all of your hopes and dreams
			// piped out to /dev/null...
			done <- struct{}{}
		}
	}()

	data := make([]byte, 1024*512)
	util.NewTimeSeededRand().Read(data)

	// Begin actual benchmarking
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		b.SetBytes(int64(len(data)))
		duplexb.Out <- data
		<-done
	}
}

View File

@ -13,8 +13,8 @@ import (
"crypto/rand"
ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
pb "github.com/jbenet/go-ipfs/diagnostics/internal/pb"
@ -31,7 +31,7 @@ const ResponseTimeout = time.Second * 10
// requests
type Diagnostics struct {
network net.Network
self peer.Peer
self peer.ID
diagLock sync.Mutex
diagMap map[string]time.Time
@ -39,7 +39,7 @@ type Diagnostics struct {
}
// NewDiagnostics instantiates a new diagnostics service running on the given network
func NewDiagnostics(self peer.Peer, inet net.Network) *Diagnostics {
func NewDiagnostics(self peer.ID, inet net.Network) *Diagnostics {
d := &Diagnostics{
network: inet,
self: self,
@ -91,20 +91,20 @@ func (di *DiagInfo) Marshal() []byte {
return b
}
func (d *Diagnostics) getPeers() []peer.Peer {
func (d *Diagnostics) getPeers() []peer.ID {
return d.network.Peers()
}
func (d *Diagnostics) getDiagInfo() *DiagInfo {
di := new(DiagInfo)
di.CodeVersion = "github.com/jbenet/go-ipfs"
di.ID = d.self.ID().Pretty()
di.ID = d.self.Pretty()
di.LifeSpan = time.Since(d.birth)
di.Keys = nil // Currently no way to query datastore
di.BwIn, di.BwOut = d.network.BandwidthTotals()
for _, p := range d.getPeers() {
d := connDiagInfo{p.GetLatency(), p.ID().Pretty()}
d := connDiagInfo{d.network.Peerstore().LatencyEWMA(p), p.Pretty()}
di.Connections = append(di.Connections, d)
}
return di
@ -142,7 +142,7 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
for _, p := range peers {
log.Debugf("Sending getDiagnostic to: %s", p)
sends++
go func(p peer.Peer) {
go func(p peer.ID) {
data, err := d.getDiagnosticFromPeer(ctx, p, pmes)
if err != nil {
log.Errorf("GetDiagnostic error: %v", err)
@ -181,7 +181,7 @@ func appendDiagnostics(data []byte, cur []*DiagInfo) []*DiagInfo {
}
// TODO: this method no longer needed.
func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.Peer, mes *pb.Message) ([]byte, error) {
func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, mes *pb.Message) ([]byte, error) {
rpmes, err := d.sendRequest(ctx, p, mes)
if err != nil {
return nil, err
@ -195,7 +195,7 @@ func newMessage(diagID string) *pb.Message {
return pmes
}
func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
func (d *Diagnostics) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
s, err := d.network.NewStream(net.ProtocolDiag, p)
if err != nil {
@ -225,7 +225,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes
return rpmes, nil
}
func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
func (d *Diagnostics) handleDiagnostic(p peer.ID, pmes *pb.Message) (*pb.Message, error) {
log.Debugf("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
resp := newMessage(pmes.GetDiagID())
@ -250,7 +250,7 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Messa
for _, p := range d.getPeers() {
log.Debugf("Sending diagnostic request to peer: %s", p)
sendcount++
go func(p peer.Peer) {
go func(p peer.ID) {
out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
if err != nil {
log.Errorf("getDiagnostic error: %v", err)
@ -288,7 +288,7 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, s net.Stream) error {
// Print out diagnostic
log.Infof("[peer: %s] Got message from [%s]\n",
d.self.ID().Pretty(), s.Conn().RemotePeer().ID().Pretty())
d.self.Pretty(), s.Conn().RemotePeer())
// dispatch handler.
p := s.Conn().RemotePeer()

View File

@ -1,22 +1,14 @@
RANDOMSRC = Godeps/_workspace/src/github.com/jbenet/go-random/random
IPFS_DOCKER_IMAGE = zaqwsx_ipfs-test-img
IMAGE_NAME = ipfs-test-latest
test: clean setup
fig build --no-cache
fig up --no-color | tee build/fig.log
make save_logs # save the ipfs logs for inspection
# fig up won't report the error using an error code, so we grep the
# fig.log file to find out whether the call succeeded
tail build/fig.log | grep "exited with code 0"
./run-test-on-img.sh $(IMAGE_NAME)
setup: docker_ipfs_image data/filetiny data/filerand
save_logs:
sh bin/save_logs.sh
docker_ipfs_image:
docker images | grep $(IPFS_DOCKER_IMAGE)
data/filetiny: Makefile
cp Makefile ./data/filetiny # simple
@ -26,6 +18,12 @@ data/filerand: bin/random
bin/random:
go build -o ./bin/random ../$(RANDOMSRC)
# just build it every time... this part isn't
# even the lengthy part, and it decreases pain.
docker_ipfs_image:
cd .. && docker build -t $(IMAGE_NAME) .
docker images | grep $(IMAGE_NAME)
clean:
sh bin/clean.sh
fig stop

View File

@ -5,4 +5,6 @@ ADD . /tmp/id
RUN mv -f /tmp/id/config /root/.go-ipfs/config
RUN ipfs id
ENV IPFS_LOGGING_FMT nocolor
EXPOSE 4011 4012/udp

View File

@ -1,2 +1 @@
*.log
go-random
.built_img

View File

@ -7,5 +7,7 @@ RUN ipfs id
EXPOSE 4031 4032/udp
ENV IPFS_LOGGING_FMT nocolor
ENTRYPOINT ["/bin/bash"]
CMD ["/tmp/id/run.sh"]

33
dockertest/run-test-on-img.sh Executable file
View File

@ -0,0 +1,33 @@
#!/bin/sh
#
# Runs the dockerized ipfs integration test against a given docker image:
# retags the image under the well-known test tag, then drives fig to build
# and run the test containers, saving logs and grepping them for success.

if [ "$#" -ne 1 ]; then
	echo "usage: $0 <docker-image-ref>"
	echo "runs this test on image matching <docker-image-ref>"
	exit 1
fi

# this tag is used by the dockerfiles in
# {data, server, client, bootstrap}
tag=zaqwsx_ipfs-test-img

# could use set -v, but i dont want to see the comments...

# resolve the image id for the given ref and retag it so the fig setup
# picks it up under the well-known tag.
img=$(docker images | grep $1 | awk '{print $3}')
echo "using docker image: $img ($1)"

echo docker tag -f $img $tag
docker tag -f $img $tag

echo "fig build --no-cache"
fig build --no-cache

echo "fig up --no-color | tee build/fig.log"
fig up --no-color | tee build/fig.log

# save the ipfs logs for inspection
echo "make save_logs"
make save_logs

# fig up won't report the error using an error code, so we grep the
# fig.log file to find out whether the call succeeded
echo 'tail build/fig.log | grep "exited with code 0"'
tail build/fig.log | grep "exited with code 0"

View File

@ -8,5 +8,7 @@ RUN chmod +x /tmp/test/run.sh
EXPOSE 4021 4022/udp
ENV IPFS_LOGGING_FMT nocolor
ENTRYPOINT ["/bin/bash"]
CMD ["/tmp/test/run.sh"]

View File

@ -87,11 +87,12 @@ func RandomBytes(n int64) []byte {
func AddCatBytes(data []byte, conf Config) error {
sessionGenerator := bitswap.NewSessionGenerator(
tn.VirtualNetwork(delay.Fixed(conf.NetworkLatency)), // TODO rename VirtualNetwork
mockrouting.NewServerWithDelay(mockrouting.DelayConfig{
Query: delay.Fixed(conf.RoutingLatency),
ValueVisibility: delay.Fixed(conf.RoutingLatency),
}),
tn.VirtualNetwork(
mockrouting.NewServerWithDelay(mockrouting.DelayConfig{
Query: delay.Fixed(conf.RoutingLatency),
ValueVisibility: delay.Fixed(conf.RoutingLatency),
}),
delay.Fixed(conf.NetworkLatency)), // TODO rename VirtualNetwork
)
defer sessionGenerator.Close()

View File

@ -8,6 +8,7 @@ import (
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
blocks "github.com/jbenet/go-ipfs/blocks"
blockstore "github.com/jbenet/go-ipfs/blocks/blockstore"
exchange "github.com/jbenet/go-ipfs/exchange"
@ -18,8 +19,10 @@ import (
wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
"github.com/jbenet/go-ipfs/util/delay"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
pset "github.com/jbenet/go-ipfs/util/peerset"
pset "github.com/jbenet/go-ipfs/util/peerset" // TODO move this to peerstore
)
var log = eventlog.Logger("bitswap")
@ -36,14 +39,14 @@ const (
)
var (
rebroadcastDelay = time.Second * 10
rebroadcastDelay = delay.Fixed(time.Second * 10)
)
// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing,
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
bstore blockstore.Blockstore, nice bool) exchange.Interface {
ctx, cancelFunc := context.WithCancel(parent)
@ -60,8 +63,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout
cancelFunc: cancelFunc,
notifications: notif,
engine: decision.NewEngine(ctx, bstore),
routing: routing,
sender: network,
network: network,
wantlist: wantlist.NewThreadSafe(),
batchRequests: make(chan []u.Key, sizeBatchRequestChan),
}
@ -75,16 +77,13 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout
// bitswap instances implement the bitswap protocol.
type bitswap struct {
// sender delivers messages on behalf of the session
sender bsnet.BitSwapNetwork
// network delivers messages on behalf of the session
network bsnet.BitSwapNetwork
// blockstore is the local database
// NB: ensure threadsafety
blockstore blockstore.Blockstore
// routing interface for communication
routing bsnet.Routing
notifications notifications.PubSub
// Requests for a set of related blocks
@ -162,10 +161,10 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error {
}
bs.wantlist.Remove(blk.Key())
bs.notifications.Publish(blk)
return bs.routing.Provide(ctx, blk.Key())
return bs.network.Provide(ctx, blk.Key())
}
func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error {
func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.ID) error {
if peers == nil {
panic("Cant send wantlist to nil peerchan")
}
@ -177,25 +176,12 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e
for peerToQuery := range peers {
log.Event(ctx, "PeerToQuery", peerToQuery)
wg.Add(1)
go func(p peer.Peer) {
go func(p peer.ID) {
defer wg.Done()
log.Event(ctx, "DialPeer", p)
err := bs.sender.DialPeer(ctx, p)
if err != nil {
log.Errorf("Error sender.DialPeer(%s): %s", p, err)
if err := bs.send(ctx, p, message); err != nil {
log.Error(err)
return
}
err = bs.sender.SendMessage(ctx, p, message)
if err != nil {
log.Errorf("Error sender.SendMessage(%s) = %s", p, err)
return
}
// FIXME ensure accounting is handled correctly when
// communication fails. May require slightly different API to
// get better guarantees. May need shared sequence numbers.
bs.engine.MessageSent(p, message)
}(peerToQuery)
}
wg.Wait()
@ -212,7 +198,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli
message.AddEntry(e.Key, e.Priority)
}
ps := pset.New()
set := pset.New()
// Get providers for all entries in wantlist (could take a while)
wg := sync.WaitGroup{}
@ -221,10 +207,9 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli
go func(k u.Key) {
defer wg.Done()
child, _ := context.WithTimeout(ctx, providerRequestTimeout)
providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest)
providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
for prov := range providers {
if ps.TryAdd(prov) { //Do once per peer
if set.TryAdd(prov) { //Do once per peer
bs.send(ctx, prov, message)
}
}
@ -249,7 +234,7 @@ func (bs *bitswap) clientWorker(parent context.Context) {
ctx, cancel := context.WithCancel(parent)
broadcastSignal := time.After(rebroadcastDelay)
broadcastSignal := time.After(rebroadcastDelay.Get())
defer cancel()
for {
@ -257,7 +242,7 @@ func (bs *bitswap) clientWorker(parent context.Context) {
case <-broadcastSignal:
// Resend unfulfilled wantlist keys
bs.sendWantlistToProviders(ctx, bs.wantlist)
broadcastSignal = time.After(rebroadcastDelay)
broadcastSignal = time.After(rebroadcastDelay.Get())
case ks := <-bs.batchRequests:
if len(ks) == 0 {
log.Warning("Received batch request for zero blocks")
@ -274,8 +259,7 @@ func (bs *bitswap) clientWorker(parent context.Context) {
// it. Later, this assumption may not hold as true if we implement
// newer bitswap strategies.
child, _ := context.WithTimeout(ctx, providerRequestTimeout)
providers := bs.routing.FindProvidersAsync(child, ks[0], maxProvidersPerRequest)
providers := bs.network.FindProvidersAsync(child, ks[0], maxProvidersPerRequest)
err := bs.sendWantListTo(ctx, providers)
if err != nil {
log.Errorf("error sending wantlist: %s", err)
@ -287,19 +271,19 @@ func (bs *bitswap) clientWorker(parent context.Context) {
}
// TODO(brian): handle errors
func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage) {
func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) (
peer.ID, bsmsg.BitSwapMessage) {
log.Debugf("ReceiveMessage from %s", p)
if p == nil {
if p == "" {
log.Error("Received message from nil peer!")
// TODO propagate the error upward
return nil, nil
return "", nil
}
if incoming == nil {
log.Error("Got nil bitswap message!")
// TODO propagate the error upward
return nil, nil
return "", nil
}
// This call records changes to wantlists, blocks received,
@ -321,7 +305,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm
bs.cancelBlocks(ctx, keys)
// TODO: consider changing this function to not return anything
return nil, nil
return "", nil
}
func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) {
@ -349,9 +333,14 @@ func (bs *bitswap) ReceiveError(err error) {
// send strives to ensure that accounting is always performed when a message is
// sent
func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) error {
if err := bs.sender.SendMessage(ctx, p, m); err != nil {
return err
func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error {
log.Event(ctx, "DialPeer", p)
err := bs.network.DialPeer(ctx, p)
if err != nil {
return errors.Wrap(err)
}
if err := bs.network.SendMessage(ctx, p, m); err != nil {
return errors.Wrap(err)
}
return bs.engine.MessageSent(p, m)
}

View File

@ -7,13 +7,14 @@ import (
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
blocks "github.com/jbenet/go-ipfs/blocks"
blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil"
tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet"
peer "github.com/jbenet/go-ipfs/peer"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
u "github.com/jbenet/go-ipfs/util"
delay "github.com/jbenet/go-ipfs/util/delay"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
// FIXME the tests are really sensitive to the network delay. fix them to work
@ -23,9 +24,8 @@ const kNetworkDelay = 0 * time.Millisecond
func TestClose(t *testing.T) {
// TODO
t.Skip("TODO Bitswap's Close implementation is a WIP")
vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rout := mockrouting.NewServer()
sesgen := NewSessionGenerator(vnet, rout)
vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sesgen := NewSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
@ -38,9 +38,8 @@ func TestClose(t *testing.T) {
func TestGetBlockTimeout(t *testing.T) {
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
g := NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
g := NewSessionGenerator(net)
defer g.Close()
self := g.Next()
@ -54,15 +53,16 @@ func TestGetBlockTimeout(t *testing.T) {
}
}
func TestProviderForKeyButNetworkCannotFind(t *testing.T) {
func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
g := NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay))
g := NewSessionGenerator(net)
defer g.Close()
block := blocks.NewBlock([]byte("block"))
rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network
pinfo := peer.PeerInfo{ID: peer.ID("testing")}
rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network
solo := g.Next()
defer solo.Exchange.Close()
@ -79,10 +79,9 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) {
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
block := blocks.NewBlock([]byte("block"))
g := NewSessionGenerator(net, rs)
g := NewSessionGenerator(net)
defer g.Close()
hasBlock := g.Next()
@ -134,9 +133,8 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
if testing.Short() {
t.SkipNow()
}
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
sg := NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
@ -150,10 +148,9 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
var blkeys []u.Key
first := instances[0]
for _, b := range blocks {
first.Blockstore().Put(b)
first.Blockstore().Put(b) // TODO remove. don't need to do this. bitswap owns block
blkeys = append(blkeys, b.Key())
first.Exchange.HasBlock(context.Background(), b)
rs.Client(first.Peer).Provide(context.Background(), b.Key())
}
t.Log("Distribute!")
@ -200,15 +197,13 @@ func TestSendToWantingPeer(t *testing.T) {
t.SkipNow()
}
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
sg := NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
oldVal := rebroadcastDelay
rebroadcastDelay = time.Second / 2
defer func() { rebroadcastDelay = oldVal }()
prev := rebroadcastDelay.Set(time.Second / 2)
defer func() { rebroadcastDelay.Set(prev) }()
peerA := sg.Next()
peerB := sg.Next()
@ -247,9 +242,8 @@ func TestSendToWantingPeer(t *testing.T) {
}
func TestBasicBitswap(t *testing.T) {
net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay))
rs := mockrouting.NewServer()
sg := NewSessionGenerator(net, rs)
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewSessionGenerator(net)
bg := blocksutil.NewBlockGenerator()
t.Log("Test a few nodes trying to get one file with a lot of blocks")

View File

@ -50,7 +50,7 @@ const (
// Envelope contains a message for a Peer
type Envelope struct {
// Peer is the intended recipient
Peer peer.Peer
Peer peer.ID
// Message is the payload
Message bsmsg.BitSwapMessage
}
@ -75,12 +75,12 @@ type Engine struct {
lock sync.RWMutex // protects the fields immediatly below
// ledgerMap lists Ledgers by their Partner key.
ledgerMap map[u.Key]*ledger
ledgerMap map[peer.ID]*ledger
}
func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
e := &Engine{
ledgerMap: make(map[u.Key]*ledger),
ledgerMap: make(map[peer.ID]*ledger),
bs: bs,
peerRequestQueue: newTaskQueue(),
outbox: make(chan Envelope, sizeOutboxChan),
@ -126,11 +126,11 @@ func (e *Engine) Outbox() <-chan Envelope {
}
// Returns a slice of Peers with whom the local node has active sessions
func (e *Engine) Peers() []peer.Peer {
func (e *Engine) Peers() []peer.ID {
e.lock.RLock()
defer e.lock.RUnlock()
response := make([]peer.Peer, 0)
response := make([]peer.ID, 0)
for _, ledger := range e.ledgerMap {
response = append(response, ledger.Partner)
}
@ -139,7 +139,7 @@ func (e *Engine) Peers() []peer.Peer {
// MessageReceived performs book-keeping. Returns error if passed invalid
// arguments.
func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error {
func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error {
newWorkExists := false
defer func() {
if newWorkExists {
@ -189,7 +189,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error {
// inconsistent. Would need to ensure that Sends and acknowledgement of the
// send happen atomically
func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error {
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error {
e.lock.Lock()
defer e.lock.Unlock()
@ -203,22 +203,22 @@ func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error {
return nil
}
func (e *Engine) numBytesSentTo(p peer.Peer) uint64 {
func (e *Engine) numBytesSentTo(p peer.ID) uint64 {
// NB not threadsafe
return e.findOrCreate(p).Accounting.BytesSent
}
func (e *Engine) numBytesReceivedFrom(p peer.Peer) uint64 {
func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 {
// NB not threadsafe
return e.findOrCreate(p).Accounting.BytesRecv
}
// ledger lazily instantiates a ledger
func (e *Engine) findOrCreate(p peer.Peer) *ledger {
l, ok := e.ledgerMap[p.Key()]
func (e *Engine) findOrCreate(p peer.ID) *ledger {
l, ok := e.ledgerMap[p]
if !ok {
l = newLedger(p)
e.ledgerMap[p.Key()] = l
e.ledgerMap[p] = l
}
return l
}

View File

@ -7,21 +7,21 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
blocks "github.com/jbenet/go-ipfs/blocks"
blockstore "github.com/jbenet/go-ipfs/blocks/blockstore"
message "github.com/jbenet/go-ipfs/exchange/bitswap/message"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
type peerAndEngine struct {
peer.Peer
Peer peer.ID
Engine *Engine
}
func newPeerAndLedgermanager(idStr string) peerAndEngine {
return peerAndEngine{
Peer: testutil.NewPeerWithIDString(idStr),
Peer: peer.ID(idStr),
//Strategy: New(true),
Engine: NewEngine(context.TODO(),
blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))),
@ -70,7 +70,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
sanfrancisco.Engine.MessageSent(seattle.Peer, m)
seattle.Engine.MessageReceived(sanfrancisco.Peer, m)
if seattle.Peer.Key() == sanfrancisco.Peer.Key() {
if seattle.Peer == sanfrancisco.Peer {
t.Fatal("Sanity Check: Peers have same Key!")
}
@ -83,9 +83,9 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
}
}
func peerIsPartner(p peer.Peer, e *Engine) bool {
func peerIsPartner(p peer.ID, e *Engine) bool {
for _, partner := range e.Peers() {
if partner.Key() == p.Key() {
if partner == p {
return true
}
}

View File

@ -12,7 +12,7 @@ import (
// access/lookups.
type keySet map[u.Key]struct{}
func newLedger(p peer.Peer) *ledger {
func newLedger(p peer.ID) *ledger {
return &ledger{
wantList: wl.New(),
Partner: p,
@ -24,7 +24,7 @@ func newLedger(p peer.Peer) *ledger {
// NOT threadsafe
type ledger struct {
// Partner is the remote Peer.
Partner peer.Peer
Partner peer.ID
// Accounting tracks bytes sent and recieved.
Accounting debtRatio

View File

@ -26,12 +26,12 @@ func newTaskQueue() *taskQueue {
type task struct {
Entry wantlist.Entry
Target peer.Peer
Target peer.ID
Trash bool
}
// Push currently adds a new task to the end of the list
func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) {
func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) {
tl.lock.Lock()
defer tl.lock.Unlock()
if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok {
@ -69,7 +69,7 @@ func (tl *taskQueue) Pop() *task {
}
// Remove lazily removes a task from the queue
func (tl *taskQueue) Remove(k u.Key, p peer.Peer) {
func (tl *taskQueue) Remove(k u.Key, p peer.ID) {
tl.lock.Lock()
t, ok := tl.taskmap[taskKey(p, k)]
if ok {
@ -79,6 +79,6 @@ func (tl *taskQueue) Remove(k u.Key, p peer.Peer) {
}
// taskKey returns a key that uniquely identifies a task.
func taskKey(p peer.Peer, k u.Key) string {
return string(p.Key() + k)
func taskKey(p peer.ID, k u.Key) string {
return string(p) + string(k)
}

View File

@ -12,37 +12,39 @@ import (
type BitSwapNetwork interface {
// DialPeer ensures there is a connection to peer.
DialPeer(context.Context, peer.Peer) error
DialPeer(context.Context, peer.ID) error
// SendMessage sends a BitSwap message to a peer.
SendMessage(
context.Context,
peer.Peer,
peer.ID,
bsmsg.BitSwapMessage) error
// SendRequest sends a BitSwap message to a peer and waits for a response.
SendRequest(
context.Context,
peer.Peer,
peer.ID,
bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error)
// SetDelegate registers the Reciver to handle messages received from the
// network.
SetDelegate(Receiver)
Routing
}
// Implement Receiver to receive messages from the BitSwapNetwork
type Receiver interface {
ReceiveMessage(
ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) (
destination peer.Peer, outgoing bsmsg.BitSwapMessage)
ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) (
destination peer.ID, outgoing bsmsg.BitSwapMessage)
ReceiveError(error)
}
type Routing interface {
// FindProvidersAsync returns a channel of providers for the given key
FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer
FindProvidersAsync(context.Context, u.Key, int) <-chan peer.ID
// Provide provides the key to the network
Provide(context.Context, u.Key) error

View File

@ -2,10 +2,10 @@ package network
import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
routing "github.com/jbenet/go-ipfs/routing"
util "github.com/jbenet/go-ipfs/util"
)
@ -13,9 +13,10 @@ var log = util.Logger("bitswap_network")
// NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS
// Dialer & Service
func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork {
func NewFromIpfsNetwork(n inet.Network, r routing.IpfsRouting) BitSwapNetwork {
bitswapNetwork := impl{
network: n,
routing: r,
}
n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream)
return &bitswapNetwork
@ -25,11 +26,74 @@ func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork {
// NetMessage objects, into the bitswap network interface.
type impl struct {
network inet.Network
routing routing.IpfsRouting
// inbound messages from the network are forwarded to the receiver
receiver Receiver
}
func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error {
return bsnet.network.DialPeer(ctx, p)
}
func (bsnet *impl) SendMessage(
ctx context.Context,
p peer.ID,
outgoing bsmsg.BitSwapMessage) error {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
if err != nil {
return err
}
defer s.Close()
return outgoing.ToNet(s)
}
func (bsnet *impl) SendRequest(
ctx context.Context,
p peer.ID,
outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
if err != nil {
return nil, err
}
defer s.Close()
if err := outgoing.ToNet(s); err != nil {
return nil, err
}
return bsmsg.FromNet(s)
}
func (bsnet *impl) SetDelegate(r Receiver) {
bsnet.receiver = r
}
// FindProvidersAsync returns a channel of providers for the given key
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID {
out := make(chan peer.ID)
go func() {
defer close(out)
providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
for info := range providers {
bsnet.network.Peerstore().AddAddresses(info.ID, info.Addrs)
select {
case <-ctx.Done():
case out <- info.ID:
}
}
}()
return out
}
// Provide provides the key to the network
func (bsnet *impl) Provide(ctx context.Context, k util.Key) error {
return bsnet.routing.Provide(ctx, k)
}
// handleNewStream receives a new stream from the network.
func (bsnet *impl) handleNewStream(s inet.Stream) {
@ -52,43 +116,3 @@ func (bsnet *impl) handleNewStream(s inet.Stream) {
}()
}
func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error {
return bsnet.network.DialPeer(ctx, p)
}
func (bsnet *impl) SendMessage(
ctx context.Context,
p peer.Peer,
outgoing bsmsg.BitSwapMessage) error {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
if err != nil {
return err
}
defer s.Close()
return outgoing.ToNet(s)
}
func (bsnet *impl) SendRequest(
ctx context.Context,
p peer.Peer,
outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
if err != nil {
return nil, err
}
defer s.Close()
if err := outgoing.ToNet(s); err != nil {
return nil, err
}
return bsmsg.FromNet(s)
}
func (bsnet *impl) SetDelegate(r Receiver) {
bsnet.receiver = r
}

View File

@ -1,11 +1,13 @@
package bitswap
import (
"bytes"
"errors"
"fmt"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
"github.com/jbenet/go-ipfs/routing"
"github.com/jbenet/go-ipfs/routing/mock"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
peer "github.com/jbenet/go-ipfs/peer"
@ -14,49 +16,52 @@ import (
)
type Network interface {
Adapter(peer.Peer) bsnet.BitSwapNetwork
Adapter(peer.ID) bsnet.BitSwapNetwork
HasPeer(peer.Peer) bool
HasPeer(peer.ID) bool
SendMessage(
ctx context.Context,
from peer.Peer,
to peer.Peer,
from peer.ID,
to peer.ID,
message bsmsg.BitSwapMessage) error
SendRequest(
ctx context.Context,
from peer.Peer,
to peer.Peer,
from peer.ID,
to peer.ID,
message bsmsg.BitSwapMessage) (
incoming bsmsg.BitSwapMessage, err error)
}
// network impl
func VirtualNetwork(d delay.D) Network {
func VirtualNetwork(rs mockrouting.Server, d delay.D) Network {
return &network{
clients: make(map[util.Key]bsnet.Receiver),
delay: d,
clients: make(map[peer.ID]bsnet.Receiver),
delay: d,
routingserver: rs,
}
}
type network struct {
clients map[util.Key]bsnet.Receiver
delay delay.D
clients map[peer.ID]bsnet.Receiver
routingserver mockrouting.Server
delay delay.D
}
func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork {
func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork {
client := &networkClient{
local: p,
network: n,
routing: n.routingserver.Client(peer.PeerInfo{ID: p}),
}
n.clients[p.Key()] = client
n.clients[p] = client
return client
}
func (n *network) HasPeer(p peer.Peer) bool {
_, found := n.clients[p.Key()]
func (n *network) HasPeer(p peer.ID) bool {
_, found := n.clients[p]
return found
}
@ -64,11 +69,11 @@ func (n *network) HasPeer(p peer.Peer) bool {
// TODO what does the network layer do with errors received from services?
func (n *network) SendMessage(
ctx context.Context,
from peer.Peer,
to peer.Peer,
from peer.ID,
to peer.ID,
message bsmsg.BitSwapMessage) error {
receiver, ok := n.clients[to.Key()]
receiver, ok := n.clients[to]
if !ok {
return errors.New("Cannot locate peer on network")
}
@ -82,8 +87,8 @@ func (n *network) SendMessage(
}
func (n *network) deliver(
r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error {
if message == nil || from == nil {
r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error {
if message == nil || from == "" {
return errors.New("Invalid input")
}
@ -91,15 +96,15 @@ func (n *network) deliver(
nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message)
if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) {
if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") {
return errors.New("Malformed client request")
}
if nextPeer == nil && nextMsg == nil { // no response to send
if nextPeer == "" && nextMsg == nil { // no response to send
return nil
}
nextReceiver, ok := n.clients[nextPeer.Key()]
nextReceiver, ok := n.clients[nextPeer]
if !ok {
return errors.New("Cannot locate peer on network")
}
@ -110,32 +115,32 @@ func (n *network) deliver(
// TODO
func (n *network) SendRequest(
ctx context.Context,
from peer.Peer,
to peer.Peer,
from peer.ID,
to peer.ID,
message bsmsg.BitSwapMessage) (
incoming bsmsg.BitSwapMessage, err error) {
r, ok := n.clients[to.Key()]
r, ok := n.clients[to]
if !ok {
return nil, errors.New("Cannot locate peer on network")
}
nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message)
// TODO dedupe code
if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) {
if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") {
r.ReceiveError(errors.New("Malformed client request"))
return nil, nil
}
// TODO dedupe code
if nextPeer == nil && nextMsg == nil {
if nextPeer == "" && nextMsg == nil {
return nil, nil
}
// TODO test when receiver doesn't immediately respond to the initiator of the request
if !bytes.Equal(nextPeer.ID(), from.ID()) {
if nextPeer != from {
go func() {
nextReceiver, ok := n.clients[nextPeer.Key()]
nextReceiver, ok := n.clients[nextPeer]
if !ok {
// TODO log the error?
}
@ -147,26 +152,54 @@ func (n *network) SendRequest(
}
type networkClient struct {
local peer.Peer
local peer.ID
bsnet.Receiver
network Network
routing routing.IpfsRouting
}
func (nc *networkClient) SendMessage(
ctx context.Context,
to peer.Peer,
to peer.ID,
message bsmsg.BitSwapMessage) error {
return nc.network.SendMessage(ctx, nc.local, to, message)
}
func (nc *networkClient) SendRequest(
ctx context.Context,
to peer.Peer,
to peer.ID,
message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) {
return nc.network.SendRequest(ctx, nc.local, to, message)
}
func (nc *networkClient) DialPeer(ctx context.Context, p peer.Peer) error {
// FindProvidersAsync returns a channel of providers for the given key
func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID {
// NB: this function duplicates the PeerInfo -> ID transformation in the
// bitswap network adapter. Not to worry. This network client will be
// deprecated once the ipfsnet.Mock is added. The code below is only
// temporary.
out := make(chan peer.ID)
go func() {
defer close(out)
providers := nc.routing.FindProvidersAsync(ctx, k, max)
for info := range providers {
select {
case <-ctx.Done():
case out <- info.ID:
}
}
}()
return out
}
// Provide provides the key to the network
func (nc *networkClient) Provide(ctx context.Context, k util.Key) error {
return nc.routing.Provide(ctx, k)
}
func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error {
// no need to do anything because dialing isn't a thing in this test net.
if !nc.network.HasPeer(p) {
return fmt.Errorf("Peer not in network: %s", p)

View File

@ -5,30 +5,31 @@ import (
"testing"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
blocks "github.com/jbenet/go-ipfs/blocks"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
peer "github.com/jbenet/go-ipfs/peer"
delay "github.com/jbenet/go-ipfs/util/delay"
testutil "github.com/jbenet/go-ipfs/util/testutil"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
)
func TestSendRequestToCooperativePeer(t *testing.T) {
net := VirtualNetwork(delay.Fixed(0))
net := VirtualNetwork(mockrouting.NewServer(),delay.Fixed(0))
idOfRecipient := []byte("recipient")
idOfRecipient := peer.ID("recipient")
t.Log("Get two network adapters")
initiator := net.Adapter(testutil.NewPeerWithIDString("initiator"))
recipient := net.Adapter(testutil.NewPeerWithID(idOfRecipient))
initiator := net.Adapter(peer.ID("initiator"))
recipient := net.Adapter(idOfRecipient)
expectedStr := "response from recipient"
recipient.SetDelegate(lambda(func(
ctx context.Context,
from peer.Peer,
from peer.ID,
incoming bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage) {
peer.ID, bsmsg.BitSwapMessage) {
t.Log("Recipient received a message from the network")
@ -45,13 +46,17 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
message := bsmsg.New()
message.AddBlock(blocks.NewBlock([]byte("data")))
response, err := initiator.SendRequest(
context.Background(), testutil.NewPeerWithID(idOfRecipient), message)
context.Background(), idOfRecipient, message)
if err != nil {
t.Fatal(err)
}
t.Log("Check the contents of the response from recipient")
if response == nil {
t.Fatal("Should have received a response")
}
for _, blockFromRecipient := range response.Blocks() {
if string(blockFromRecipient.Data) == expectedStr {
return
@ -61,10 +66,10 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
}
func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
net := VirtualNetwork(delay.Fixed(0))
idOfResponder := []byte("responder")
waiter := net.Adapter(testutil.NewPeerWithIDString("waiter"))
responder := net.Adapter(testutil.NewPeerWithID(idOfResponder))
net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
idOfResponder := peer.ID("responder")
waiter := net.Adapter(peer.ID("waiter"))
responder := net.Adapter(idOfResponder)
var wg sync.WaitGroup
@ -74,9 +79,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
responder.SetDelegate(lambda(func(
ctx context.Context,
fromWaiter peer.Peer,
fromWaiter peer.ID,
msgFromWaiter bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage) {
peer.ID, bsmsg.BitSwapMessage) {
msgToWaiter := bsmsg.New()
msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr)))
@ -86,9 +91,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
waiter.SetDelegate(lambda(func(
ctx context.Context,
fromResponder peer.Peer,
fromResponder peer.ID,
msgFromResponder bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage) {
peer.ID, bsmsg.BitSwapMessage) {
// TODO assert that this came from the correct peer and that the message contents are as expected
ok := false
@ -103,13 +108,13 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
t.Fatal("Message not received from the responder")
}
return nil, nil
return "", nil
}))
messageSentAsync := bsmsg.New()
messageSentAsync.AddBlock(blocks.NewBlock([]byte("data")))
errSending := waiter.SendMessage(
context.Background(), testutil.NewPeerWithID(idOfResponder), messageSentAsync)
context.Background(), idOfResponder, messageSentAsync)
if errSending != nil {
t.Fatal(errSending)
}
@ -117,8 +122,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
wg.Wait() // until waiter delegate function is executed
}
type receiverFunc func(ctx context.Context, p peer.Peer,
incoming bsmsg.BitSwapMessage) (peer.Peer, bsmsg.BitSwapMessage)
type receiverFunc func(ctx context.Context, p peer.ID,
incoming bsmsg.BitSwapMessage) (peer.ID, bsmsg.BitSwapMessage)
// lambda returns a Receiver instance given a receiver function
func lambda(f receiverFunc) bsnet.Receiver {
@ -128,13 +133,13 @@ func lambda(f receiverFunc) bsnet.Receiver {
}
type lambdaImpl struct {
f func(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage)
f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) (
peer.ID, bsmsg.BitSwapMessage)
}
func (lam *lambdaImpl) ReceiveMessage(ctx context.Context,
p peer.Peer, incoming bsmsg.BitSwapMessage) (
peer.Peer, bsmsg.BitSwapMessage) {
p peer.ID, incoming bsmsg.BitSwapMessage) (
peer.ID, bsmsg.BitSwapMessage) {
return lam.f(ctx, p, incoming)
}

View File

@ -10,18 +10,16 @@ import (
exchange "github.com/jbenet/go-ipfs/exchange"
tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet"
peer "github.com/jbenet/go-ipfs/peer"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
datastore2 "github.com/jbenet/go-ipfs/util/datastore2"
delay "github.com/jbenet/go-ipfs/util/delay"
)
func NewSessionGenerator(
net tn.Network, rs mockrouting.Server) SessionGenerator {
net tn.Network) SessionGenerator {
ctx, cancel := context.WithCancel(context.TODO())
return SessionGenerator{
ps: peer.NewPeerstore(),
net: net,
rs: rs,
seq: 0,
ctx: ctx, // TODO take ctx as param to Next, Instances
cancel: cancel,
@ -31,7 +29,6 @@ func NewSessionGenerator(
type SessionGenerator struct {
seq int
net tn.Network
rs mockrouting.Server
ps peer.Peerstore
ctx context.Context
cancel context.CancelFunc
@ -44,7 +41,7 @@ func (g *SessionGenerator) Close() error {
func (g *SessionGenerator) Next() Instance {
g.seq++
return session(g.ctx, g.net, g.rs, g.ps, []byte(string(g.seq)))
return session(g.ctx, g.net, g.ps, peer.ID(g.seq))
}
func (g *SessionGenerator) Instances(n int) []Instance {
@ -57,7 +54,7 @@ func (g *SessionGenerator) Instances(n int) []Instance {
}
type Instance struct {
Peer peer.Peer
Peer peer.ID
Exchange exchange.Interface
blockstore blockstore.Blockstore
@ -77,11 +74,9 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
// NB: It's easy make mistakes by providing the same peer ID to two different
// sessions. To safeguard, use the SessionGenerator to generate sessions. It's
// just a much better idea.
func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance {
p := ps.WithID(id)
func session(ctx context.Context, net tn.Network, ps peer.Peerstore, p peer.ID) Instance {
adapter := net.Adapter(p)
htc := rs.Client(p)
bsdelay := delay.Fixed(0)
const kWriteCacheElems = 100
@ -93,7 +88,7 @@ func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer
const alwaysSendToPeer = true
bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer)
bs := New(ctx, p, adapter, bstore, alwaysSendToPeer)
return Instance{
Peer: p,

View File

@ -69,7 +69,7 @@ func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M
}
}
fs, err := NewIpns(node, "")
fs, err := NewIpns(node, node.PrivateKey, "")
if err != nil {
t.Fatal(err)
}
@ -226,7 +226,7 @@ func TestFastRepublish(t *testing.T) {
node, mnt := setupIpnsTest(t, nil)
h, err := node.Identity.PrivKey().GetPublic().Hash()
h, err := node.PrivateKey.GetPublic().Hash()
if err != nil {
t.Fatal(err)
}

View File

@ -37,8 +37,8 @@ type FileSystem struct {
}
// NewFileSystem constructs new fs using given core.IpfsNode instance.
func NewIpns(ipfs *core.IpfsNode, ipfspath string) (*FileSystem, error) {
root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey()}, ipfspath)
func NewIpns(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath string) (*FileSystem, error) {
root, err := CreateRoot(ipfs, []ci.PrivKey{sk}, ipfspath)
if err != nil {
return nil, err
}

View File

@ -48,7 +48,7 @@ func internalMount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
}
defer c.Close()
fsys, err := NewIpns(ipfs, ipfspath)
fsys, err := NewIpns(ipfs, ipfs.PrivateKey, ipfspath)
if err != nil {
return err
}

View File

@ -184,9 +184,7 @@ type dagService struct {
// Add adds a node to the dagService, storing the block in the BlockService
func (n *dagService) Add(nd *Node) (u.Key, error) {
k, _ := nd.Key()
log.Debugf("DagService Add [%s]", k)
if n == nil {
if n == nil { // FIXME remove this assertion. protect with constructor invariant
return "", fmt.Errorf("dagService is nil")
}

View File

@ -4,14 +4,18 @@ import (
"testing"
ci "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
u "github.com/jbenet/go-ipfs/util"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
func TestRoutingResolve(t *testing.T) {
local := testutil.NewPeerWithIDString("testID")
d := mockrouting.NewServer().Client(local)
local, err := testutil.RandPeerID()
if err != nil {
t.Fatal(err)
}
d := mockrouting.NewServer().Client(peer.PeerInfo{ID: local})
resolver := NewRoutingResolver(d)
publisher := NewRoutingPublisher(d)

View File

@ -17,15 +17,19 @@ import (
var log = eventlog.Logger("backpressure")
func GenNetwork(ctx context.Context) (inet.Network, error) {
p, err := testutil.PeerWithKeysAndAddress(testutil.RandLocalTCPAddress())
if err != nil {
return nil, err
}
listen := p.Addresses()
func GenNetwork(t *testing.T, ctx context.Context) (inet.Network, error) {
p := testutil.RandPeerNetParams(t)
ps := peer.NewPeerstore()
return inet.NewNetwork(ctx, listen, p, ps)
ps.AddAddress(p.ID, p.Addr)
ps.AddPubKey(p.ID, p.PubKey)
ps.AddPrivKey(p.ID, p.PrivKey)
return inet.NewNetwork(ctx, ps.Addresses(p.ID), p.ID, ps)
}
func divulgeAddresses(a, b inet.Network) {
id := a.LocalPeer()
addrs := a.Peerstore().Addresses(id)
b.Peerstore().AddAddresses(id, addrs)
}
// TestBackpressureStreamHandler tests whether mux handler
@ -83,7 +87,7 @@ a problem.
}
// the sender opens streams as fast as possible
sender := func(net inet.Network, remote peer.Peer) {
sender := func(net inet.Network, remote peer.ID) {
var s inet.Stream
var err error
defer func() {
@ -145,11 +149,11 @@ a problem.
// ok that's enough setup. let's do it!
ctx := context.Background()
n1, err := GenNetwork(ctx)
n1, err := GenNetwork(t, ctx)
if err != nil {
t.Fatal(err)
}
n2, err := GenNetwork(ctx)
n2, err := GenNetwork(t, ctx)
if err != nil {
t.Fatal(err)
}
@ -287,15 +291,18 @@ func TestStBackpressureStreamWrite(t *testing.T) {
// setup the networks
ctx := context.Background()
n1, err := GenNetwork(ctx)
n1, err := GenNetwork(t, ctx)
if err != nil {
t.Fatal(err)
}
n2, err := GenNetwork(ctx)
n2, err := GenNetwork(t, ctx)
if err != nil {
t.Fatal(err)
}
divulgeAddresses(n1, n2)
divulgeAddresses(n2, n1)
// setup sender handler on 1
n1.SetHandler(inet.ProtocolTesting, sender)

View File

@ -11,6 +11,7 @@ import (
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
@ -32,14 +33,14 @@ func ReleaseBuffer(b []byte) {
// singleConn represents a single connection to another Peer (IPFS Node).
type singleConn struct {
local peer.Peer
remote peer.Peer
local peer.ID
remote peer.ID
maconn manet.Conn
msgrw msgio.ReadWriteCloser
}
// newConn constructs a new connection
func newSingleConn(ctx context.Context, local, remote peer.Peer, maconn manet.Conn) (Conn, error) {
func newSingleConn(ctx context.Context, local, remote peer.ID, maconn manet.Conn) (Conn, error) {
conn := &singleConn{
local: local,
@ -83,6 +84,14 @@ func (c *singleConn) RemoteAddr() net.Addr {
return c.maconn.RemoteAddr()
}
func (c *singleConn) LocalPrivateKey() ic.PrivKey {
return nil
}
func (c *singleConn) RemotePublicKey() ic.PubKey {
return nil
}
func (c *singleConn) SetDeadline(t time.Time) error {
return c.maconn.SetDeadline(t)
}
@ -105,12 +114,12 @@ func (c *singleConn) RemoteMultiaddr() ma.Multiaddr {
}
// LocalPeer is the Peer on this side
func (c *singleConn) LocalPeer() peer.Peer {
func (c *singleConn) LocalPeer() peer.ID {
return c.local
}
// RemotePeer is the Peer on the remote side
func (c *singleConn) RemotePeer() peer.Peer {
func (c *singleConn) RemotePeer() peer.ID {
return c.remote
}
@ -145,8 +154,8 @@ func (c *singleConn) ReleaseMsg(m []byte) {
// ID returns the ID of a given Conn.
func ID(c Conn) string {
l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID())
r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID())
l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().Pretty())
r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().Pretty())
lh := u.Hash([]byte(l))
rh := u.Hash([]byte(r))
ch := u.XOR(lh, rh)

View File

@ -5,7 +5,6 @@ import (
"fmt"
"os"
"runtime"
"strconv"
"sync"
"testing"
"time"
@ -14,6 +13,7 @@ import (
)
func testOneSendRecv(t *testing.T, c1, c2 Conn) {
log.Debugf("testOneSendRecv from %s to %s", c1.LocalPeer(), c2.LocalPeer())
m1 := []byte("hello")
if err := c1.WriteMsg(m1); err != nil {
t.Fatal(err)
@ -41,8 +41,9 @@ func testNotOneSendRecv(t *testing.T, c1, c2 Conn) {
func TestClose(t *testing.T) {
// t.Skip("Skipping in favor of another test")
ctx := context.Background()
c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c1, c2, _, _ := setupSingleConn(t, ctx)
testOneSendRecv(t, c1, c2)
testOneSendRecv(t, c2, c1)
@ -56,6 +57,7 @@ func TestClose(t *testing.T) {
}
func TestCloseLeak(t *testing.T) {
// t.Skip("Skipping in favor of another test")
if testing.Short() {
t.SkipNow()
}
@ -66,11 +68,9 @@ func TestCloseLeak(t *testing.T) {
var wg sync.WaitGroup
runPair := func(p1, p2, num int) {
a1 := strconv.Itoa(p1)
a2 := strconv.Itoa(p2)
runPair := func(num int) {
ctx, cancel := context.WithCancel(context.Background())
c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2)
c1, c2, _, _ := setupSingleConn(t, ctx)
for i := 0; i < num; i++ {
b1 := []byte(fmt.Sprintf("beep%d", i))
@ -102,15 +102,15 @@ func TestCloseLeak(t *testing.T) {
wg.Done()
}
var cons = 1
var msgs = 100
fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs)
var cons = 5
var msgs = 50
log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)
for i := 0; i < cons; i++ {
wg.Add(1)
go runPair(2000+i, 2001+i, msgs)
go runPair(msgs)
}
fmt.Printf("Waiting...\n")
log.Debugf("Waiting...\n")
wg.Wait()
// done!

View File

@ -1,6 +1,7 @@
package conn
import (
"fmt"
"strings"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@ -11,49 +12,32 @@ import (
debugerror "github.com/jbenet/go-ipfs/util/debugerror"
)
// Dial connects to a particular peer, over a given network
// Example: d.Dial(ctx, "udp", peer)
func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Conn, error) {
raddr := remote.NetAddress(network)
if raddr == nil {
return nil, debugerror.Errorf("No remote address for network %s", network)
}
return d.DialAddr(ctx, raddr, remote)
// String returns the string rep of d.
func (d *Dialer) String() string {
return fmt.Sprintf("<Dialer %s %s ...>", d.LocalPeer, d.LocalAddrs[0])
}
// DialAddr connects to a peer over a particular address
// Dial connects to a peer over a particular address
// Ensures raddr is part of peer.Addresses()
// Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.Peer) (Conn, error) {
found := false
for _, addr := range remote.Addresses() {
if addr.Equal(raddr) {
found = true
}
}
if !found {
return nil, debugerror.Errorf("address %s is not in peer %s", raddr, remote)
}
func (d *Dialer) Dial(ctx context.Context, raddr ma.Multiaddr, remote peer.ID) (Conn, error) {
network, _, err := manet.DialArgs(raddr)
if err != nil {
return nil, err
}
laddr := d.LocalPeer.NetAddress(network)
if laddr == nil {
return nil, debugerror.Errorf("No local address for network %s", network)
}
if strings.HasPrefix(raddr.String(), "/ip4/0.0.0.0") {
return nil, debugerror.Errorf("Attempted to connect to zero address: %s", raddr)
}
remote.SetType(peer.Remote)
remote, err = d.Peerstore.Add(remote)
if err != nil {
log.Errorf("Error putting peer into peerstore: %s", remote)
var laddr ma.Multiaddr
if len(d.LocalAddrs) > 0 {
// laddr := MultiaddrNetMatch(raddr, d.LocalAddrs)
laddr = NetAddress(network, d.LocalAddrs)
if laddr == nil {
return nil, debugerror.Errorf("No local address for network %s", network)
}
}
// TODO: try to get reusing addr/ports to work.
@ -69,7 +53,7 @@ func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.P
select {
case <-ctx.Done():
maconn.Close()
return nil, err
return nil, ctx.Err()
default:
}
@ -78,17 +62,58 @@ func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.P
return nil, err
}
if d.WithoutSecureTransport {
if d.PrivateKey == nil {
log.Warning("dialer %s dialing INSECURELY %s at %s!", d, remote, raddr)
return c, nil
}
select {
case <-ctx.Done():
c.Close()
return nil, err
return nil, ctx.Err()
default:
}
// return c, nil
return newSecureConn(ctx, c, d.Peerstore)
return newSecureConn(ctx, d.PrivateKey, c)
}
// MultiaddrProtocolsMatch returns whether two multiaddrs match in protocol stacks.
func MultiaddrProtocolsMatch(a, b ma.Multiaddr) bool {
ap := a.Protocols()
bp := b.Protocols()
if len(ap) != len(bp) {
return false
}
for i, api := range ap {
if api != bp[i] {
return false
}
}
return true
}
// MultiaddrNetMatch returns the first Multiaddr found to match network.
func MultiaddrNetMatch(tgt ma.Multiaddr, srcs []ma.Multiaddr) ma.Multiaddr {
for _, a := range srcs {
if MultiaddrProtocolsMatch(tgt, a) {
return a
}
}
return nil
}
// NetAddress returns the first Multiaddr found for a given network.
func NetAddress(n string, addrs []ma.Multiaddr) ma.Multiaddr {
for _, a := range addrs {
for _, p := range a.Protocols() {
if p.Name == n {
return a
}
}
}
return nil
}

View File

@ -2,45 +2,35 @@ package conn
import (
"io"
"net"
"testing"
"time"
ci "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
tu "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
func setupPeer(addr string) (peer.Peer, error) {
tcp, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, err
}
sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
if err != nil {
return nil, err
}
p, err := testutil.NewPeerWithKeyPair(sk, pk)
if err != nil {
return nil, err
}
p.AddAddress(tcp)
return p, nil
}
func echoListen(ctx context.Context, listener Listener) {
for {
c, err := listener.Accept()
if err != nil {
select {
case <-ctx.Done():
return
default:
}
if ne, ok := err.(net.Error); ok && ne.Temporary() {
<-time.After(time.Microsecond * 10)
continue
}
log.Debugf("echoListen: listener appears to be closing")
return
}
go echo(c.(Conn))
}
}
@ -49,106 +39,86 @@ func echo(c Conn) {
io.Copy(c, c)
}
func setupSecureConn(t *testing.T, ctx context.Context, a1, a2 string) (a, b Conn) {
return setupConn(t, ctx, a1, a2, true)
func setupSecureConn(t *testing.T, ctx context.Context) (a, b Conn, p1, p2 tu.PeerNetParams) {
return setupConn(t, ctx, true)
}
func setupSingleConn(t *testing.T, ctx context.Context, a1, a2 string) (a, b Conn) {
return setupConn(t, ctx, a1, a2, false)
func setupSingleConn(t *testing.T, ctx context.Context) (a, b Conn, p1, p2 tu.PeerNetParams) {
return setupConn(t, ctx, false)
}
func setupConn(t *testing.T, ctx context.Context, a1, a2 string, secure bool) (a, b Conn) {
func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p2 tu.PeerNetParams) {
p1, err := setupPeer(a1)
if err != nil {
t.Fatal("error setting up peer", err)
p1 = tu.RandPeerNetParams(t)
p2 = tu.RandPeerNetParams(t)
laddr := p1.Addr
key1 := p1.PrivKey
key2 := p2.PrivKey
if !secure {
key1 = nil
key2 = nil
}
p2, err := setupPeer(a2)
if err != nil {
t.Fatal("error setting up peer", err)
}
laddr := p1.NetAddress("tcp")
if laddr == nil {
t.Fatal("Listen address is nil.")
}
ps1 := peer.NewPeerstore()
ps2 := peer.NewPeerstore()
ps1.Add(p1)
ps2.Add(p2)
l1, err := Listen(ctx, laddr, p1, ps1)
l1.SetWithoutSecureTransport(!secure)
l1, err := Listen(ctx, laddr, p1.ID, key1)
if err != nil {
t.Fatal(err)
}
d2 := &Dialer{
Peerstore: ps2,
LocalPeer: p2,
WithoutSecureTransport: !secure,
LocalPeer: p2.ID,
PrivateKey: key2,
}
var c2 Conn
done := make(chan struct{})
done := make(chan error)
go func() {
c2, err = d2.Dial(ctx, "tcp", p1)
var err error
c2, err = d2.Dial(ctx, p1.Addr, p1.ID)
if err != nil {
t.Fatal("error dialing peer", err)
done <- err
}
done <- struct{}{}
close(done)
}()
c1, err := l1.Accept()
if err != nil {
t.Fatal("failed to accept")
t.Fatal("failed to accept", err)
}
if err := <-done; err != nil {
t.Fatal(err)
}
<-done
return c1.(Conn), c2
return c1.(Conn), c2, p1, p2
}
func TestDialer(t *testing.T) {
func testDialer(t *testing.T, secure bool) {
// t.Skip("Skipping in favor of another test")
p1, err := setupPeer("/ip4/127.0.0.1/tcp/4234")
if err != nil {
t.Fatal("error setting up peer", err)
}
p1 := tu.RandPeerNetParams(t)
p2 := tu.RandPeerNetParams(t)
p2, err := setupPeer("/ip4/127.0.0.1/tcp/4235")
if err != nil {
t.Fatal("error setting up peer", err)
key1 := p1.PrivKey
key2 := p2.PrivKey
if !secure {
key1 = nil
key2 = nil
}
ctx, cancel := context.WithCancel(context.Background())
laddr := p1.NetAddress("tcp")
if laddr == nil {
t.Fatal("Listen address is nil.")
}
ps1 := peer.NewPeerstore()
ps2 := peer.NewPeerstore()
ps1.Add(p1)
ps2.Add(p2)
l, err := Listen(ctx, laddr, p1, ps1)
l1, err := Listen(ctx, p1.Addr, p1.ID, key1)
if err != nil {
t.Fatal(err)
}
go echoListen(ctx, l)
d := &Dialer{
Peerstore: ps2,
LocalPeer: p2,
d2 := &Dialer{
LocalPeer: p2.ID,
PrivateKey: key2,
}
c, err := d.Dial(ctx, "tcp", p1)
go echoListen(ctx, l1)
c, err := d2.Dial(ctx, p1.Addr, p1.ID)
if err != nil {
t.Fatal("error dialing peer", err)
}
@ -180,83 +150,16 @@ func TestDialer(t *testing.T) {
// fmt.Println("closing")
c.Close()
l.Close()
l1.Close()
cancel()
}
func TestDialAddr(t *testing.T) {
func TestDialerInsecure(t *testing.T) {
// t.Skip("Skipping in favor of another test")
p1, err := setupPeer("/ip4/127.0.0.1/tcp/4334")
if err != nil {
t.Fatal("error setting up peer", err)
}
p2, err := setupPeer("/ip4/127.0.0.1/tcp/4335")
if err != nil {
t.Fatal("error setting up peer", err)
}
ctx, cancel := context.WithCancel(context.Background())
laddr := p1.NetAddress("tcp")
if laddr == nil {
t.Fatal("Listen address is nil.")
}
ps1 := peer.NewPeerstore()
ps2 := peer.NewPeerstore()
ps1.Add(p1)
ps2.Add(p2)
l, err := Listen(ctx, laddr, p1, ps1)
if err != nil {
t.Fatal(err)
}
go echoListen(ctx, l)
d := &Dialer{
Peerstore: ps2,
LocalPeer: p2,
}
raddr := p1.NetAddress("tcp")
if raddr == nil {
t.Fatal("Dial address is nil.")
}
c, err := d.DialAddr(ctx, raddr, p1)
if err != nil {
t.Fatal("error dialing peer", err)
}
// fmt.Println("sending")
c.WriteMsg([]byte("beep"))
c.WriteMsg([]byte("boop"))
out, err := c.ReadMsg()
if err != nil {
t.Fatal(err)
}
// fmt.Println("recving", string(out))
data := string(out)
if data != "beep" {
t.Error("unexpected conn output", data)
}
out, err = c.ReadMsg()
if err != nil {
t.Fatal(err)
}
data = string(out)
if string(out) != "boop" {
t.Error("unexpected conn output", data)
}
// fmt.Println("closing")
c.Close()
l.Close()
cancel()
testDialer(t, false)
}
func TestDialerSecure(t *testing.T) {
// t.Skip("Skipping in favor of another test")
testDialer(t, true)
}

View File

@ -2,13 +2,12 @@ package conn
import (
"fmt"
"io"
handshake "github.com/jbenet/go-ipfs/net/handshake"
hspb "github.com/jbenet/go-ipfs/net/handshake/pb"
ggprotoio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ggprotoio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
)
// Handshake1 exchanges local and remote versions and compares them
@ -51,38 +50,3 @@ func Handshake1(ctx context.Context, c Conn) error {
log.Debugf("%s version handshake compatible %s", lpeer, rpeer)
return nil
}
// Handshake3 exchanges local and remote service information
func Handshake3(ctx context.Context, stream io.ReadWriter, c Conn) (*handshake.Handshake3Result, error) {
rpeer := c.RemotePeer()
lpeer := c.LocalPeer()
// setup up protobuf io
maxSize := 4096
r := ggprotoio.NewDelimitedReader(stream, maxSize)
w := ggprotoio.NewDelimitedWriter(stream)
localH := handshake.Handshake3Msg(lpeer, c.RemoteMultiaddr())
remoteH := new(hspb.Handshake3)
// setup + send the message to remote
if err := w.WriteMsg(localH); err != nil {
return nil, err
}
log.Debugf("Handshake3: sent to %s", rpeer)
log.Event(ctx, "handshake3Sent", lpeer, rpeer)
// wait + listen for response
if err := r.ReadMsg(remoteH); err != nil {
return nil, fmt.Errorf("Handshake3 could not receive remote msg: %q", err)
}
log.Debugf("Handshake3: received from %s", rpeer)
log.Event(ctx, "handshake3Received", lpeer, rpeer)
// actually update our state based on the new knowledge
res, err := handshake.Handshake3Update(lpeer, rpeer, remoteH)
if err != nil {
log.Errorf("Handshake3 failed to update %s", rpeer)
}
res.RemoteObservedAddress = c.RemoteMultiaddr()
return res, nil
}

View File

@ -5,6 +5,7 @@ import (
"net"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
@ -16,17 +17,15 @@ import (
type Map map[u.Key]Conn
type PeerConn interface {
// LocalMultiaddr is the Multiaddr on this side
// LocalPeer (this side) ID, PrivateKey, and Address
LocalPeer() peer.ID
LocalPrivateKey() ic.PrivKey
LocalMultiaddr() ma.Multiaddr
// LocalPeer is the Peer on our side of the connection
LocalPeer() peer.Peer
// RemoteMultiaddr is the Multiaddr on the remote side
// RemotePeer ID, PublicKey, and Address
RemotePeer() peer.ID
RemotePublicKey() ic.PubKey
RemoteMultiaddr() ma.Multiaddr
// RemotePeer is the Peer on the remote side
RemotePeer() peer.Peer
}
// Conn is a generic message-based Peer-to-Peer connection.
@ -54,16 +53,14 @@ type Conn interface {
type Dialer struct {
// LocalPeer is the identity of the local Peer.
LocalPeer peer.Peer
LocalPeer peer.ID
// Peerstore is the set of peers we know about locally. The Dialer needs it
// because when an incoming connection is identified, we should reuse the
// same peer objects (otherwise things get inconsistent).
Peerstore peer.Peerstore
// LocalAddrs is a set of local addresses to use.
LocalAddrs []ma.Multiaddr
// WithoutSecureTransport determines whether to initialize an insecure connection.
// Phrased negatively so default is Secure, and verbosely to be very clear.
WithoutSecureTransport bool
// PrivateKey used to initialize a secure connection.
// Warning: if PrivateKey is nil, connection will not be secured.
PrivateKey ic.PrivKey
}
// Listener is an object that can accept connections. It matches net.Listener
@ -72,11 +69,6 @@ type Listener interface {
// Accept waits for and returns the next connection to the listener.
Accept() (net.Conn, error)
// {Set}WithoutSecureTransport decides whether to start insecure connections.
// Phrased negatively so default is Secure, and verbosely to be very clear.
WithoutSecureTransport() bool
SetWithoutSecureTransport(bool)
// Addr is the local address
Addr() net.Addr
@ -84,12 +76,7 @@ type Listener interface {
Multiaddr() ma.Multiaddr
// LocalPeer is the identity of the local Peer.
LocalPeer() peer.Peer
// Peerstore is the set of peers we know about locally. The Listener needs it
// because when an incoming connection is identified, we should reuse the
// same peer objects (otherwise things get inconsistent).
Peerstore() peer.Peerstore
LocalPeer() peer.ID
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.

View File

@ -5,31 +5,37 @@ import (
"net"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
)
// listener is an object that can accept connections. It implements Listener
type listener struct {
withoutSecureTransport bool
manet.Listener
// Local multiaddr to listen on
maddr ma.Multiaddr
maddr ma.Multiaddr // Local multiaddr to listen on
local peer.ID // LocalPeer is the identity of the local Peer
privk ic.PrivKey // private key to use to initialize secure conns
// LocalPeer is the identity of the local Peer.
local peer.Peer
cg ctxgroup.ContextGroup
}
// Peerstore is the set of peers we know about locally
peers peer.Peerstore
func (l *listener) teardown() error {
defer log.Debugf("listener closed: %s %s", l.local, l.maddr)
return l.Listener.Close()
}
func (l *listener) Close() error {
log.Infof("listener closing: %s %s", l.local, l.maddr)
return l.Listener.Close()
log.Debugf("listener closing: %s %s", l.local, l.maddr)
return l.cg.Close()
}
func (l *listener) String() string {
return fmt.Sprintf("<Listener %s %s>", l.local, l.maddr)
}
// Accept waits for and returns the next connection to the listener.
@ -46,29 +52,22 @@ func (l *listener) Accept() (net.Conn, error) {
return nil, err
}
c, err := newSingleConn(ctx, l.local, nil, maconn)
c, err := newSingleConn(ctx, l.local, "", maconn)
if err != nil {
return nil, fmt.Errorf("Error accepting connection: %v", err)
}
if l.withoutSecureTransport {
if l.privk == nil {
log.Warning("listener %s listening INSECURELY!", l)
return c, nil
}
sc, err := newSecureConn(ctx, c, l.peers)
sc, err := newSecureConn(ctx, l.privk, c)
if err != nil {
return nil, fmt.Errorf("Error securing connection: %v", err)
}
return sc, nil
}
func (l *listener) WithoutSecureTransport() bool {
return l.withoutSecureTransport
}
func (l *listener) SetWithoutSecureTransport(b bool) {
l.withoutSecureTransport = b
}
func (l *listener) Addr() net.Addr {
return l.Listener.Addr()
}
@ -79,29 +78,22 @@ func (l *listener) Multiaddr() ma.Multiaddr {
}
// LocalPeer is the identity of the local Peer.
func (l *listener) LocalPeer() peer.Peer {
func (l *listener) LocalPeer() peer.ID {
return l.local
}
// Peerstore is the set of peers we know about locally. The Listener needs it
// because when an incoming connection is identified, we should reuse the
// same peer objects (otherwise things get inconsistent).
func (l *listener) Peerstore() peer.Peerstore {
return l.peers
}
func (l *listener) Loggable() map[string]interface{} {
return map[string]interface{}{
"listener": map[string]interface{}{
"peer": l.LocalPeer(),
"address": l.Multiaddr(),
"withoutSecureTransport": l.withoutSecureTransport,
"peer": l.LocalPeer(),
"address": l.Multiaddr(),
"secure": (l.privk != nil),
},
}
}
// Listen listens on the particular multiaddr, with given peer and peerstore.
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.Peerstore) (Listener, error) {
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey) (Listener, error) {
ml, err := manet.Listen(addr)
if err != nil {
@ -111,10 +103,11 @@ func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.
l := &listener{
Listener: ml,
maddr: addr,
peers: peers,
local: local,
withoutSecureTransport: false,
privk: sk,
cg: ctxgroup.WithContext(ctx),
}
l.cg.SetTeardown(l.teardown)
log.Infof("swarm listening on %s\n", l.Multiaddr())
log.Event(ctx, "swarmListen", l)

View File

@ -8,8 +8,10 @@ import (
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
ic "github.com/jbenet/go-ipfs/crypto"
secio "github.com/jbenet/go-ipfs/crypto/secio"
peer "github.com/jbenet/go-ipfs/peer"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
// secureConn wraps another Conn object with an encrypted channel.
@ -26,10 +28,21 @@ type secureConn struct {
}
// newConn constructs a new connection
func newSecureConn(ctx context.Context, insecure Conn, peers peer.Peerstore) (Conn, error) {
func newSecureConn(ctx context.Context, sk ic.PrivKey, insecure Conn) (Conn, error) {
if insecure == nil {
return nil, errors.New("insecure is nil")
}
if insecure.LocalPeer() == "" {
return nil, errors.New("insecure.LocalPeer() is nil")
}
if sk == nil {
panic("way")
return nil, errors.New("private key is nil")
}
// NewSession performs the secure handshake, which takes multiple RTT
sessgen := secio.SessionGenerator{Local: insecure.LocalPeer(), Peerstore: peers}
sessgen := secio.SessionGenerator{LocalID: insecure.LocalPeer(), PrivateKey: sk}
session, err := sessgen.NewSession(ctx, insecure)
if err != nil {
return nil, err
@ -92,15 +105,25 @@ func (c *secureConn) RemoteMultiaddr() ma.Multiaddr {
}
// LocalPeer is the Peer on this side
func (c *secureConn) LocalPeer() peer.Peer {
func (c *secureConn) LocalPeer() peer.ID {
return c.session.LocalPeer()
}
// RemotePeer is the Peer on the remote side
func (c *secureConn) RemotePeer() peer.Peer {
func (c *secureConn) RemotePeer() peer.ID {
return c.session.RemotePeer()
}
// LocalPrivateKey is the public key of the peer on this side
func (c *secureConn) LocalPrivateKey() ic.PrivKey {
return c.session.LocalPrivateKey()
}
// RemotePubKey is the public key of the peer on the remote side
func (c *secureConn) RemotePublicKey() ic.PubKey {
return c.session.RemotePublicKey()
}
// Read reads data, net.Conn style
func (c *secureConn) Read(buf []byte) (int, error) {
return c.secure.Read(buf)

View File

@ -2,50 +2,78 @@ package conn
import (
"bytes"
"fmt"
"os"
"runtime"
"strconv"
"sync"
"testing"
"time"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/crypto"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
)
func upgradeToSecureConn(t *testing.T, ctx context.Context, c Conn) (Conn, error) {
func upgradeToSecureConn(t *testing.T, ctx context.Context, sk ic.PrivKey, c Conn) (Conn, error) {
if c, ok := c.(*secureConn); ok {
return c, nil
}
// shouldn't happen, because dial + listen already return secure conns.
s, err := newSecureConn(ctx, c, peer.NewPeerstore())
s, err := newSecureConn(ctx, sk, c)
if err != nil {
return nil, err
}
return s, nil
}
func secureHandshake(t *testing.T, ctx context.Context, c Conn, done chan error) {
_, err := upgradeToSecureConn(t, ctx, c)
func secureHandshake(t *testing.T, ctx context.Context, sk ic.PrivKey, c Conn, done chan error) {
_, err := upgradeToSecureConn(t, ctx, sk, c)
done <- err
}
func TestSecureSimple(t *testing.T) {
// t.Skip("Skipping in favor of another test")
numMsgs := 100
if testing.Short() {
numMsgs = 10
}
ctx := context.Background()
c1, c2, p1, p2 := setupSingleConn(t, ctx)
done := make(chan error)
go secureHandshake(t, ctx, p1.PrivKey, c1, done)
go secureHandshake(t, ctx, p2.PrivKey, c2, done)
for i := 0; i < 2; i++ {
if err := <-done; err != nil {
t.Fatal(err)
}
}
for i := 0; i < numMsgs; i++ {
testOneSendRecv(t, c1, c2)
testOneSendRecv(t, c2, c1)
}
c1.Close()
c2.Close()
}
func TestSecureClose(t *testing.T) {
// t.Skip("Skipping in favor of another test")
ctx := context.Background()
c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/6634", "/ip4/127.0.0.1/tcp/6645")
c1, c2, p1, p2 := setupSingleConn(t, ctx)
done := make(chan error)
go secureHandshake(t, ctx, c1, done)
go secureHandshake(t, ctx, c2, done)
go secureHandshake(t, ctx, p1.PrivKey, c1, done)
go secureHandshake(t, ctx, p2.PrivKey, c2, done)
for i := 0; i < 2; i++ {
if err := <-done; err != nil {
t.Error(err)
t.Fatal(err)
}
}
@ -64,13 +92,13 @@ func TestSecureCancelHandshake(t *testing.T) {
// t.Skip("Skipping in favor of another test")
ctx, cancel := context.WithCancel(context.Background())
c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/6634", "/ip4/127.0.0.1/tcp/6645")
c1, c2, p1, p2 := setupSingleConn(t, ctx)
done := make(chan error)
go secureHandshake(t, ctx, c1, done)
<-time.After(50 * time.Millisecond)
go secureHandshake(t, ctx, p1.PrivKey, c1, done)
<-time.After(time.Millisecond)
cancel() // cancel ctx
go secureHandshake(t, ctx, c2, done)
go secureHandshake(t, ctx, p2.PrivKey, c2, done)
for i := 0; i < 2; i++ {
if err := <-done; err == nil {
@ -79,6 +107,24 @@ func TestSecureCancelHandshake(t *testing.T) {
}
}
func TestSecureHandshakeFailsWithWrongKeys(t *testing.T) {
// t.Skip("Skipping in favor of another test")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c1, c2, p1, p2 := setupSingleConn(t, ctx)
done := make(chan error)
go secureHandshake(t, ctx, p2.PrivKey, c1, done)
go secureHandshake(t, ctx, p1.PrivKey, c2, done)
for i := 0; i < 2; i++ {
if err := <-done; err == nil {
t.Fatal("wrong keys should've errored out.")
}
}
}
func TestSecureCloseLeak(t *testing.T) {
// t.Skip("Skipping in favor of another test")
@ -89,15 +135,11 @@ func TestSecureCloseLeak(t *testing.T) {
t.Skip("this doesn't work well on travis")
}
var wg sync.WaitGroup
runPair := func(p1, p2, num int) {
a1 := strconv.Itoa(p1)
a2 := strconv.Itoa(p2)
ctx, cancel := context.WithCancel(context.Background())
c1, c2 := setupSecureConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2)
runPair := func(c1, c2 Conn, num int) {
log.Debugf("runPair %d", num)
for i := 0; i < num; i++ {
log.Debugf("runPair iteration %d", i)
b1 := []byte("beep")
c1.WriteMsg(b1)
b2, err := c2.ReadMsg()
@ -120,22 +162,32 @@ func TestSecureCloseLeak(t *testing.T) {
<-time.After(time.Microsecond * 5)
}
c1.Close()
c2.Close()
cancel() // close the listener
wg.Done()
}
var cons = 20
var msgs = 100
fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs)
var cons = 5
var msgs = 50
log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)
var wg sync.WaitGroup
for i := 0; i < cons; i++ {
wg.Add(1)
go runPair(2000+i, 2001+i, msgs)
ctx, cancel := context.WithCancel(context.Background())
c1, c2, _, _ := setupSecureConn(t, ctx)
go func(c1, c2 Conn) {
defer func() {
c1.Close()
c2.Close()
cancel()
wg.Done()
}()
runPair(c1, c2, msgs)
}(c1, c2)
}
fmt.Printf("Waiting...\n")
log.Debugf("Waiting...\n")
wg.Wait()
// done!

View File

@ -6,10 +6,13 @@ import (
config "github.com/jbenet/go-ipfs/config"
pb "github.com/jbenet/go-ipfs/net/handshake/pb"
u "github.com/jbenet/go-ipfs/util"
semver "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/coreos/go-semver/semver"
)
var log = u.Logger("handshake")
// IpfsVersion holds the current protocol version for a client running this code
var IpfsVersion *semver.Version
var ClientVersion = "go-ipfs/" + config.CurrentVersionNumber
@ -51,6 +54,13 @@ func Handshake1Compatible(handshakeA, handshakeB *pb.Handshake1) error {
// NewHandshake1 creates a new Handshake1 from the two strings
func NewHandshake1(protoVer, agentVer string) *pb.Handshake1 {
if protoVer == "" {
protoVer = IpfsVersion.String()
}
if agentVer == "" {
agentVer = ClientVersion
}
return &pb.Handshake1{
ProtocolVersion: &protoVer,
AgentVersion: &agentVer,

View File

@ -1,82 +0,0 @@
package handshake
import (
"fmt"
pb "github.com/jbenet/go-ipfs/net/handshake/pb"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
var log = u.Logger("handshake")
// Handshake3Msg constructs a Handshake3 msg.
func Handshake3Msg(localPeer peer.Peer, remoteAddr ma.Multiaddr) *pb.Handshake3 {
var msg pb.Handshake3
// don't need publicKey after secure channel.
// msg.PublicKey = localPeer.PubKey().Bytes()
// local listen addresses
addrs := localPeer.Addresses()
msg.ListenAddrs = make([][]byte, len(addrs))
for i, a := range addrs {
msg.ListenAddrs[i] = a.Bytes()
}
// observed remote address
msg.ObservedAddr = remoteAddr.Bytes()
// services
// srv := localPeer.Services()
// msg.Services = make([]mux.ProtocolID, len(srv))
// for i, pid := range srv {
// msg.Services[i] = pid
// }
return &msg
}
// Handshake3Update updates local knowledge with the information in the
// handshake3 msg we received from remote client.
func Handshake3Update(lpeer, rpeer peer.Peer, msg *pb.Handshake3) (*Handshake3Result, error) {
res := &Handshake3Result{}
// our observed address
observedAddr, err := ma.NewMultiaddrBytes(msg.GetObservedAddr())
if err != nil {
return res, err
}
if lpeer.AddAddress(observedAddr) {
log.Debugf("(nat) added new local, remote-observed address: %s", observedAddr)
}
res.LocalObservedAddress = observedAddr
// remote's reported addresses
for _, a := range msg.GetListenAddrs() {
addr, err := ma.NewMultiaddrBytes(a)
if err != nil {
err = fmt.Errorf("remote peer address not a multiaddr: %s", err)
log.Errorf("Handshake3 error %s", err)
return res, err
}
rpeer.AddAddress(addr)
res.RemoteListenAddresses = append(res.RemoteListenAddresses, addr)
}
return res, nil
}
// Handshake3Result collects the knowledge gained in Handshake3.
type Handshake3Result struct {
// The addresses reported by the remote client
RemoteListenAddresses []ma.Multiaddr
// The address of the remote client we observed in this connection
RemoteObservedAddress ma.Multiaddr
// The address the remote client observed from this connection
LocalObservedAddress ma.Multiaddr
}

View File

@ -53,8 +53,16 @@ func (m *Handshake1) GetAgentVersion() string {
// Handshake3 is delivered _after_ the secure channel is initialized
type Handshake3 struct {
// can include all the values in handshake1, for protocol version, etc.
H1 *Handshake1 `protobuf:"bytes,5,opt,name=h1" json:"h1,omitempty"`
// publicKey is this node's public key (which also gives its node.ID)
// - may not need to be sent, as secure channel implies it has been sent.
// - then again, if we change / disable secure channel, may still want it.
PublicKey []byte `protobuf:"bytes,1,opt,name=publicKey" json:"publicKey,omitempty"`
// listenAddrs are the multiaddrs the sender node listens for open connections on
ListenAddrs [][]byte `protobuf:"bytes,2,rep,name=listenAddrs" json:"listenAddrs,omitempty"`
// protocols are the services this node is running
Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"`
// observedAddr is the multiaddr of the remote endpoint that the sender node perceives
// this is useful information to convey to the other side, as it helps the remote endpoint
// determine whether its connection to the local peer goes through NAT.
@ -66,6 +74,20 @@ func (m *Handshake3) Reset() { *m = Handshake3{} }
func (m *Handshake3) String() string { return proto.CompactTextString(m) }
func (*Handshake3) ProtoMessage() {}
func (m *Handshake3) GetH1() *Handshake1 {
if m != nil {
return m.H1
}
return nil
}
func (m *Handshake3) GetPublicKey() []byte {
if m != nil {
return m.PublicKey
}
return nil
}
func (m *Handshake3) GetListenAddrs() [][]byte {
if m != nil {
return m.ListenAddrs
@ -73,6 +95,13 @@ func (m *Handshake3) GetListenAddrs() [][]byte {
return nil
}
func (m *Handshake3) GetProtocols() []string {
if m != nil {
return m.Protocols
}
return nil
}
func (m *Handshake3) GetObservedAddr() []byte {
if m != nil {
return m.ObservedAddr

View File

@ -17,19 +17,19 @@ message Handshake1 {
// Handshake3 is delivered _after_ the secure channel is initialized
message Handshake3 {
// can include all the values in handshake1, for protocol version, etc.
optional Handshake1 h1 = 5;
// publicKey is this node's public key (which also gives its node.ID)
// - may not need to be sent, as secure channel implies it has been sent.
// - then again, if we change / disable secure channel, may still want it.
// optional bytes publicKey = 1;
optional bytes publicKey = 1;
// listenAddrs are the multiaddrs the sender node listens for open connections on
repeated bytes listenAddrs = 2;
// TODO
// services list the services this node is running
// repeated mux.ProtocolID services = 3;
// we'll have more fields here later.
// protocols are the services this node is running
repeated string protocols = 3;
// observedAddr is the multiaddr of the remote endpoint that the sender node perceives
// this is useful information to convey to the other side, as it helps the remote endpoint

178
net/id.go Normal file
View File

@ -0,0 +1,178 @@
package net
import (
"sync"
handshake "github.com/jbenet/go-ipfs/net/handshake"
pb "github.com/jbenet/go-ipfs/net/handshake/pb"
ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
// IDService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The IDService sends:
// * Our IPFS Protocol Version
// * Our IPFS Agent Version
// * Our public Listen Addresses
type IDService struct {
Network Network
// connections undergoing identification
// for wait purposes
currid map[Conn]chan struct{}
currmu sync.RWMutex
}
// NewIDService constructs an IDService for the given network and
// registers its request handler under ProtocolIdentify, so incoming
// identify streams are answered automatically.
func NewIDService(n Network) *IDService {
	ids := &IDService{
		Network: n,
		currid:  map[Conn]chan struct{}{},
	}
	n.SetHandler(ProtocolIdentify, ids.RequestHandler)
	return ids
}
// IdentifyConn runs the identify protocol on the given connection:
// it opens a ProtocolIdentify stream to the remote side, consumes its
// Handshake3 response, and then releases anyone blocked in IdentifyWait.
// A second call for a conn already being identified is a no-op.
func (ids *IDService) IdentifyConn(c Conn) {
	ids.currmu.Lock()
	if _, found := ids.currid[c]; found {
		ids.currmu.Unlock()
		log.Debugf("IdentifyConn called twice on: %s", c)
		return // already identifying it.
	}
	ids.currid[c] = make(chan struct{})
	ids.currmu.Unlock()

	s, err := c.NewStreamWithProtocol(ProtocolIdentify)
	if err != nil {
		// log.Errorf (not log.Error) -- the message has printf verbs.
		log.Errorf("network: unable to open initial stream for %s: %s", ProtocolIdentify, err)
		log.Event(ids.Network.CtxGroup().Context(), "IdentifyOpenFailed", c.RemotePeer())
	} else {
		// only hand the stream to our handler if we actually got one;
		// previously a nil stream was used here after a failed open.
		ids.ResponseHandler(s)
	}

	// whether identification succeeded or not, remove the entry and
	// release everyone waiting on IdentifyWait.
	ids.currmu.Lock()
	ch, found := ids.currid[c]
	delete(ids.currid, c)
	ids.currmu.Unlock()

	if !found {
		log.Errorf("IdentifyConn failed to find channel (programmer error) for %s", c)
		return
	}

	close(ch) // release everyone waiting.
}
// RequestHandler answers an incoming identify request: it populates a
// Handshake3 message describing the local node and writes it to the
// stream, closing the stream when done.
func (ids *IDService) RequestHandler(s Stream) {
	defer s.Close()
	c := s.Conn()

	w := ggio.NewDelimitedWriter(s)
	mes := pb.Handshake3{}
	ids.populateMessage(&mes, c)
	// the write error was previously dropped; surface it so a broken
	// stream does not look like a successful identify.
	if err := w.WriteMsg(&mes); err != nil {
		log.Errorf("%s error sending message to %s %s", ProtocolIdentify,
			c.RemotePeer(), c.RemoteMultiaddr())
		return
	}

	log.Debugf("%s sent message to %s %s", ProtocolIdentify,
		c.RemotePeer(), c.RemoteMultiaddr())
}
// ResponseHandler reads the remote side's Handshake3 message off the
// stream and folds its contents into our local knowledge of the peer.
func (ids *IDService) ResponseHandler(s Stream) {
	defer s.Close()

	conn := s.Conn()
	reader := ggio.NewDelimitedReader(s, 2048)

	var hs3 pb.Handshake3
	if err := reader.ReadMsg(&hs3); err != nil {
		log.Errorf("%s error receiving message from %s %s", ProtocolIdentify,
			conn.RemotePeer(), conn.RemoteMultiaddr())
		return
	}

	ids.consumeMessage(&hs3, conn)
	log.Debugf("%s received message from %s %s", ProtocolIdentify,
		conn.RemotePeer(), conn.RemoteMultiaddr())
}
// populateMessage fills mes with everything the local node wants to
// tell the remote side: the protocols we handle, the address we observe
// for them, our listen addresses, and our version information.
func (ids *IDService) populateMessage(mes *pb.Handshake3, c Conn) {

	// protocols this node currently has handlers registered for
	handled := ids.Network.Protocols()
	mes.Protocols = make([]string, len(handled))
	for i, proto := range handled {
		mes.Protocols[i] = string(proto)
	}

	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = c.RemoteMultiaddr().Bytes()

	// our listen addresses, expanded over local interfaces
	laddrs, err := ids.Network.InterfaceListenAddresses()
	if err != nil {
		log.Error(err)
	} else {
		mes.ListenAddrs = make([][]byte, len(laddrs))
		for i, addr := range laddrs {
			mes.ListenAddrs[i] = addr.Bytes()
		}
		log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs)
	}

	// protocol + agent version info
	mes.H1 = handshake.NewHandshake1("", "")
}
// consumeMessage folds a received Handshake3 into our peerstore:
// the remote's listen addresses and its protocol/agent versions.
func (ids *IDService) consumeMessage(mes *pb.Handshake3, c Conn) {
	p := c.RemotePeer()

	// mes.Protocols
	// mes.ObservedAddr

	// remote's reported listen addresses; skip (but log) unparsable ones.
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Errorf("%s failed to parse multiaddr from %s %s", ProtocolIdentify, p,
				c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}

	// update our peerstore with the addresses.
	ids.Network.Peerstore().AddAddresses(p, lmaddrs)
	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)

	// get protocol versions. use the nil-safe generated getters:
	// a remote peer that omits H1 must not be able to crash us
	// (previously *mes.H1.ProtocolVersion dereferenced without a check).
	pv := mes.GetH1().GetProtocolVersion()
	av := mes.GetH1().GetAgentVersion()
	ids.Network.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Network.Peerstore().Put(p, "AgentVersion", av)
}
// IdentifyWait returns a channel that is closed once the
// "ProtocolIdentify" (handshake3) exchange finishes on the given conn.
// Identification runs asynchronously, so the connection can be used
// before this knowledge is available.
// Callers **MUST** invoke IdentifyWait _after_ IdentifyConn.
func (ids *IDService) IdentifyWait(c Conn) <-chan struct{} {
	ids.currmu.Lock()
	ch, found := ids.currid[c]
	ids.currmu.Unlock()

	if !found {
		// no entry: identification already finished, or never started.
		// either way, hand back a pre-closed channel so callers don't block.
		done := make(chan struct{})
		close(done)
		return done
	}
	return ch
}

130
net/id_test.go Normal file
View File

@ -0,0 +1,130 @@
package net_test
import (
"testing"
"time"
inet "github.com/jbenet/go-ipfs/net"
handshake "github.com/jbenet/go-ipfs/net/handshake"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
// GenNetwork creates a network with a freshly generated peer identity,
// backed by its own peerstore (keys + listen address registered).
func GenNetwork(t *testing.T, ctx context.Context) inet.Network {
	params := testutil.RandPeerNetParams(t)

	ps := peer.NewPeerstore()
	ps.AddPrivKey(params.ID, params.PrivKey)
	ps.AddPubKey(params.ID, params.PubKey)
	ps.AddAddress(params.ID, params.Addr)

	network, err := inet.NewNetwork(ctx, ps.Addresses(params.ID), params.ID, ps)
	if err != nil {
		t.Fatal(err)
	}
	return network
}
// DivulgeAddresses copies a's own addresses into b's peerstore,
// so that b becomes able to dial a.
func DivulgeAddresses(a, b inet.Network) {
	self := a.LocalPeer()
	b.Peerstore().AddAddresses(self, a.Peerstore().Addresses(self))
}
// subtestIDService spins up two networks, dials one from the other, and
// verifies the identify protocol exchanged listen addresses and version
// information in both directions. postDialWait optionally gives the
// dialing side extra time to finish identification after Dial returns.
func subtestIDService(t *testing.T, postDialWait time.Duration) {

	// the generated networks should have the id service wired in.
	ctx := context.Background()
	n1 := GenNetwork(t, ctx)
	n2 := GenNetwork(t, ctx)

	n1p := n1.LocalPeer()
	n2p := n2.LocalPeer()

	testKnowsAddrs(t, n1, n2p, []ma.Multiaddr{}) // nothing
	testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{}) // nothing

	// have n2 tell n1, so we can dial...
	DivulgeAddresses(n2, n1)

	testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them
	testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{})              // nothing

	if err := n1.DialPeer(ctx, n2p); err != nil {
		// %s verb added: Fatalf with no verb is a vet error and dropped err.
		t.Fatalf("Failed to dial: %s", err)
	}

	// we need to wait here if Dial returns before ID service is finished.
	if postDialWait > 0 {
		<-time.After(postDialWait)
	}

	// the IDService should be opened automatically, by the network.
	// what we should see now is that both peers know about each others listen addresses.
	testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them
	testHasProtocolVersions(t, n1, n2p)

	// now, this wait we do have to do. it's the wait for the Listening side
	// to be done identifying the connection.
	c := n2.ConnsToPeer(n1.LocalPeer())
	if len(c) < 1 {
		t.Fatal("should have connection by now at least.")
	}
	<-n2.IdentifyProtocol().IdentifyWait(c[0])

	// and the protocol versions.
	testKnowsAddrs(t, n2, n1p, n1.Peerstore().Addresses(n1p)) // has them
	testHasProtocolVersions(t, n2, n1p)
}
// testKnowsAddrs checks that network n's peerstore holds exactly the
// expected set of addresses for peer p.
func testKnowsAddrs(t *testing.T, n inet.Network, p peer.ID, expected []ma.Multiaddr) {
	actual := n.Peerstore().Addresses(p)

	if len(actual) != len(expected) {
		t.Error("dont have the same addresses")
	}

	seen := make(map[string]struct{}, len(actual))
	for _, addr := range actual {
		seen[addr.String()] = struct{}{}
	}
	for _, addr := range expected {
		if _, ok := seen[addr.String()]; !ok {
			t.Errorf("%s did not have addr for %s: %s", n.LocalPeer(), p, addr)
			// panic("ahhhhhhh")
		}
	}
}
// testHasProtocolVersions checks that network n's peerstore recorded
// the remote peer's ProtocolVersion and AgentVersion via identify.
func testHasProtocolVersions(t *testing.T, n inet.Network, p peer.ID) {
	v, err := n.Peerstore().Get(p, "ProtocolVersion")
	if err != nil || v == nil {
		// previously err was ignored; a Get failure now reports cleanly.
		t.Error("no protocol version")
		return
	}
	if v.(string) != handshake.IpfsVersion.String() {
		t.Error("protocol mismatch", err)
	}
	v, err = n.Peerstore().Get(p, "AgentVersion")
	if err != nil || v == nil {
		// guard the type assertion: a nil v previously panicked the test.
		t.Error("no agent version")
		return
	}
	if v.(string) != handshake.ClientVersion {
		t.Error("agent version mismatch", err)
	}
}
// TestIDServiceWait gives the ID service 100ms to finish after dialing.
// This is because it used to run concurrently; now Dial waits until the
// id service is done.
func TestIDServiceWait(t *testing.T) {
	const rounds = 3
	for i := 0; i < rounds; i++ {
		subtestIDService(t, 100*time.Millisecond)
	}
}
// TestIDServiceNoWait runs the identify exchange with no extra
// post-dial delay; Dial itself must wait for identification.
func TestIDServiceNoWait(t *testing.T) {
	const rounds = 3
	for i := 0; i < rounds; i++ {
		subtestIDService(t, 0)
	}
}

View File

@ -18,10 +18,11 @@ type ProtocolID string
// These are the ProtocolIDs of the protocols running. It is useful
// to keep them in one place.
const (
ProtocolTesting ProtocolID = "/ipfs/testing"
ProtocolBitswap ProtocolID = "/ipfs/bitswap"
ProtocolDHT ProtocolID = "/ipfs/dht"
ProtocolDiag ProtocolID = "/ipfs/diagnostics"
ProtocolTesting ProtocolID = "/ipfs/testing"
ProtocolBitswap ProtocolID = "/ipfs/bitswap"
ProtocolDHT ProtocolID = "/ipfs/dht"
ProtocolIdentify ProtocolID = "/ipfs/id"
ProtocolDiag ProtocolID = "/ipfs/diagnostics"
)
// MessageSizeMax is a soft (recommended) maximum for network messages.
@ -56,8 +57,8 @@ type StreamHandlerMap map[ProtocolID]StreamHandler
type Conn interface {
conn.PeerConn
// NewStreamWithProtocol constructs a new Stream directly connected to p.
NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error)
// NewStreamWithProtocol constructs a new Stream over this conn.
NewStreamWithProtocol(pr ProtocolID) (Stream, error)
}
// Network is the interface IPFS uses for connecting to the world.
@ -72,17 +73,24 @@ type Network interface {
// This operation is threadsafe.
SetHandler(ProtocolID, StreamHandler)
// Protocols returns the list of protocols this network currently
// has registered handlers for.
Protocols() []ProtocolID
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
NewStream(ProtocolID, peer.Peer) (Stream, error)
NewStream(ProtocolID, peer.ID) (Stream, error)
// Peers returns the peers connected
Peers() []peer.Peer
Peers() []peer.ID
// Conns returns the connections in this Network
Conns() []Conn
// ConnsToPeer returns the connections in this Network for given peer.
ConnsToPeer(p peer.ID) []Conn
// BandwidthTotals returns the total number of bytes passed through
// the network since it was instantiated
BandwidthTotals() (uint64, uint64)
@ -97,23 +105,34 @@ type Network interface {
// CtxGroup returns the network's contextGroup
CtxGroup() ctxgroup.ContextGroup
// IdentifyProtocol returns the instance of the object running the Identify
// Protocol. This is what runs the ifps handshake-- this should be removed
// if this abstracted out to its own package.
IdentifyProtocol() *IDService
}
// Dialer represents a service that can dial out to peers
// (this is usually just a Network, but other services may not need the whole
// stack, and thus it becomes easier to mock)
type Dialer interface {
// Peerstore returns the internal peerstore
// This is useful to tell the dialer about a new address for a peer.
// Or use one of the public keys found out over the network.
Peerstore() peer.Peerstore
// LocalPeer returns the local peer associated with this network
LocalPeer() peer.Peer
LocalPeer() peer.ID
// DialPeer attempts to establish a connection to a given peer
DialPeer(context.Context, peer.Peer) error
DialPeer(context.Context, peer.ID) error
// ClosePeer closes the connection to a given peer
ClosePeer(peer.Peer) error
ClosePeer(peer.ID) error
// Connectedness returns a state signaling connection capabilities
Connectedness(peer.Peer) Connectedness
Connectedness(peer.ID) Connectedness
}
// Connectedness signals the capacity for a connection with a given node.

View File

@ -10,21 +10,28 @@ import (
"io"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
type Mocknet interface {
// GenPeer generates a peer and its inet.Network in the Mocknet
GenPeer() (inet.Network, error)
AddPeer(peer.ID) (inet.Network, error)
// AddPeer adds an existing peer. we need both a privkey and addr.
// ID is derived from PrivKey
AddPeer(ic.PrivKey, ma.Multiaddr) (inet.Network, error)
// retrieve things
Peer(peer.ID) peer.Peer
Peers() []peer.Peer
Peers() []peer.ID
Net(peer.ID) inet.Network
Nets() []inet.Network
Links() LinkMap
LinksBetweenPeers(a, b peer.Peer) []Link
LinksBetweenPeers(a, b peer.ID) []Link
LinksBetweenNets(a, b inet.Network) []Link
// Links are the **ability to connect**.
@ -32,10 +39,10 @@ type Mocknet interface {
// For p1 and p2 to connect, a link must exist between them.
// (this makes it possible to test dial failures, and
// things like relaying traffic)
LinkPeers(peer.Peer, peer.Peer) (Link, error)
LinkPeers(peer.ID, peer.ID) (Link, error)
LinkNets(inet.Network, inet.Network) (Link, error)
Unlink(Link) error
UnlinkPeers(peer.Peer, peer.Peer) error
UnlinkPeers(peer.ID, peer.ID) error
UnlinkNets(inet.Network, inet.Network) error
// LinkDefaults are the default options that govern links
@ -45,9 +52,9 @@ type Mocknet interface {
// Connections are the usual. Connecting means Dialing.
// **to succeed, peers must be linked beforehand**
ConnectPeers(peer.Peer, peer.Peer) error
ConnectPeers(peer.ID, peer.ID) error
ConnectNets(inet.Network, inet.Network) error
DisconnectPeers(peer.Peer, peer.Peer) error
DisconnectPeers(peer.ID, peer.ID) error
DisconnectNets(inet.Network, inet.Network) error
}
@ -66,7 +73,7 @@ type LinkOptions struct {
// nodes cannot talk to each other directly. :)
type Link interface {
Networks() []inet.Network
Peers() []peer.Peer
Peers() []peer.ID
SetOptions(LinkOptions)
Options() LinkOptions

View File

@ -4,6 +4,7 @@ import (
"container/list"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
@ -14,8 +15,15 @@ import (
// live connection between two peers.
// it goes over a particular link.
type conn struct {
local peer.Peer
remote peer.Peer
local peer.ID
remote peer.ID
localAddr ma.Multiaddr
remoteAddr ma.Multiaddr
localPrivKey ic.PrivKey
remotePubKey ic.PubKey
net *peernet
link *link
rconn *conn // counterpart
@ -74,8 +82,8 @@ func (c *conn) openStream() *stream {
return sl
}
func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID, p peer.Peer) (inet.Stream, error) {
log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, p)
func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID) (inet.Stream, error) {
log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, c.remote)
s := c.openStream()
if err := inet.WriteProtocolHeader(pr, s); err != nil {
@ -87,20 +95,30 @@ func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID, p peer.Peer) (inet.Stre
// LocalMultiaddr is the Multiaddr on this side
func (c *conn) LocalMultiaddr() ma.Multiaddr {
return nil
return c.localAddr
}
// LocalPeer is the Peer on our side of the connection
func (c *conn) LocalPeer() peer.Peer {
func (c *conn) LocalPeer() peer.ID {
return c.local
}
// LocalPrivateKey is the private key of the peer on our side.
func (c *conn) LocalPrivateKey() ic.PrivKey {
return c.localPrivKey
}
// RemoteMultiaddr is the Multiaddr on the remote side
func (c *conn) RemoteMultiaddr() ma.Multiaddr {
return nil
return c.remoteAddr
}
// RemotePeer is the Peer on the remote side
func (c *conn) RemotePeer() peer.Peer {
func (c *conn) RemotePeer() peer.ID {
return c.remote
}
// RemotePublicKey is the private key of the peer on our side.
func (c *conn) RemotePublicKey() ic.PubKey {
return c.remotePubKey
}

View File

@ -1,7 +1,6 @@
package mocknet
import (
"fmt"
"io"
"sync"
@ -16,6 +15,8 @@ type link struct {
nets []*peernet
opts LinkOptions
// this could have addresses on both sides.
sync.RWMutex
}
@ -27,20 +28,22 @@ func (l *link) newConnPair(dialer *peernet) (*conn, *conn) {
l.RLock()
defer l.RUnlock()
mkconn := func(n *peernet, rid peer.ID) *conn {
c := &conn{net: n, link: l}
c.local = n.peer
mkconn := func(ln, rn *peernet) *conn {
c := &conn{net: ln, link: l}
c.local = ln.peer
c.remote = rn.peer
c.localAddr = ln.ps.Addresses(ln.peer)[0]
c.remoteAddr = rn.ps.Addresses(rn.peer)[0]
c.localPrivKey = ln.ps.PrivKey(ln.peer)
c.remotePubKey = rn.ps.PubKey(rn.peer)
r, err := n.ps.FindOrCreate(rid)
if err != nil {
panic(fmt.Errorf("error creating peer: %s", err))
}
c.remote = r
return c
}
c1 := mkconn(l.nets[0], l.nets[1].peer.ID())
c2 := mkconn(l.nets[1], l.nets[0].peer.ID())
c1 := mkconn(l.nets[0], l.nets[1])
c2 := mkconn(l.nets[1], l.nets[0])
c1.rconn = c2
c2.rconn = c1
@ -70,11 +73,11 @@ func (l *link) Networks() []inet.Network {
return cp
}
func (l *link) Peers() []peer.Peer {
func (l *link) Peers() []peer.ID {
l.RLock()
defer l.RUnlock()
cp := make([]peer.Peer, len(l.nets))
cp := make([]peer.ID, len(l.nets))
for i, n := range l.nets {
cp[i] = n.peer
}

View File

@ -4,27 +4,27 @@ import (
"fmt"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
type peerID string
// mocknet implements mocknet.Mocknet
type mocknet struct {
// must map on peer.ID (instead of peer.Peer) because
// must map on peer.ID (instead of peer.Peer) because
// each inet.Network has different peerstore
nets map[peerID]*peernet
nets map[peer.ID]*peernet
// links make it possible to connect two peers.
// think of links as the physical medium.
// usually only one, but there could be multiple
// **links are shared between peers**
links map[peerID]map[peerID]map[*link]struct{}
links map[peer.ID]map[peer.ID]map[*link]struct{}
linkDefaults LinkOptions
@ -34,62 +34,53 @@ type mocknet struct {
func New(ctx context.Context) Mocknet {
return &mocknet{
nets: map[peerID]*peernet{},
links: map[peerID]map[peerID]map[*link]struct{}{},
nets: map[peer.ID]*peernet{},
links: map[peer.ID]map[peer.ID]map[*link]struct{}{},
cg: ctxgroup.WithContext(ctx),
}
}
func (mn *mocknet) GenPeer() (inet.Network, error) {
p, err := testutil.PeerWithNewKeys()
sk, _, err := testutil.RandKeyPair(512)
if err != nil {
return nil, err
}
n, err := mn.AddPeer(p.ID())
if err != nil {
return nil, err
}
a := testutil.RandLocalTCPAddress()
// copy over keys
if err := n.LocalPeer().Update(p); err != nil {
n, err := mn.AddPeer(sk, a)
if err != nil {
return nil, err
}
return n, nil
}
func (mn *mocknet) AddPeer(p peer.ID) (inet.Network, error) {
n, err := newPeernet(mn.cg.Context(), mn, p)
func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (inet.Network, error) {
n, err := newPeernet(mn.cg.Context(), mn, k, a)
if err != nil {
return nil, err
}
// make sure to add listening address!
// this makes debugging things simpler as remembering to register
// an address may cause unexpected failure.
n.Peerstore().AddAddress(n.LocalPeer(), a)
log.Debugf("mocknet added listen addr for peer: %s -- %s", n.LocalPeer(), a)
mn.cg.AddChildGroup(n.cg)
mn.Lock()
mn.nets[pid(n.peer)] = n
mn.nets[n.peer] = n
mn.Unlock()
return n, nil
}
func (mn *mocknet) Peer(pid peer.ID) peer.Peer {
func (mn *mocknet) Peers() []peer.ID {
mn.RLock()
defer mn.RUnlock()
for _, n := range mn.nets {
if n.peer.ID().Equal(pid) {
return n.peer
}
}
return nil
}
func (mn *mocknet) Peers() []peer.Peer {
mn.RLock()
defer mn.RUnlock()
cp := make([]peer.Peer, 0, len(mn.nets))
cp := make([]peer.ID, 0, len(mn.nets))
for _, n := range mn.nets {
cp = append(cp, n.peer)
}
@ -101,7 +92,7 @@ func (mn *mocknet) Net(pid peer.ID) inet.Network {
defer mn.RUnlock()
for _, n := range mn.nets {
if n.peer.ID().Equal(pid) {
if n.peer == pid {
return n
}
}
@ -152,10 +143,10 @@ func (mn *mocknet) LinkAll() error {
return nil
}
func (mn *mocknet) LinkPeers(p1, p2 peer.Peer) (Link, error) {
func (mn *mocknet) LinkPeers(p1, p2 peer.ID) (Link, error) {
mn.RLock()
n1 := mn.nets[pid(p1)]
n2 := mn.nets[pid(p2)]
n1 := mn.nets[p1]
n2 := mn.nets[p2]
mn.RUnlock()
if n1 == nil {
@ -177,7 +168,7 @@ func (mn *mocknet) validate(n inet.Network) (*peernet, error) {
return nil, fmt.Errorf("Network not supported (use mock package nets only)")
}
if _, found := mn.nets[pid(nr.peer)]; !found {
if _, found := mn.nets[nr.peer]; !found {
return nil, fmt.Errorf("Network not on mocknet. is it from another mocknet?")
}
@ -215,7 +206,7 @@ func (mn *mocknet) Unlink(l2 Link) error {
return nil
}
func (mn *mocknet) UnlinkPeers(p1, p2 peer.Peer) error {
func (mn *mocknet) UnlinkPeers(p1, p2 peer.ID) error {
ls := mn.LinksBetweenPeers(p1, p2)
if ls == nil {
return fmt.Errorf("no link between p1 and p2")
@ -234,19 +225,19 @@ func (mn *mocknet) UnlinkNets(n1, n2 inet.Network) error {
}
// get from the links map. and lazily contruct.
func (mn *mocknet) linksMapGet(p1, p2 peer.Peer) *map[*link]struct{} {
func (mn *mocknet) linksMapGet(p1, p2 peer.ID) *map[*link]struct{} {
l1, found := mn.links[pid(p1)]
l1, found := mn.links[p1]
if !found {
mn.links[pid(p1)] = map[peerID]map[*link]struct{}{}
l1 = mn.links[pid(p1)] // so we make sure it's there.
mn.links[p1] = map[peer.ID]map[*link]struct{}{}
l1 = mn.links[p1] // so we make sure it's there.
}
l2, found := l1[pid(p2)]
l2, found := l1[p2]
if !found {
m := map[*link]struct{}{}
l1[pid(p2)] = m
l2 = l1[pid(p2)]
l1[p2] = m
l2 = l1[p2]
}
return &l2
@ -286,23 +277,23 @@ func (mn *mocknet) ConnectAll() error {
return nil
}
func (mn *mocknet) ConnectPeers(a, b peer.Peer) error {
return mn.Net(a.ID()).DialPeer(mn.cg.Context(), b)
func (mn *mocknet) ConnectPeers(a, b peer.ID) error {
return mn.Net(a).DialPeer(mn.cg.Context(), b)
}
func (mn *mocknet) ConnectNets(a, b inet.Network) error {
return a.DialPeer(mn.cg.Context(), b.LocalPeer())
}
func (mn *mocknet) DisconnectPeers(p1, p2 peer.Peer) error {
return mn.Net(p1.ID()).ClosePeer(p2)
func (mn *mocknet) DisconnectPeers(p1, p2 peer.ID) error {
return mn.Net(p1).ClosePeer(p2)
}
func (mn *mocknet) DisconnectNets(n1, n2 inet.Network) error {
return n1.ClosePeer(n2.LocalPeer())
}
func (mn *mocknet) LinksBetweenPeers(p1, p2 peer.Peer) []Link {
func (mn *mocknet) LinksBetweenPeers(p1, p2 peer.ID) []Link {
mn.RLock()
defer mn.RUnlock()

View File

@ -5,6 +5,7 @@ import (
"math/rand"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
@ -17,33 +18,38 @@ import (
type peernet struct {
mocknet *mocknet // parent
peer peer.Peer
peer peer.ID
ps peer.Peerstore
// conns are actual live connections between peers.
// many conns could run over each link.
// **conns are NOT shared between peers**
connsByPeer map[peerID]map[*conn]struct{}
connsByPeer map[peer.ID]map[*conn]struct{}
connsByLink map[*link]map[*conn]struct{}
// needed to implement inet.Network
mux inet.Mux
ids *inet.IDService
cg ctxgroup.ContextGroup
sync.RWMutex
}
// newPeernet constructs a new peernet
func newPeernet(ctx context.Context, m *mocknet, id peer.ID) (*peernet, error) {
func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey,
a ma.Multiaddr) (*peernet, error) {
// create our own entirely, so that peers dont get shuffled across
// network divides. dont share peers.
ps := peer.NewPeerstore()
p, err := ps.FindOrCreate(id)
p, err := peer.IDFromPublicKey(k.GetPublic())
if err != nil {
return nil, err
}
// create our own entirely, so that peers knowledge doesn't get shared
ps := peer.NewPeerstore()
ps.AddAddress(p, a)
ps.AddPrivKey(p, k)
ps.AddPubKey(p, k.GetPublic())
n := &peernet{
mocknet: m,
peer: p,
@ -51,11 +57,16 @@ func newPeernet(ctx context.Context, m *mocknet, id peer.ID) (*peernet, error) {
mux: inet.Mux{Handlers: inet.StreamHandlerMap{}},
cg: ctxgroup.WithContext(ctx),
connsByPeer: map[peerID]map[*conn]struct{}{},
connsByPeer: map[peer.ID]map[*conn]struct{}{},
connsByLink: map[*link]map[*conn]struct{}{},
}
n.cg.SetTeardown(n.teardown)
// setup a conn handler that immediately "asks the other side about them"
// this is ProtocolIdentify.
n.ids = inet.NewIDService(n)
return n, nil
}
@ -86,6 +97,14 @@ func (pn *peernet) Close() error {
return pn.cg.Close()
}
func (pn *peernet) Protocols() []inet.ProtocolID {
return pn.mux.Protocols()
}
func (pn *peernet) Peerstore() peer.Peerstore {
return pn.ps
}
func (pn *peernet) String() string {
return fmt.Sprintf("<mock.peernet %s - %d conns>", pn.peer, len(pn.allConns()))
}
@ -97,28 +116,21 @@ func (pn *peernet) handleNewStream(s inet.Stream) {
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
func (pn *peernet) DialPeer(ctx context.Context, p peer.Peer) error {
func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) error {
return pn.connect(p)
}
func (pn *peernet) connect(p peer.Peer) error {
log.Debugf("%s dialing %s", pn.peer, p)
// cannot trust the peer we get. typical for tests to give us
// a peer from some other peerstore...
p, err := pn.ps.Add(p)
if err != nil {
return err
}
func (pn *peernet) connect(p peer.ID) error {
// first, check if we already have live connections
pn.RLock()
cs, found := pn.connsByPeer[pid(p)]
cs, found := pn.connsByPeer[p]
pn.RUnlock()
if found && len(cs) > 0 {
return nil
}
log.Debugf("%s (newly) dialing %s", pn.peer, p)
// ok, must create a new connection. we need a link
links := pn.mocknet.LinksBetweenPeers(pn.peer, p)
if len(links) < 1 {
@ -136,7 +148,7 @@ func (pn *peernet) connect(p peer.Peer) error {
return nil
}
func (pn *peernet) openConn(r peer.Peer, l *link) *conn {
func (pn *peernet) openConn(r peer.ID, l *link) *conn {
lc, rc := l.newConnPair(pn)
log.Debugf("%s opening connection to %s", pn.LocalPeer(), lc.RemotePeer())
pn.addConn(lc)
@ -152,13 +164,17 @@ func (pn *peernet) remoteOpenedConn(c *conn) {
// addConn constructs and adds a connection
// to given remote peer over given link
func (pn *peernet) addConn(c *conn) {
// run the Identify protocol/handshake.
pn.ids.IdentifyConn(c)
pn.Lock()
cs, found := pn.connsByPeer[pid(c.RemotePeer())]
cs, found := pn.connsByPeer[c.RemotePeer()]
if !found {
cs = map[*conn]struct{}{}
pn.connsByPeer[pid(c.RemotePeer())] = cs
pn.connsByPeer[c.RemotePeer()] = cs
}
pn.connsByPeer[pid(c.RemotePeer())][c] = struct{}{}
pn.connsByPeer[c.RemotePeer()][c] = struct{}{}
cs, found = pn.connsByLink[c.link]
if !found {
@ -180,7 +196,7 @@ func (pn *peernet) removeConn(c *conn) {
}
delete(cs, c)
cs, found = pn.connsByPeer[pid(c.remote)]
cs, found = pn.connsByPeer[c.remote]
if !found {
panic("attempting to remove a conn that doesnt exist")
}
@ -193,16 +209,16 @@ func (pn *peernet) CtxGroup() ctxgroup.ContextGroup {
}
// LocalPeer the network's LocalPeer
func (pn *peernet) LocalPeer() peer.Peer {
func (pn *peernet) LocalPeer() peer.ID {
return pn.peer
}
// Peers returns the connected peers
func (pn *peernet) Peers() []peer.Peer {
func (pn *peernet) Peers() []peer.ID {
pn.RLock()
defer pn.RUnlock()
peers := make([]peer.Peer, 0, len(pn.connsByPeer))
peers := make([]peer.ID, 0, len(pn.connsByPeer))
for _, cs := range pn.connsByPeer {
for c := range cs {
peers = append(peers, c.remote)
@ -226,11 +242,11 @@ func (pn *peernet) Conns() []inet.Conn {
return out
}
func (pn *peernet) ConnsToPeer(p peer.Peer) []inet.Conn {
func (pn *peernet) ConnsToPeer(p peer.ID) []inet.Conn {
pn.RLock()
defer pn.RUnlock()
cs, found := pn.connsByPeer[pid(p)]
cs, found := pn.connsByPeer[p]
if !found || len(cs) == 0 {
return nil
}
@ -243,9 +259,9 @@ func (pn *peernet) ConnsToPeer(p peer.Peer) []inet.Conn {
}
// ClosePeer connections to peer
func (pn *peernet) ClosePeer(p peer.Peer) error {
func (pn *peernet) ClosePeer(p peer.ID) error {
pn.RLock()
cs, found := pn.connsByPeer[pid(p)]
cs, found := pn.connsByPeer[p]
pn.RUnlock()
if !found {
return nil
@ -266,23 +282,23 @@ func (pn *peernet) BandwidthTotals() (in uint64, out uint64) {
// ListenAddresses returns a list of addresses at which this network listens.
func (pn *peernet) ListenAddresses() []ma.Multiaddr {
return []ma.Multiaddr{}
return pn.Peerstore().Addresses(pn.LocalPeer())
}
// InterfaceListenAddresses returns a list of addresses at which this network
// listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to
// use the known local interfaces.
func (pn *peernet) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
return []ma.Multiaddr{}, nil
return pn.ListenAddresses(), nil
}
// Connectedness returns a state signaling connection capabilities
// For now only returns Connecter || NotConnected. Expand into more later.
func (pn *peernet) Connectedness(p peer.Peer) inet.Connectedness {
func (pn *peernet) Connectedness(p peer.ID) inet.Connectedness {
pn.Lock()
defer pn.Unlock()
cs, found := pn.connsByPeer[pid(p)]
cs, found := pn.connsByPeer[p]
if found && len(cs) > 0 {
return inet.Connected
}
@ -292,11 +308,11 @@ func (pn *peernet) Connectedness(p peer.Peer) inet.Connectedness {
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.Peer) (inet.Stream, error) {
func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.ID) (inet.Stream, error) {
pn.Lock()
defer pn.Unlock()
cs, found := pn.connsByPeer[pid(p)]
cs, found := pn.connsByPeer[p]
if !found || len(cs) < 1 {
return nil, fmt.Errorf("no connection to peer")
}
@ -313,7 +329,7 @@ func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.Peer) (inet.Stream, erro
n--
}
return c.NewStreamWithProtocol(pr, p)
return c.NewStreamWithProtocol(pr)
}
// SetHandler sets the protocol handler on the Network's Muxer.
@ -322,6 +338,6 @@ func (pn *peernet) SetHandler(p inet.ProtocolID, h inet.StreamHandler) {
pn.mux.SetHandler(p, h)
}
func pid(p peer.Peer) peerID {
return peerID(p.ID())
func (pn *peernet) IdentifyProtocol() *inet.IDService {
return pn.ids
}

View File

@ -14,51 +14,64 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
)
func randPeer(t *testing.T) peer.ID {
p, err := testutil.RandPeerID()
if err != nil {
t.Fatal(err)
}
return p
}
func TestNetworkSetup(t *testing.T) {
ctx := context.Background()
p1 := testutil.RandPeer()
p2 := testutil.RandPeer()
p3 := testutil.RandPeer()
sk1, _, err := testutil.RandKeyPair(512)
if err != nil {
t.Fatal(t)
}
sk2, _, err := testutil.RandKeyPair(512)
if err != nil {
t.Fatal(t)
}
sk3, _, err := testutil.RandKeyPair(512)
if err != nil {
t.Fatal(t)
}
mn := New(ctx)
// peers := []peer.Peer{p1, p2, p3}
// peers := []peer.ID{p1, p2, p3}
// add peers to mock net
n1, err := mn.AddPeer(p1.ID())
if err != nil {
t.Fatal(err)
}
a1 := testutil.RandLocalTCPAddress()
a2 := testutil.RandLocalTCPAddress()
a3 := testutil.RandLocalTCPAddress()
n2, err := mn.AddPeer(p2.ID())
n1, err := mn.AddPeer(sk1, a1)
if err != nil {
t.Fatal(err)
}
p1 := n1.LocalPeer()
n3, err := mn.AddPeer(p3.ID())
n2, err := mn.AddPeer(sk2, a2)
if err != nil {
t.Fatal(err)
}
p2 := n2.LocalPeer()
n3, err := mn.AddPeer(sk3, a3)
if err != nil {
t.Fatal(err)
}
p3 := n3.LocalPeer()
// check peers and net
if !mn.Peer(p1.ID()).ID().Equal(p1.ID()) {
t.Error("peer for p1.ID != p1.ID")
}
if !mn.Peer(p2.ID()).ID().Equal(p2.ID()) {
t.Error("peer for p2.ID != p2.ID")
}
if !mn.Peer(p3.ID()).ID().Equal(p3.ID()) {
t.Error("peer for p3.ID != p3.ID")
}
if mn.Net(p1.ID()) != n1 {
if mn.Net(p1) != n1 {
t.Error("net for p1.ID != n1")
}
if mn.Net(p2.ID()) != n2 {
if mn.Net(p2) != n2 {
t.Error("net for p2.ID != n1")
}
if mn.Net(p3.ID()) != n3 {
if mn.Net(p3) != n3 {
t.Error("net for p3.ID != n1")
}
@ -373,24 +386,32 @@ func TestStreamsStress(t *testing.T) {
}(i)
}
wg.Done()
wg.Wait()
}
func TestAdding(t *testing.T) {
mn := New(context.Background())
p1 := testutil.RandPeer()
p2 := testutil.RandPeer()
p3 := testutil.RandPeer()
peers := []peer.Peer{p1, p2, p3}
for _, p := range peers {
if _, err := mn.AddPeer(p.ID()); err != nil {
t.Error(err)
peers := []peer.ID{}
for i := 0; i < 3; i++ {
sk, _, err := testutil.RandKeyPair(512)
if err != nil {
t.Fatal(err)
}
a := testutil.RandLocalTCPAddress()
n, err := mn.AddPeer(sk, a)
if err != nil {
t.Fatal(err)
}
peers = append(peers, n.LocalPeer())
}
p1 := peers[0]
p2 := peers[1]
// link them
for _, p1 := range peers {
for _, p2 := range peers {
@ -401,9 +422,9 @@ func TestAdding(t *testing.T) {
}
// set the new stream handler on p2
n2 := mn.Net(p2.ID())
n2 := mn.Net(p2)
if n2 == nil {
t.Fatalf("no network for %s", p2.ID())
t.Fatalf("no network for %s", p2)
}
n2.SetHandler(inet.ProtocolBitswap, func(s inet.Stream) {
go func() {
@ -429,9 +450,9 @@ func TestAdding(t *testing.T) {
}
// talk to p2
n1 := mn.Net(p1.ID())
n1 := mn.Net(p1)
if n1 == nil {
t.Fatalf("no network for %s", p1.ID())
t.Fatalf("no network for %s", p1)
}
s, err := n1.NewStream(inet.ProtocolBitswap, p2)

View File

@ -11,7 +11,7 @@ import (
lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables"
)
var log = eventlog.Logger("mux2")
var log = eventlog.Logger("network")
// Mux provides simple stream multixplexing.
// It helps you precisely when:
@ -37,6 +37,17 @@ type Mux struct {
sync.RWMutex
}
// Protocols returns the list of protocols this muxer has handlers for
func (m *Mux) Protocols() []ProtocolID {
m.RLock()
l := make([]ProtocolID, 0, len(m.Handlers))
for p := range m.Handlers {
l = append(l, p)
}
m.RUnlock()
return l
}
// ReadProtocolHeader reads the stream and returns the next Handler function
// according to the muxer encoding.
func (m *Mux) ReadProtocolHeader(s io.Reader) (string, StreamHandler, error) {

View File

@ -2,6 +2,9 @@
package net
import (
"fmt"
ic "github.com/jbenet/go-ipfs/crypto"
swarm "github.com/jbenet/go-ipfs/net/swarm"
peer "github.com/jbenet/go-ipfs/peer"
@ -39,11 +42,15 @@ func (s *stream) Write(p []byte) (n int, err error) {
type conn_ swarm.Conn
func (s *conn_) String() string {
return s.SwarmConn().String()
}
func (c *conn_) SwarmConn() *swarm.Conn {
return (*swarm.Conn)(c)
}
func (c *conn_) NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error) {
func (c *conn_) NewStreamWithProtocol(pr ProtocolID) (Stream, error) {
s, err := (*swarm.Conn)(c).NewStream()
if err != nil {
return nil, err
@ -59,37 +66,43 @@ func (c *conn_) NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error
return ss, nil
}
// LocalMultiaddr is the Multiaddr on this side
func (c *conn_) LocalMultiaddr() ma.Multiaddr {
return c.SwarmConn().LocalMultiaddr()
}
// LocalPeer is the Peer on our side of the connection
func (c *conn_) LocalPeer() peer.Peer {
return c.SwarmConn().LocalPeer()
}
// RemoteMultiaddr is the Multiaddr on the remote side
func (c *conn_) RemoteMultiaddr() ma.Multiaddr {
return c.SwarmConn().RemoteMultiaddr()
}
// RemotePeer is the Peer on the remote side
func (c *conn_) RemotePeer() peer.Peer {
func (c *conn_) LocalPeer() peer.ID {
return c.SwarmConn().LocalPeer()
}
func (c *conn_) RemotePeer() peer.ID {
return c.SwarmConn().RemotePeer()
}
func (c *conn_) LocalPrivateKey() ic.PrivKey {
return c.SwarmConn().LocalPrivateKey()
}
func (c *conn_) RemotePublicKey() ic.PubKey {
return c.SwarmConn().RemotePublicKey()
}
// network implements the Network interface,
type network struct {
local peer.Peer // local peer
local peer.ID // local peer
mux Mux // protocol multiplexing
swarm *swarm.Swarm // peer connection multiplexing
ps peer.Peerstore
ids *IDService
cg ctxgroup.ContextGroup // for Context closing
}
// NewNetwork constructs a new network and starts listening on given addresses.
func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.Peer,
func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.ID,
peers peer.Peerstore) (Network, error) {
s, err := swarm.NewSwarm(ctx, listen, local, peers)
@ -102,22 +115,46 @@ func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.Peer,
swarm: s,
mux: Mux{Handlers: StreamHandlerMap{}},
cg: ctxgroup.WithContext(ctx),
ps: peers,
}
n.cg.SetTeardown(n.close)
n.cg.AddChildGroup(s.CtxGroup())
s.SetStreamHandler(func(s *swarm.Stream) {
n.mux.Handle((*stream)(s))
})
n.cg.SetTeardown(n.close)
n.cg.AddChildGroup(s.CtxGroup())
// setup a conn handler that immediately "asks the other side about them"
// this is ProtocolIdentify.
n.ids = NewIDService(n)
s.SetConnHandler(n.newConnHandler)
return n, nil
}
func (n *network) newConnHandler(c *swarm.Conn) {
cc := (*conn_)(c)
n.ids.IdentifyConn(cc)
}
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
func (n *network) DialPeer(ctx context.Context, p peer.Peer) error {
_, err := n.swarm.Dial(ctx, p)
return err
func (n *network) DialPeer(ctx context.Context, p peer.ID) error {
log.Debugf("[%s] network dialing peer [%s]", n.local, p)
sc, err := n.swarm.Dial(ctx, p)
if err != nil {
return err
}
// identify the connection before returning.
n.ids.IdentifyConn((*conn_)(sc))
log.Debugf("network for %s finished dialing %s", n.local, p)
return nil
}
func (n *network) Protocols() []ProtocolID {
return n.mux.Protocols()
}
// CtxGroup returns the network's ContextGroup
@ -131,15 +168,20 @@ func (n *network) Swarm() *swarm.Swarm {
}
// LocalPeer the network's LocalPeer
func (n *network) LocalPeer() peer.Peer {
func (n *network) LocalPeer() peer.ID {
return n.swarm.LocalPeer()
}
// Peers returns the connected peers
func (n *network) Peers() []peer.Peer {
func (n *network) Peers() []peer.ID {
return n.swarm.Peers()
}
// Peers returns the connected peers
func (n *network) Peerstore() peer.Peerstore {
return n.ps
}
// Conns returns the connected peers
func (n *network) Conns() []Conn {
conns1 := n.swarm.Connections()
@ -150,8 +192,18 @@ func (n *network) Conns() []Conn {
return out
}
// ConnsToPeer returns the connections in this Netowrk for given peer.
func (n *network) ConnsToPeer(p peer.ID) []Conn {
conns1 := n.swarm.ConnectionsToPeer(p)
out := make([]Conn, len(conns1))
for i, c := range conns1 {
out[i] = (*conn_)(c)
}
return out
}
// ClosePeer connection to peer
func (n *network) ClosePeer(p peer.Peer) error {
func (n *network) ClosePeer(p peer.ID) error {
return n.swarm.CloseConnection(p)
}
@ -186,9 +238,9 @@ func (n *network) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
// Connectedness returns a state signaling connection capabilities
// For now only returns Connected || NotConnected. Expand into more later.
func (n *network) Connectedness(p peer.Peer) Connectedness {
func (n *network) Connectedness(p peer.ID) Connectedness {
c := n.swarm.ConnectionsToPeer(p)
if c != nil && len(c) < 1 {
if c != nil && len(c) > 0 {
return Connected
}
return NotConnected
@ -197,8 +249,9 @@ func (n *network) Connectedness(p peer.Peer) Connectedness {
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
func (c *network) NewStream(pr ProtocolID, p peer.Peer) (Stream, error) {
s, err := c.swarm.NewStreamWithPeer(p)
func (n *network) NewStream(pr ProtocolID, p peer.ID) (Stream, error) {
log.Debugf("[%s] network opening stream to peer [%s]: %s", n.local, p, pr)
s, err := n.swarm.NewStreamWithPeer(p)
if err != nil {
return nil, err
}
@ -219,6 +272,14 @@ func (n *network) SetHandler(p ProtocolID, h StreamHandler) {
n.mux.SetHandler(p, h)
}
func (n *network) String() string {
return fmt.Sprintf("<Network %s>", n.LocalPeer())
}
func (n *network) IdentifyProtocol() *IDService {
return n.ids
}
func WriteProtocolHeader(pr ProtocolID, s Stream) error {
if pr != "" { // only write proper protocol headers
if err := WriteLengthPrefix(s, string(pr)); err != nil {

76
net/net_test.go Normal file
View File

@ -0,0 +1,76 @@
package net_test
import (
"fmt"
"testing"
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
inet "github.com/jbenet/go-ipfs/net"
)
// TestConnectednessCorrect starts a few networks, connects a few
// and tests Connectedness value is correct.
func TestConnectednessCorrect(t *testing.T) {
ctx := context.Background()
nets := make([]inet.Network, 4)
for i := 0; i < 4; i++ {
nets[i] = GenNetwork(t, ctx)
}
// connect 0-1, 0-2, 0-3, 1-2, 2-3
dial := func(a, b inet.Network) {
DivulgeAddresses(b, a)
if err := a.DialPeer(ctx, b.LocalPeer()); err != nil {
t.Fatalf("Failed to dial: %s", err)
}
}
dial(nets[0], nets[1])
dial(nets[0], nets[3])
dial(nets[1], nets[2])
dial(nets[3], nets[2])
// there's something wrong with dial, i think. it's not finishing
// completely. there must be some async stuff.
<-time.After(100 * time.Millisecond)
// test those connected show up correctly
// test connected
expectConnectedness(t, nets[0], nets[1], inet.Connected)
expectConnectedness(t, nets[0], nets[3], inet.Connected)
expectConnectedness(t, nets[1], nets[2], inet.Connected)
expectConnectedness(t, nets[3], nets[2], inet.Connected)
// test not connected
expectConnectedness(t, nets[0], nets[2], inet.NotConnected)
expectConnectedness(t, nets[1], nets[3], inet.NotConnected)
for _, n := range nets {
n.Close()
}
}
func expectConnectedness(t *testing.T, a, b inet.Network, expected inet.Connectedness) {
es := "%s is connected to %s, but Connectedness incorrect. %s %s"
if a.Connectedness(b.LocalPeer()) != expected {
t.Errorf(es, a, b, printConns(a), printConns(b))
}
// test symmetric case
if b.Connectedness(a.LocalPeer()) != expected {
t.Errorf(es, b, a, printConns(b), printConns(a))
}
}
func printConns(n inet.Network) string {
s := fmt.Sprintf("Connections in %s:\n", n)
for _, c := range n.Conns() {
s = s + fmt.Sprintf("- %s\n", c)
}
return s
}

View File

@ -1,36 +1,29 @@
package swarm
import (
"fmt"
"sync"
"testing"
"time"
peer "github.com/jbenet/go-ipfs/peer"
"github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
func TestSimultOpen(t *testing.T) {
// t.Skip("skipping for another test")
addrs := []string{
"/ip4/127.0.0.1/tcp/1244",
"/ip4/127.0.0.1/tcp/1245",
}
ctx := context.Background()
swarms, _ := makeSwarms(ctx, t, addrs)
swarms, peers := makeSwarms(ctx, t, 2)
// connect everyone
{
var wg sync.WaitGroup
connect := func(s *Swarm, dst peer.Peer) {
connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
// copy for other peer
cp := testutil.NewPeerWithID(dst.ID())
cp.AddAddress(dst.Addresses()[0])
if _, err := s.Dial(ctx, cp); err != nil {
s.peers.AddAddress(dst, addr)
if _, err := s.Dial(ctx, dst); err != nil {
t.Fatal("error swarm dialing to peer", err)
}
wg.Done()
@ -38,8 +31,8 @@ func TestSimultOpen(t *testing.T) {
log.Info("Connecting swarms simultaneously.")
wg.Add(2)
go connect(swarms[0], swarms[1].local)
go connect(swarms[1], swarms[0].local)
go connect(swarms[0], swarms[1].local, peers[1].Addr)
go connect(swarms[1], swarms[0].local, peers[0].Addr)
wg.Wait()
}
@ -51,13 +44,7 @@ func TestSimultOpen(t *testing.T) {
func TestSimultOpenMany(t *testing.T) {
// t.Skip("very very slow")
many := 10
addrs := []string{}
for i := 2200; i < (2200 + many); i++ {
s := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i)
addrs = append(addrs, s)
}
addrs := 20
SubtestSwarm(t, addrs, 10)
}
@ -67,14 +54,13 @@ func TestSimultOpenFewStress(t *testing.T) {
}
// t.Skip("skipping for another test")
num := 10
// num := 100
for i := 0; i < num; i++ {
addrs := []string{
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 1900+i),
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2900+i),
}
msgs := 40
swarms := 2
rounds := 10
// rounds := 100
SubtestSwarm(t, addrs, 10)
for i := 0; i < rounds; i++ {
SubtestSwarm(t, swarms, msgs)
<-time.After(10 * time.Millisecond)
}
}

View File

@ -22,21 +22,16 @@ var log = eventlog.Logger("swarm2")
// Uses peerstream.Swarm
type Swarm struct {
swarm *ps.Swarm
local peer.Peer
local peer.ID
peers peer.Peerstore
connh ConnHandler
cg ctxgroup.ContextGroup
}
// NewSwarm constructs a Swarm, with a Chan.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
local peer.Peer, peers peer.Peerstore) (*Swarm, error) {
// make sure our own peer is in our peerstore...
local, err := peers.Add(local)
if err != nil {
return nil, err
}
local peer.ID, peers peer.Peerstore) (*Swarm, error) {
s := &Swarm{
swarm: ps.NewSwarm(),
@ -47,7 +42,7 @@ func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
// configure Swarm
s.cg.SetTeardown(s.teardown)
s.swarm.SetConnHandler(s.connHandler)
s.SetConnHandler(nil) // make sure to setup our own conn handler.
return s, s.listen(listenAddrs)
}
@ -71,6 +66,27 @@ func (s *Swarm) StreamSwarm() *ps.Swarm {
return s.swarm
}
// SetConnHandler assigns the handler for new connections.
// See peerstream. You will rarely use this. See SetStreamHandler
func (s *Swarm) SetConnHandler(handler ConnHandler) {
// handler is nil if user wants to clear the old handler.
if handler == nil {
s.swarm.SetConnHandler(func(psconn *ps.Conn) {
s.connHandler(psconn)
})
return
}
s.swarm.SetConnHandler(func(psconn *ps.Conn) {
// sc is nil if closed in our handler.
if sc := s.connHandler(psconn); sc != nil {
// call the user's handler. in a goroutine for sync safety.
go handler(sc)
}
})
}
// SetStreamHandler assigns the handler for new streams.
// See peerstream.
func (s *Swarm) SetStreamHandler(handler StreamHandler) {
@ -80,13 +96,7 @@ func (s *Swarm) SetStreamHandler(handler StreamHandler) {
}
// NewStreamWithPeer creates a new stream on any available connection to p
func (s *Swarm) NewStreamWithPeer(p peer.Peer) (*Stream, error) {
// make sure we use OUR peers. (the tests mess with you...)
p, err := s.peers.Add(p)
if err != nil {
return nil, err
}
func (s *Swarm) NewStreamWithPeer(p peer.ID) (*Stream, error) {
// if we have no connections, try connecting.
if len(s.ConnectionsToPeer(p)) == 0 {
log.Debug("Swarm: NewStreamWithPeer no connections. Attempting to connect...")
@ -101,21 +111,12 @@ func (s *Swarm) NewStreamWithPeer(p peer.Peer) (*Stream, error) {
}
// StreamsWithPeer returns all the live Streams to p
func (s *Swarm) StreamsWithPeer(p peer.Peer) []*Stream {
// make sure we use OUR peers. (the tests mess with you...)
if p2, err := s.peers.Add(p); err == nil {
p = p2
}
func (s *Swarm) StreamsWithPeer(p peer.ID) []*Stream {
return wrapStreams(ps.StreamsWithGroup(p, s.swarm.Streams()))
}
// ConnectionsToPeer returns all the live connections to p
func (s *Swarm) ConnectionsToPeer(p peer.Peer) []*Conn {
// make sure we use OUR peers. (the tests mess with you...)
if p2, err := s.peers.Add(p); err == nil {
p = p2
}
func (s *Swarm) ConnectionsToPeer(p peer.ID) []*Conn {
return wrapConns(ps.ConnsWithGroup(p, s.swarm.Conns()))
}
@ -125,13 +126,7 @@ func (s *Swarm) Connections() []*Conn {
}
// CloseConnection removes a given peer from swarm + closes the connection
func (s *Swarm) CloseConnection(p peer.Peer) error {
// make sure we use OUR peers. (the tests mess with you...)
p, err := s.peers.Add(p)
if err != nil {
return err
}
func (s *Swarm) CloseConnection(p peer.ID) error {
conns := s.swarm.ConnsWithGroup(p) // boom.
for _, c := range conns {
c.Close()
@ -140,11 +135,11 @@ func (s *Swarm) CloseConnection(p peer.Peer) error {
}
// Peers returns a copy of the set of peers swarm is connected to.
func (s *Swarm) Peers() []peer.Peer {
func (s *Swarm) Peers() []peer.ID {
conns := s.Connections()
seen := make(map[peer.Peer]struct{})
peers := make([]peer.Peer, 0, len(conns))
seen := make(map[peer.ID]struct{})
peers := make([]peer.ID, 0, len(conns))
for _, c := range conns {
p := c.RemotePeer()
if _, found := seen[p]; found {
@ -157,6 +152,6 @@ func (s *Swarm) Peers() []peer.Peer {
}
// LocalPeer returns the local peer swarm is associated to.
func (s *Swarm) LocalPeer() peer.Peer {
func (s *Swarm) LocalPeer() peer.ID {
return s.local
}

View File

@ -3,6 +3,7 @@ package swarm
import (
"fmt"
ic "github.com/jbenet/go-ipfs/crypto"
conn "github.com/jbenet/go-ipfs/net/conn"
peer "github.com/jbenet/go-ipfs/peer"
@ -23,6 +24,10 @@ import (
// layers do build up pieces of functionality. and they're all just io.RW :) )
type Conn ps.Conn
// ConnHandler is called when new conns are opened from remote peers.
// See peerstream.ConnHandler
type ConnHandler func(*Conn)
func (c *Conn) StreamConn() *ps.Conn {
return (*ps.Conn)(c)
}
@ -35,13 +40,17 @@ func (c *Conn) RawConn() conn.Conn {
return (*ps.Conn)(c).NetConn().(conn.Conn)
}
func (c *Conn) String() string {
return fmt.Sprintf("<SwarmConn %s>", c.RawConn())
}
// LocalMultiaddr is the Multiaddr on this side
func (c *Conn) LocalMultiaddr() ma.Multiaddr {
return c.RawConn().LocalMultiaddr()
}
// LocalPeer is the Peer on our side of the connection
func (c *Conn) LocalPeer() peer.Peer {
func (c *Conn) LocalPeer() peer.ID {
return c.RawConn().LocalPeer()
}
@ -51,10 +60,20 @@ func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
}
// RemotePeer is the Peer on the remote side
func (c *Conn) RemotePeer() peer.Peer {
func (c *Conn) RemotePeer() peer.ID {
return c.RawConn().RemotePeer()
}
// LocalPrivateKey is the public key of the peer on this side
func (c *Conn) LocalPrivateKey() ic.PrivKey {
return c.RawConn().LocalPrivateKey()
}
// RemotePublicKey is the public key of the peer on the remote side
func (c *Conn) RemotePublicKey() ic.PubKey {
return c.RawConn().RemotePublicKey()
}
// NewStream returns a new Stream from this connection
func (c *Conn) NewStream() (*Stream, error) {
s, err := c.StreamConn().NewStream()
@ -96,12 +115,12 @@ func (s *Swarm) newConnSetup(ctx context.Context, psConn *ps.Conn) (*Conn, error
return nil, err
}
// removing this for now, as it has to change. we can put this in a different
// sub-protocol anyway.
// // run Handshake3
// if err := runHandshake3(ctx, s, sc); err != nil {
// return nil, err
// }
// if we have a public key, make sure we add it to our peerstore!
// This is an important detail. Otherwise we must fetch the public
// key from the DHT or some other system.
if pk := sc.RemotePublicKey(); pk != nil {
s.peers.AddPubKey(sc.RemotePeer(), pk)
}
// ok great! we can use it. add it to our group.
@ -113,29 +132,3 @@ func (s *Swarm) newConnSetup(ctx context.Context, psConn *ps.Conn) (*Conn, error
return sc, nil
}
// func runHandshake3(ctx context.Context, s *Swarm, c *Conn) error {
// log.Event(ctx, "newConnection", c.LocalPeer(), c.RemotePeer())
// stream, err := c.NewStream()
// if err != nil {
// return err
// }
// // handshake3 (this whole thing is ugly. maybe lets get rid of it...)
// h3result, err := conn.Handshake3(ctx, stream, c.RawConn())
// if err != nil {
// return fmt.Errorf("Handshake3 failed: %s", err)
// }
// // check for nats. you know, just in case.
// if h3result.LocalObservedAddress != nil {
// checkNATWarning(s, h3result.LocalObservedAddress, c.LocalMultiaddr())
// } else {
// log.Warningf("Received nil observed address from %s", c.RemotePeer())
// }
// stream.Close()
// log.Event(ctx, "handshake3Succeeded", c.LocalPeer(), c.RemotePeer())
// return nil
// }

View File

@ -17,9 +17,9 @@ import (
// the connection will happen over. Swarm can use whichever it choses.
// This allows us to use various transport protocols, do NAT traversal/relay,
// etc. to achive connection.
func (s *Swarm) Dial(ctx context.Context, p peer.Peer) (*Conn, error) {
func (s *Swarm) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
if p.ID().Equal(s.local.ID()) {
if p == s.local {
return nil, errors.New("Attempted connection to self!")
}
@ -31,28 +31,35 @@ func (s *Swarm) Dial(ctx context.Context, p peer.Peer) (*Conn, error) {
}
}
// check if we don't have the peer in Peerstore
p, err := s.peers.Add(p)
if err != nil {
return nil, err
sk := s.peers.PrivKey(s.local)
if sk == nil {
// may be fine for sk to be nil, just log a warning.
log.Warning("Dial not given PrivateKey, so WILL NOT SECURE conn.")
}
remoteAddrs := s.peers.Addresses(p)
if len(remoteAddrs) == 0 {
return nil, errors.New("peer has no addresses")
}
localAddrs := s.peers.Addresses(s.local)
if len(localAddrs) == 0 {
log.Debug("Dialing out with no local addresses.")
}
// open connection to peer
d := &conn.Dialer{
LocalPeer: s.local,
Peerstore: s.peers,
}
if len(p.Addresses()) == 0 {
return nil, errors.New("peer has no addresses")
LocalPeer: s.local,
LocalAddrs: localAddrs,
PrivateKey: sk,
}
// try to connect to one of the peer's known addresses.
// for simplicity, we do this sequentially.
// A future commit will do this asynchronously.
var connC conn.Conn
for _, addr := range p.Addresses() {
connC, err = d.DialAddr(ctx, addr, p)
var err error
for _, addr := range remoteAddrs {
connC, err = d.Dial(ctx, addr, p)
if err == nil {
break
}

View File

@ -6,8 +6,8 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
multierr "github.com/jbenet/go-ipfs/util/multierr"
ps "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
multierr "github.com/jbenet/go-ipfs/util/multierr"
)
// Open listeners for each network the swarm should listen on
@ -35,21 +35,26 @@ func (s *Swarm) listen(addrs []ma.Multiaddr) error {
// Listen for new connections on the given multiaddr
func (s *Swarm) setupListener(maddr ma.Multiaddr) error {
resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr})
// TODO rethink how this has to work. (jbenet)
//
// resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr})
// if err != nil {
// return err
// }
// for _, a := range resolved {
// s.peers.AddAddress(s.local, a)
// }
sk := s.peers.PrivKey(s.local)
if sk == nil {
// may be fine for sk to be nil, just log a warning.
log.Warning("Listener not given PrivateKey, so WILL NOT SECURE conns.")
}
list, err := conn.Listen(s.cg.Context(), maddr, s.local, sk)
if err != nil {
return err
}
list, err := conn.Listen(s.cg.Context(), maddr, s.local, s.peers)
if err != nil {
return err
}
// add resolved local addresses to peer
for _, addr := range resolved {
s.local.AddAddress(addr)
}
// AddListener to the peerstream Listener. this will begin accepting connections
// and streams!
_, err = s.swarm.AddListener(list)
@ -60,21 +65,22 @@ func (s *Swarm) setupListener(maddr ma.Multiaddr) error {
// here we configure it slightly. Note that this is sequential, so if anything
// will take a while do it in a goroutine.
// See https://godoc.org/github.com/jbenet/go-peerstream for more information
func (s *Swarm) connHandler(c *ps.Conn) {
go func() {
ctx := context.Background()
// this context is for running the handshake, which -- when receiveing connections
// -- we have no bound on beyond what the transport protocol bounds it at.
// note that setup + the handshake are bounded by underlying io.
// (i.e. if TCP or UDP disconnects (or the swarm closes), we're done.
// Q: why not have a shorter handshake? think about an HTTP server on really slow conns.
// as long as the conn is live (TCP says its online), it tries its best. we follow suit.)
func (s *Swarm) connHandler(c *ps.Conn) *Conn {
ctx := context.Background()
// this context is for running the handshake, which -- when receiveing connections
// -- we have no bound on beyond what the transport protocol bounds it at.
// note that setup + the handshake are bounded by underlying io.
// (i.e. if TCP or UDP disconnects (or the swarm closes), we're done.
// Q: why not have a shorter handshake? think about an HTTP server on really slow conns.
// as long as the conn is live (TCP says its online), it tries its best. we follow suit.)
if _, err := s.newConnSetup(ctx, c); err != nil {
log.Error(err)
log.Event(ctx, "newConnHandlerDisconnect", lgbl.NetConn(c.NetConn()), lgbl.Error(err))
c.Close() // boom. close it.
return
}
}()
sc, err := s.newConnSetup(ctx, c)
if err != nil {
log.Error(err)
log.Event(ctx, "newConnHandlerDisconnect", lgbl.NetConn(c.NetConn()), lgbl.Error(err))
c.Close() // boom. close it.
return nil
}
return sc
}

View File

@ -7,9 +7,7 @@ import (
"testing"
"time"
ci "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
testutil "github.com/jbenet/go-ipfs/util/testutil"
@ -48,88 +46,68 @@ func EchoStreamHandler(stream *Stream) {
}()
}
func setupPeer(t *testing.T, addr string) peer.Peer {
tcp, err := ma.NewMultiaddr(addr)
if err != nil {
t.Fatal(err)
}
func makeSwarms(ctx context.Context, t *testing.T, num int) ([]*Swarm, []testutil.PeerNetParams) {
swarms := make([]*Swarm, 0, num)
peersnp := make([]testutil.PeerNetParams, 0, num)
sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
if err != nil {
t.Fatal(err)
}
for i := 0; i < num; i++ {
localnp := testutil.RandPeerNetParams(t)
peersnp = append(peersnp, localnp)
p, err := testutil.NewPeerWithKeyPair(sk, pk)
if err != nil {
t.Fatal(err)
}
p.AddAddress(tcp)
return p
}
func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []peer.Peer) {
swarms := []*Swarm{}
for _, addr := range addrs {
local := setupPeer(t, addr)
peerstore := peer.NewPeerstore()
swarm, err := NewSwarm(ctx, local.Addresses(), local, peerstore)
peerstore.AddAddress(localnp.ID, localnp.Addr)
peerstore.AddPubKey(localnp.ID, localnp.PubKey)
peerstore.AddPrivKey(localnp.ID, localnp.PrivKey)
addrs := peerstore.Addresses(localnp.ID)
swarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore)
if err != nil {
t.Fatal(err)
}
swarm.SetStreamHandler(EchoStreamHandler)
swarms = append(swarms, swarm)
}
peers := make([]peer.Peer, len(swarms))
for i, s := range swarms {
peers[i] = s.local
}
return swarms, peers
return swarms, peersnp
}
func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm, peersnp []testutil.PeerNetParams) {
var wg sync.WaitGroup
connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
// TODO: make a DialAddr func.
s.peers.AddAddress(dst, addr)
if _, err := s.Dial(ctx, dst); err != nil {
t.Fatal("error swarm dialing to peer", err)
}
wg.Done()
}
log.Info("Connecting swarms simultaneously.")
for _, s := range swarms {
for _, p := range peersnp {
if p.ID != s.local { // don't connect to self.
wg.Add(1)
connect(s, p.ID, p.Addr)
}
}
}
wg.Wait()
for _, s := range swarms {
log.Infof("%s swarm routing table: %s", s.local, s.Peers())
}
}
func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
// t.Skip("skipping for another test")
ctx := context.Background()
swarms, peers := makeSwarms(ctx, t, addrs)
swarms, peersnp := makeSwarms(ctx, t, SwarmNum)
// connect everyone
{
var wg sync.WaitGroup
connect := func(s *Swarm, dst peer.Peer) {
// copy for other peer
cp, err := s.peers.FindOrCreate(dst.ID())
if err != nil {
t.Fatal(err)
}
cp.AddAddress(dst.Addresses()[0])
log.Infof("SWARM TEST: %s dialing %s", s.local, dst)
if _, err := s.Dial(ctx, cp); err != nil {
t.Fatal("error swarm dialing to peer", err)
}
log.Infof("SWARM TEST: %s connected to %s", s.local, dst)
wg.Done()
}
log.Info("Connecting swarms simultaneously.")
for _, s := range swarms {
for _, p := range peers {
if p != s.local { // don't connect to self.
wg.Add(1)
connect(s, p)
}
}
}
wg.Wait()
for _, s := range swarms {
log.Infof("%s swarm routing table: %s", s.local, s.Peers())
}
}
connectSwarms(t, ctx, swarms, peersnp)
// ping/pong
for _, s1 := range swarms {
@ -138,13 +116,8 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
log.Debugf("-------------------------------------------------------")
_, cancel := context.WithCancel(ctx)
peers, err := s1.peers.All()
if err != nil {
t.Fatal(err)
}
got := map[u.Key]int{}
errChan := make(chan error, MsgNum*len(*peers))
got := map[peer.ID]int{}
errChan := make(chan error, MsgNum*len(peersnp))
streamChan := make(chan *Stream, MsgNum)
// send out "ping" x MsgNum to every peer
@ -152,7 +125,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
defer close(streamChan)
var wg sync.WaitGroup
send := func(p peer.Peer) {
send := func(p peer.ID) {
defer wg.Done()
// first, one stream per peer (nice)
@ -173,13 +146,13 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
streamChan <- stream
}
for _, p := range *peers {
if p == s1.local {
for _, p := range peersnp {
if p.ID == s1.local {
continue // dont send to self...
}
wg.Add(1)
go send(p)
go send(p.ID)
}
wg.Wait()
}()
@ -188,7 +161,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
go func() {
defer close(errChan)
count := 0
countShouldBe := MsgNum * (len(*peers) - 1)
countShouldBe := MsgNum * (len(peersnp) - 1)
for stream := range streamChan { // one per peer
defer stream.Close()
@ -215,7 +188,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
msgCount++
}
got[p.Key()] = msgCount
got[p] = msgCount
count += msgCount
}
@ -232,8 +205,8 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
}
log.Debugf("%s got pongs", s1.local)
if (len(*peers) - 1) != len(got) {
t.Error("got less messages than sent")
if (len(peersnp) - 1) != len(got) {
t.Errorf("got (%d) less messages than sent (%d).", len(got), len(peersnp))
}
for p, n := range got {
@ -254,15 +227,36 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
func TestSwarm(t *testing.T) {
// t.Skip("skipping for another test")
addrs := []string{
"/ip4/127.0.0.1/tcp/10234",
"/ip4/127.0.0.1/tcp/10235",
"/ip4/127.0.0.1/tcp/10236",
"/ip4/127.0.0.1/tcp/10237",
"/ip4/127.0.0.1/tcp/10238",
}
// msgs := 1000
msgs := 100
SubtestSwarm(t, addrs, msgs)
swarms := 5
SubtestSwarm(t, swarms, msgs)
}
// TestConnHandler verifies that a swarm's ConnHandler callback fires once
// for every connection established to it by the other swarms.
func TestConnHandler(t *testing.T) {
	// t.Skip("skipping for another test")

	ctx := context.Background()
	swarms, peersnp := makeSwarms(ctx, t, 5)

	// buffered so the handler never blocks the dialing goroutines.
	gotconn := make(chan struct{}, 10)
	swarms[0].SetConnHandler(func(conn *Conn) {
		gotconn <- struct{}{}
	})

	connectSwarms(t, ctx, swarms, peersnp)

	<-time.After(time.Millisecond)
	// should've gotten 4 by now (one conn per other swarm; not self).

	close(gotconn)

	expect := 4
	actual := 0
	for range gotconn {
		actual++
	}

	if actual != expect {
		// BUGFIX: was t.Fatal (no formatting) with (actual, expect) swapped
		// relative to the message.
		t.Fatalf("should have connected to %d swarms. got: %d", expect, actual)
	}
}

63
peer/metrics.go Normal file
View File

@ -0,0 +1,63 @@
package peer
import (
"sync"
"time"
)
// LatencyEWMASmoothing governs the decay of the EWMA (the speed
// at which it changes). This must be a normalized (0-1) value.
// 1 is 100% change, 0 is no change.
var LatencyEWMASmoothing = 0.1
// Metrics is just an object that tracks metrics
// (currently only latency) across a set of peers.
type Metrics interface {
	// RecordLatency records a new latency measurement for the given peer.
	RecordLatency(ID, time.Duration)

	// LatencyEWMA returns an exponentially-weighted moving avg.
	// of all measurements of a peer's latency.
	LatencyEWMA(ID) time.Duration
}
// metrics is the default mutex-guarded implementation of Metrics.
type metrics struct {
	latmap map[ID]time.Duration // current latency EWMA per peer
	latmu  sync.RWMutex         // guards latmap
}

// NewMetrics returns a new, empty Metrics tracker.
func NewMetrics() Metrics {
	return &metrics{
		latmap: make(map[ID]time.Duration),
	}
}
// RecordLatency records a new latency measurement, folding it into the
// peer's exponentially-weighted moving average.
func (m *metrics) RecordLatency(p ID, next time.Duration) {
	smoothing := LatencyEWMASmoothing
	if smoothing > 1 || smoothing < 0 {
		smoothing = 0.1 // ignore the knob. it's broken. look, it jiggles.
	}

	m.latmu.Lock()
	defer m.latmu.Unlock()

	prev, found := m.latmap[p]
	if !found {
		// First sample for this peer: just take it as the mean.
		m.latmap[p] = next
		return
	}

	// Blend the previous average with the new sample.
	blended := (1.0-smoothing)*float64(prev) + smoothing*float64(next)
	m.latmap[p] = time.Duration(blended)
}
// LatencyEWMA returns an exponentially-weighted moving avg.
// of all measurements of a peer's latency. The zero value is
// returned for peers with no recorded samples.
func (m *metrics) LatencyEWMA(p ID) time.Duration {
	m.latmu.RLock()
	defer m.latmu.RUnlock()
	return m.latmap[p]
}

40
peer/metrics_test.go Normal file
View File

@ -0,0 +1,40 @@
package peer_test
import (
"fmt"
"math/rand"
"testing"
"time"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
// TestLatencyEWMAFun is an interactive demo: it feeds normally-distributed
// random latency samples into the metrics tracker forever and prints the
// running average. Skipped by default — run it for fun.
func TestLatencyEWMAFun(t *testing.T) {
	t.Skip("run it for fun")

	m := peer.NewMetrics()
	id, err := testutil.RandPeerID()
	if err != nil {
		t.Fatal(err)
	}

	mean := 100.0
	stddev := 10.0
	sample := func() time.Duration {
		mean = rand.NormFloat64()*stddev + mean
		return time.Duration(mean)
	}

	report := func() {
		fmt.Printf("%3.f %3.f --> %d\n", stddev, mean, m.LatencyEWMA(id))
	}

	for {
		<-time.After(200 * time.Millisecond)
		m.RecordLatency(id, sample())
		report()
	}
}

View File

@ -2,11 +2,8 @@
package peer
import (
"bytes"
"errors"
"encoding/hex"
"fmt"
"sync"
"time"
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
@ -18,139 +15,18 @@ import (
var log = u.Logger("peer")
// ID is a byte slice representing the identity of a peer.
type ID mh.Multihash
// String is utililty function for printing out peer ID strings.
func (id ID) String() string {
return id.Pretty()
}
// Equal is utililty function for comparing two peer ID's
func (id ID) Equal(other ID) bool {
return bytes.Equal(id, other)
}
// ID represents the identity of a peer.
type ID string
// Pretty returns a b58-encoded string of the ID
func (id ID) Pretty() string {
return b58.Encode(id)
return IDB58Encode(id)
}
// DecodePrettyID returns a b58-encoded string of the ID
func DecodePrettyID(s string) (ID, error) {
m, err := mh.FromB58String(s)
if err != nil {
return nil, err
func (id ID) Loggable() map[string]interface{} {
return map[string]interface{}{
"peerID": id.Pretty(),
}
return ID(m), err
}
// IDFromPubKey retrieves a Public Key from the peer given by pk
func IDFromPubKey(pk ic.PubKey) (ID, error) {
b, err := pk.Bytes()
if err != nil {
return nil, err
}
hash := u.Hash(b)
return ID(hash), nil
}
// Map maps Key (string) : *peer (slices are not comparable).
type Map map[u.Key]Peer
// Peer represents the identity information of an IPFS Node, including
// ID, and relevant Addresses.
type Peer interface {
// TODO reduce the peer interface to be read-only. Force mutations to occur
// on the peer store eg. peerstore.SetLatency(peerId, value).
// ID returns the peer's ID
ID() ID
// Key returns the ID as a Key (string) for maps.
Key() u.Key
// Addresses returns the peer's multiaddrs
Addresses() []ma.Multiaddr
// AddAddress adds the given Multiaddr address to Peer's addresses.
// returns whether this was a newly added address.
AddAddress(a ma.Multiaddr) bool
// NetAddress returns the first Multiaddr found for a given network.
NetAddress(n string) ma.Multiaddr
// Services returns the peer's services
// Services() []mux.ProtocolID
// SetServices([]mux.ProtocolID)
// Priv/PubKey returns the peer's Private Key
PrivKey() ic.PrivKey
PubKey() ic.PubKey
// LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair.
// Error if (a) unmarshalling fails, or (b) pubkey does not match id.
LoadAndVerifyKeyPair(marshalled []byte) error
VerifyAndSetPrivKey(sk ic.PrivKey) error
VerifyAndSetPubKey(pk ic.PubKey) error
// Get/SetLatency manipulate the current latency measurement.
GetLatency() (out time.Duration)
SetLatency(laten time.Duration)
// Get/SetType indicate whether this is a local or remote peer
GetType() Type
SetType(Type)
//Get/Set Agent and Protocol Versions
GetVersions() (agent, protocol string)
SetVersions(agent, protocol string)
// Update with the data of another peer instance
Update(Peer) error
Loggable() map[string]interface{}
}
type Type uint8
const (
// Unspecified indicates peer was created without specifying Type
Unspecified Type = iota
Local
Remote
)
func (t Type) String() string {
switch t {
case Local:
return "localPeer"
case Remote:
return "remotePeer"
default:
}
return "unspecifiedPeer"
}
type peer struct {
id ID
addresses []ma.Multiaddr
// services []mux.ProtocolID
privKey ic.PrivKey
pubKey ic.PubKey
// TODO move latency away from peer into the package that uses it. Instead,
// within that package, map from ID to latency value.
latency time.Duration
protocolVersion string
agentVersion string
// typ can be Local, Remote, or Unspecified (default)
typ Type
sync.RWMutex
}
// String prints out the peer.
@ -159,242 +35,98 @@ type peer struct {
// enforce this by only exposing functions that generate
// IDs safely. Then any peer.ID type found in the
// codebase is known to be correct.
func (p *peer) String() string {
pid := p.id.String()
func (id ID) String() string {
pid := id.Pretty()
maxRunes := 6
if len(pid) < maxRunes {
maxRunes = len(pid)
}
return fmt.Sprintf("peer %s", pid[:maxRunes])
return fmt.Sprintf("<peer.ID %s>", pid[:maxRunes])
}
func (p *peer) Loggable() map[string]interface{} {
return map[string]interface{}{
p.GetType().String(): map[string]interface{}{
"id": p.ID().String(),
"latency": p.GetLatency(),
},
}
// MatchesPrivateKey tests whether this ID was derived from sk
func (id ID) MatchesPrivateKey(sk ic.PrivKey) bool {
return id.MatchesPublicKey(sk.GetPublic())
}
// Key returns the ID as a Key (string) for maps.
func (p *peer) Key() u.Key {
return u.Key(p.id)
}
// ID returns the peer's ID
func (p *peer) ID() ID {
return p.id
}
// PrivKey returns the peer's Private Key
func (p *peer) PrivKey() ic.PrivKey {
return p.privKey
}
// PubKey returns the peer's Private Key
func (p *peer) PubKey() ic.PubKey {
return p.pubKey
}
// Addresses returns the peer's multiaddrs
func (p *peer) Addresses() []ma.Multiaddr {
cp := make([]ma.Multiaddr, len(p.addresses))
p.RLock()
copy(cp, p.addresses)
defer p.RUnlock()
return cp
}
// AddAddress adds the given Multiaddr address to Peer's addresses.
// Returns whether this address was a newly added address
func (p *peer) AddAddress(a ma.Multiaddr) bool {
if a == nil {
panic("adding a nil Multiaddr")
}
p.Lock()
defer p.Unlock()
for _, addr := range p.addresses {
if addr.Equal(a) {
return false
}
}
p.addresses = append(p.addresses, a)
return true
}
// NetAddress returns the first Multiaddr found for a given network.
func (p *peer) NetAddress(n string) ma.Multiaddr {
p.RLock()
defer p.RUnlock()
for _, a := range p.addresses {
for _, p := range a.Protocols() {
if p.Name == n {
return a
}
}
}
return nil
}
// func (p *peer) Services() []mux.ProtocolID {
// p.RLock()
// defer p.RUnlock()
// return p.services
// }
//
// func (p *peer) SetServices(s []mux.ProtocolID) {
// p.Lock()
// defer p.Unlock()
// p.services = s
// }
// GetLatency retrieves the current latency measurement.
func (p *peer) GetLatency() (out time.Duration) {
p.RLock()
out = p.latency
p.RUnlock()
return
}
// SetLatency sets the latency measurement.
// TODO: Instead of just keeping a single number,
// keep a running average over the last hour or so
// Yep, should be EWMA or something. (-jbenet)
func (p *peer) SetLatency(laten time.Duration) {
p.Lock()
if p.latency == 0 {
p.latency = laten
} else {
p.latency = ((p.latency * 9) + laten) / 10
}
p.Unlock()
}
func (p *peer) SetType(t Type) {
p.Lock()
p.typ = t
defer p.Unlock()
}
func (p *peer) GetType() Type {
p.Lock()
defer p.Unlock()
return p.typ
}
// LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair.
// Error if (a) unmarshalling fails, or (b) pubkey does not match id.
func (p *peer) LoadAndVerifyKeyPair(marshalled []byte) error {
sk, err := ic.UnmarshalPrivateKey(marshalled)
// MatchesPublicKey tests whether this ID was derived from pk
func (id ID) MatchesPublicKey(pk ic.PubKey) bool {
oid, err := IDFromPublicKey(pk)
if err != nil {
return fmt.Errorf("Failed to unmarshal private key: %v", err)
return false
}
return p.VerifyAndSetPrivKey(sk)
return oid == id
}
// VerifyAndSetPrivKey sets private key, given its pubkey matches the peer.ID
func (p *peer) VerifyAndSetPrivKey(sk ic.PrivKey) error {
// construct and assign pubkey. ensure it matches this peer
if err := p.VerifyAndSetPubKey(sk.GetPublic()); err != nil {
return err
// IDFromString cast a string to ID type, and validate
// the id to make sure it is a multihash.
func IDFromString(s string) (ID, error) {
if _, err := mh.Cast([]byte(s)); err != nil {
return ID(""), err
}
p.Lock()
defer p.Unlock()
// if we didn't have the priavte key, assign it
if p.privKey == nil {
p.privKey = sk
return nil
}
// if we already had the keys, check they're equal.
if p.privKey.Equals(sk) {
return nil // as expected. keep the old objects.
}
// keys not equal. invariant violated. this warrants a panic.
// these keys should be _the same_ because peer.ID = H(pk)
// this mismatch should never happen.
log.Errorf("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
panic("invariant violated: unexpected key mismatch")
return ID(s), nil
}
// VerifyAndSetPubKey sets public key, given it matches the peer.ID
func (p *peer) VerifyAndSetPubKey(pk ic.PubKey) error {
pkid, err := IDFromPubKey(pk)
// IDFromBytes casts a byte slice to the ID type, validating
// the value to make sure it is a well-formed multihash.
func IDFromBytes(b []byte) (ID, error) {
	if _, err := mh.Cast(b); err != nil {
		return ID(""), err
	}
	return ID(b), nil
}
// IDB58Decode returns a b58-decoded Peer
func IDB58Decode(s string) (ID, error) {
m, err := mh.FromB58String(s)
if err != nil {
return fmt.Errorf("Failed to hash public key: %v", err)
return "", err
}
p.Lock()
defer p.Unlock()
if !p.id.Equal(pkid) {
return fmt.Errorf("Public key does not match peer.ID.")
}
// if we didn't have the keys, assign them.
if p.pubKey == nil {
p.pubKey = pk
return nil
}
// if we already had the pubkey, check they're equal.
if p.pubKey.Equals(pk) {
return nil // as expected. keep the old objects.
}
// keys not equal. invariant violated. this warrants a panic.
// these keys should be _the same_ because peer.ID = H(pk)
// this mismatch should never happen.
log.Errorf("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
panic("invariant violated: unexpected key mismatch")
return ID(m), err
}
// Updates this peer with information from another peer instance
func (p *peer) Update(other Peer) error {
if !p.ID().Equal(other.ID()) {
return errors.New("peer ids do not match")
}
for _, a := range other.Addresses() {
p.AddAddress(a)
}
p.SetLatency(other.GetLatency())
p.SetType(other.GetType())
sk := other.PrivKey()
pk := other.PubKey()
p.Lock()
if p.privKey == nil {
p.privKey = sk
}
if p.pubKey == nil {
p.pubKey = pk
}
defer p.Unlock()
return nil
// IDB58Encode returns b58-encoded string
func IDB58Encode(id ID) string {
return b58.Encode([]byte(id))
}
func (p *peer) GetVersions() (agent, protocol string) {
p.RLock()
defer p.RUnlock()
return p.agentVersion, p.protocolVersion
// IDHexDecode returns a hex-decoded peer ID.
// (Fixed comment: previously claimed b58 decoding.)
func IDHexDecode(s string) (ID, error) {
	m, err := mh.FromHexString(s)
	if err != nil {
		return "", err
	}
	return ID(m), err
}
func (p *peer) SetVersions(agent, protocol string) {
p.Lock()
defer p.Unlock()
p.agentVersion = agent
p.protocolVersion = protocol
// IDHexEncode returns the hex-encoded string form of the ID.
// (Fixed comment: previously claimed b58 encoding.)
func IDHexEncode(id ID) string {
	return hex.EncodeToString([]byte(id))
}
// IDFromPublicKey returns the Peer ID corresponding to pk:
// the multihash of the public key's serialized bytes.
func IDFromPublicKey(pk ic.PubKey) (ID, error) {
	b, err := pk.Bytes()
	if err != nil {
		return "", err
	}
	hash := u.Hash(b)
	return ID(hash), nil
}
// IDFromPrivateKey returns the Peer ID corresponding to sk,
// i.e. the ID derived from sk's public key.
func IDFromPrivateKey(sk ic.PrivKey) (ID, error) {
	return IDFromPublicKey(sk.GetPublic())
}
// Set is a set of peer IDs (a map with empty-struct values,
// so membership costs no per-entry storage).
type Set map[ID]struct{}
// PeerInfo is a small struct used to pass around a peer with
// a set of addresses (and later, keys?). This is not meant to be
// a complete view of the system, but rather to model updates to
// the peerstore. It is used by things like the routing system.
type PeerInfo struct {
	ID    ID             // the peer's identity
	Addrs []ma.Multiaddr // known multiaddrs for the peer
}

View File

@ -1,65 +1,161 @@
package peer
import (
"encoding/base64"
"fmt"
"strings"
"testing"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
ic "github.com/jbenet/go-ipfs/crypto"
u "github.com/jbenet/go-ipfs/util"
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
)
func TestNetAddress(t *testing.T) {
var gen1 keyset // generated
var gen2 keyset // generated
var man keyset // manual
tcp, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1234")
if err != nil {
t.Error(err)
return
func init() {
if err := gen1.generate(); err != nil {
panic(err)
}
if err := gen2.generate(); err != nil {
panic(err)
}
udp, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/2345")
if err != nil {
t.Error(err)
return
}
mh, err := mh.FromHexString("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
if err != nil {
t.Error(err)
return
}
p := NewPeerstore().WithID(ID(mh))
p.AddAddress(tcp)
p.AddAddress(udp)
p.AddAddress(tcp)
if len(p.Addresses()) == 3 {
t.Error("added same address twice")
}
tcp2 := p.NetAddress("tcp")
if tcp2 != tcp {
t.Error("NetAddress lookup failed", tcp, tcp2)
}
udp2 := p.NetAddress("udp")
if udp2 != udp {
t.Error("NetAddress lookup failed", udp, udp2)
skManBytes = strings.Replace(skManBytes, "\n", "", -1)
if err := man.load(hpkpMan, skManBytes); err != nil {
panic(err)
}
}
func TestStringMethodWithSmallId(t *testing.T) {
p := NewPeerstore().WithID([]byte(string(0)))
p1, ok := p.(*peer)
if !ok {
t.Fatal("WithID doesn't return a peer")
}
p1.String()
// keyset bundles a key pair with its derived peer identity.
type keyset struct {
	sk   ic.PrivKey
	pk   ic.PubKey
	hpk  string // multihash of the public key bytes (raw)
	hpkp string // multihash of the public key bytes, b58-encoded ("pretty")
}
func TestDefaultType(t *testing.T) {
t.Log("Ensure that peers are initialized to Unspecified by default")
p := peer{}
if p.GetType() != Unspecified {
t.Fatalf("Peer's default type is was not `Unspecified`")
// generate fills ks with a freshly generated RSA key pair and the
// corresponding hashed-public-key identifiers (hpk, hpkp).
func (ks *keyset) generate() error {
	var err error
	ks.sk, ks.pk, err = ic.GenerateKeyPair(ic.RSA, 1024)
	if err != nil {
		return err
	}

	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}

	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	return nil
}
// load fills ks from a base64-encoded marshalled private key, deriving
// the public key and identifiers, and verifies that the derived
// b58 identifier matches the expected hpkp.
func (ks *keyset) load(hpkp, skBytesStr string) error {
	skBytes, err := base64.StdEncoding.DecodeString(skBytesStr)
	if err != nil {
		return err
	}

	ks.sk, err = ic.UnmarshalPrivateKey(skBytes)
	if err != nil {
		return err
	}

	ks.pk = ks.sk.GetPublic()
	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}

	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	if ks.hpkp != hpkp {
		return fmt.Errorf("hpkp doesn't match key. %s", hpkp)
	}
	return nil
}
// TestIDMatchesPublicKey checks that IDs round-trip through b58 decoding
// and that MatchesPublicKey / IDFromPublicKey / Pretty all agree, for
// both generated and manually-loaded keysets.
func TestIDMatchesPublicKey(t *testing.T) {

	test := func(ks keyset) {
		p1, err := IDB58Decode(ks.hpkp)
		if err != nil {
			t.Fatal(err)
		}

		if ks.hpk != string(p1) {
			t.Error("p1 and hpk differ")
		}

		if !p1.MatchesPublicKey(ks.pk) {
			t.Fatal("p1 does not match pk")
		}

		p2, err := IDFromPublicKey(ks.pk)
		if err != nil {
			t.Fatal(err)
		}

		if p1 != p2 {
			t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty())
		}

		if p2.Pretty() != ks.hpkp {
			t.Error("hpkp and p2.Pretty differ", ks.hpkp, p2.Pretty())
		}
	}

	test(gen1)
	test(gen2)
	test(man)
}
// TestIDMatchesPrivateKey mirrors TestIDMatchesPublicKey for the
// private-key side: MatchesPrivateKey and IDFromPrivateKey must agree
// with the b58-decoded identifier.
func TestIDMatchesPrivateKey(t *testing.T) {

	test := func(ks keyset) {
		p1, err := IDB58Decode(ks.hpkp)
		if err != nil {
			t.Fatal(err)
		}

		if ks.hpk != string(p1) {
			t.Error("p1 and hpk differ")
		}

		if !p1.MatchesPrivateKey(ks.sk) {
			t.Fatal("p1 does not match sk")
		}

		p2, err := IDFromPrivateKey(ks.sk)
		if err != nil {
			t.Fatal(err)
		}

		if p1 != p2 {
			t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty())
		}
	}

	test(gen1)
	test(gen2)
	test(man)
}
var hpkpMan = `QmRK3JgmVEGiewxWbhpXLJyjWuGuLeSTMTndA1coMHEy5o`
var skManBytes = `
CAAS4AQwggJcAgEAAoGBAL7w+Wc4VhZhCdM/+Hccg5Nrf4q9NXWwJylbSrXz/unFS24wyk6pEk0zi3W
7li+vSNVO+NtJQw9qGNAMtQKjVTP+3Vt/jfQRnQM3s6awojtjueEWuLYVt62z7mofOhCtj+VwIdZNBo
/EkLZ0ETfcvN5LVtLYa8JkXybnOPsLvK+PAgMBAAECgYBdk09HDM7zzL657uHfzfOVrdslrTCj6p5mo
DzvCxLkkjIzYGnlPuqfNyGjozkpSWgSUc+X+EGLLl3WqEOVdWJtbM61fewEHlRTM5JzScvwrJ39t7o6
CCAjKA0cBWBd6UWgbN/t53RoWvh9HrA2AW5YrT0ZiAgKe9y7EMUaENVJ8QJBAPhpdmb4ZL4Fkm4OKia
NEcjzn6mGTlZtef7K/0oRC9+2JkQnCuf6HBpaRhJoCJYg7DW8ZY+AV6xClKrgjBOfERMCQQDExhnzu2
dsQ9k8QChBlpHO0TRbZBiQfC70oU31kM1AeLseZRmrxv9Yxzdl8D693NNWS2JbKOXl0kMHHcuGQLMVA
kBZ7WvkmPV3aPL6jnwp2pXepntdVnaTiSxJ1dkXShZ/VSSDNZMYKY306EtHrIu3NZHtXhdyHKcggDXr
qkBrdgErAkAlpGPojUwemOggr4FD8sLX1ot2hDJyyV7OK2FXfajWEYJyMRL1Gm9Uk1+Un53RAkJneqp
JGAzKpyttXBTIDO51AkEA98KTiROMnnU8Y6Mgcvr68/SMIsvCYMt9/mtwSBGgl80VaTQ5Hpaktl6Xbh
VUt5Wv0tRxlXZiViCGCD1EtrrwTw==
`

View File

@ -1,132 +1,252 @@
package peer
import (
"errors"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
// Peerstore provides a threadsafe collection for peers.
// Peerstore provides a threadsafe store of Peer related
// information.
type Peerstore interface {
FindOrCreate(ID) (Peer, error)
Add(Peer) (Peer, error)
Delete(ID) error
All() (*Map, error)
KeyBook
AddressBook
Metrics
WithKeyPair(sk ic.PrivKey, pk ic.PubKey) (Peer, error)
WithID(id ID) Peer
WithIDString(id string) Peer
// Peers returns a list of all peer.IDs in this Peerstore
Peers() []ID
// PeerInfo returns a peer.PeerInfo struct for given peer.ID.
// This is a small slice of the information Peerstore has on
// that peer, useful to other services.
PeerInfo(ID) PeerInfo
// Get/Put is a simple registry for other peer-related key/value pairs.
// if we find something we use often, it should become its own set of
// methods. this is a last resort.
Get(id ID, key string) (interface{}, error)
Put(id ID, key string, val interface{}) error
}
// AddressBook tracks the multiaddr addresses of Peers.
type AddressBook interface {
	// Addresses returns the known addresses for a peer.
	Addresses(ID) []ma.Multiaddr
	// AddAddress records one address for a peer.
	AddAddress(ID, ma.Multiaddr)
	// AddAddresses records several addresses for a peer.
	AddAddresses(ID, []ma.Multiaddr)
}
// addressMap maps a multiaddr's string form to the multiaddr itself,
// deduplicating addresses by their string representation.
type addressMap map[string]ma.Multiaddr

// addressbook is the default mutex-guarded AddressBook implementation.
type addressbook struct {
	addrs map[ID]addressMap
	sync.RWMutex // guards addrs
}

// newAddressbook returns an empty addressbook.
func newAddressbook() *addressbook {
	return &addressbook{addrs: map[ID]addressMap{}}
}
// Peers returns the IDs of all peers with at least one recorded address.
func (ab *addressbook) Peers() []ID {
	ab.RLock()
	defer ab.RUnlock()

	ids := make([]ID, 0, len(ab.addrs))
	for id := range ab.addrs {
		ids = append(ids, id)
	}
	return ids
}
// Addresses returns the multiaddrs recorded for peer p,
// or nil if none are known.
func (ab *addressbook) Addresses(p ID) []ma.Multiaddr {
	ab.RLock()
	defer ab.RUnlock()

	maddrs, found := ab.addrs[p]
	if !found {
		return nil
	}

	// copy into a fresh slice so callers cannot mutate internal state.
	maddrs2 := make([]ma.Multiaddr, 0, len(maddrs))
	for _, m := range maddrs {
		maddrs2 = append(maddrs2, m)
	}
	return maddrs2
}
// AddAddress records one multiaddr for peer p. Duplicate addresses
// (same string form) are overwritten, not appended.
func (ab *addressbook) AddAddress(p ID, m ma.Multiaddr) {
	ab.Lock()
	defer ab.Unlock()

	_, found := ab.addrs[p]
	if !found {
		ab.addrs[p] = addressMap{}
	}
	ab.addrs[p][m.String()] = m
}
// AddAddresses records several multiaddrs for peer p. Duplicate
// addresses (same string form) are overwritten, not appended.
func (ab *addressbook) AddAddresses(p ID, ms []ma.Multiaddr) {
	ab.Lock()
	defer ab.Unlock()

	// Ensure the peer's address map exists once, instead of re-checking
	// the (loop-invariant) condition on every iteration.
	if _, found := ab.addrs[p]; !found {
		ab.addrs[p] = addressMap{}
	}
	for _, m := range ms {
		ab.addrs[p][m.String()] = m
	}
}
// KeyBook tracks the public (and optionally private) keys of Peers.
type KeyBook interface {
	// PubKey returns the public key for a peer, or nil if unknown.
	PubKey(ID) ic.PubKey
	// AddPubKey stores a public key, verifying it matches the ID.
	AddPubKey(ID, ic.PubKey) error

	// PrivKey returns the private key for a peer, or nil if unknown.
	PrivKey(ID) ic.PrivKey
	// AddPrivKey stores a private key, verifying it matches the ID.
	AddPrivKey(ID, ic.PrivKey) error
}
// keybook is the default mutex-guarded KeyBook implementation.
type keybook struct {
	pks map[ID]ic.PubKey
	sks map[ID]ic.PrivKey

	sync.RWMutex // same lock. wont happen a ton.
}

// newKeybook returns an empty keybook.
func newKeybook() *keybook {
	return &keybook{
		pks: map[ID]ic.PubKey{},
		sks: map[ID]ic.PrivKey{},
	}
}
// Peers returns the IDs of all peers for which this keybook holds
// a public and/or private key (each ID listed once).
func (kb *keybook) Peers() []ID {
	kb.RLock()
	ps := make([]ID, 0, len(kb.pks)+len(kb.sks))
	for p := range kb.pks {
		ps = append(ps, p)
	}
	for p := range kb.sks {
		// only add peers not already listed via their public key.
		if _, found := kb.pks[p]; !found {
			ps = append(ps, p)
		}
	}
	kb.RUnlock()
	return ps
}
// PubKey returns the public key recorded for peer p, or nil if unknown.
func (kb *keybook) PubKey(p ID) ic.PubKey {
	kb.RLock()
	pk := kb.pks[p]
	kb.RUnlock()
	return pk
}
// AddPubKey stores the public key for peer p, after verifying that it
// actually corresponds to p's ID. Returns an error if pk is nil or
// does not match p.
func (kb *keybook) AddPubKey(p ID, pk ic.PubKey) error {
	// guard against nil, consistent with AddPrivKey; without this,
	// MatchesPublicKey would dereference a nil key.
	if pk == nil {
		return errors.New("pk is nil (PubKey)")
	}

	// check it's correct first
	if !p.MatchesPublicKey(pk) {
		return errors.New("ID does not match PublicKey")
	}

	kb.Lock()
	kb.pks[p] = pk
	kb.Unlock()
	return nil
}
// PrivKey returns the private key recorded for peer p, or nil if unknown.
func (kb *keybook) PrivKey(p ID) ic.PrivKey {
	kb.RLock()
	sk := kb.sks[p]
	kb.RUnlock()
	return sk
}
// AddPrivKey stores the private key for peer p, after verifying that it
// actually corresponds to p's ID. Returns an error if sk is nil or
// does not match p.
func (kb *keybook) AddPrivKey(p ID, sk ic.PrivKey) error {

	if sk == nil {
		return errors.New("sk is nil (PrivKey)")
	}

	// check it's correct first
	if !p.MatchesPrivateKey(sk) {
		return errors.New("ID does not match PrivateKey")
	}

	kb.Lock()
	kb.sks[p] = sk
	kb.Unlock()
	return nil
}
type peerstore struct {
sync.RWMutex
data map[string]Peer // key is string(ID)
keybook
addressbook
metrics
// store other data, like versions
ds ds.ThreadSafeDatastore
}
// NewPeerstore creates a threadsafe collection of peers.
func NewPeerstore() Peerstore {
return &peerstore{
data: make(map[string]Peer),
keybook: *newKeybook(),
addressbook: *newAddressbook(),
metrics: *(NewMetrics()).(*metrics),
ds: dssync.MutexWrap(ds.NewMapDatastore()),
}
}
func (ps *peerstore) FindOrCreate(i ID) (Peer, error) {
ps.Lock()
defer ps.Unlock()
if i == nil {
panic("wat")
}
p, ok := ps.data[i.String()]
if !ok { // not found, construct it ourselves, add it to datastore, and return.
// TODO(brian) kinda dangerous, no? If ID is invalid and doesn't
// correspond to an actual valid peer ID, this peerstore will return an
// instantiated peer value, allowing the error to propagate. It might
// be better to nip this at the bud by returning nil and making the
// client manually add a Peer. To keep the peerstore in control, this
// can even be a peerstore method that performs cursory validation.
//
// Potential bad case: Suppose values arrive from untrusted providers
// in the DHT.
p = &peer{id: i}
ps.data[i.String()] = p
}
// no error, got it back fine
return p, nil
// Put stores an arbitrary key/value pair for peer p in the underlying
// datastore, namespaced under the peer's ID. Last-resort registry for
// peer data that has no dedicated method.
func (ps *peerstore) Put(p ID, key string, val interface{}) error {
	dsk := ds.NewKey(string(p) + "/" + key)
	return ps.ds.Put(dsk, val)
}
func (p *peerstore) Add(peer Peer) (Peer, error) {
p.Lock()
defer p.Unlock()
existing, ok := p.data[peer.Key().String()]
if !ok { // not found? just add and return.
p.data[peer.Key().String()] = peer
return peer, nil
}
// already here.
if peer == existing {
return peer, nil
}
existing.Update(peer) // must do some merging.
return existing, nil
// Get retrieves a value previously stored via Put for peer p,
// looked up in the datastore under the peer's namespaced key.
func (ps *peerstore) Get(p ID, key string) (interface{}, error) {
	dsk := ds.NewKey(string(p) + "/" + key)
	return ps.ds.Get(dsk)
}
func (p *peerstore) Delete(i ID) error {
p.Lock()
defer p.Unlock()
delete(p.data, i.String())
return nil
}
func (p *peerstore) All() (*Map, error) {
p.Lock()
defer p.Unlock()
ps := Map{}
for k, v := range p.data {
ps[u.Key(k)] = v
func (ps *peerstore) Peers() []ID {
set := map[ID]struct{}{}
for _, p := range ps.keybook.Peers() {
set[p] = struct{}{}
}
return &ps, nil
}
// WithKeyPair returns a Peer object with given keys.
func (ps *peerstore) WithKeyPair(sk ic.PrivKey, pk ic.PubKey) (Peer, error) {
if sk == nil && pk == nil {
return nil, errors.Errorf("PeerWithKeyPair nil keys")
for _, p := range ps.addressbook.Peers() {
set[p] = struct{}{}
}
pk2 := sk.GetPublic()
if pk == nil {
pk = pk2
} else if !pk.Equals(pk2) {
return nil, errors.Errorf("key mismatch. pubkey is not privkey's pubkey")
pps := make([]ID, 0, len(set))
for p := range set {
pps = append(pps, p)
}
return pps
}
pkid, err := IDFromPubKey(pk)
if err != nil {
return nil, errors.Errorf("Failed to hash public key: %v", err)
func (ps *peerstore) PeerInfo(p ID) PeerInfo {
return PeerInfo{
ID: p,
Addrs: ps.addressbook.Addresses(p),
}
p := &peer{id: pkid, pubKey: pk, privKey: sk}
ps.Add(p)
return p, nil
}
// WithID constructs a peer with given ID.
func (ps *peerstore) WithID(id ID) Peer {
p := &peer{id: id}
ps.Add(p)
return p
// PeerInfos converts a list of peer IDs into their PeerInfo records,
// as known by the given Peerstore.
func PeerInfos(ps Peerstore, peers []ID) []PeerInfo {
	pi := make([]PeerInfo, len(peers))
	for i, p := range peers {
		pi[i] = ps.PeerInfo(p)
	}
	return pi
}
// WithIDString constructs a peer with given ID (string).
func (ps *peerstore) WithIDString(id string) Peer {
return ps.WithID(ID(id))
// PeerInfoIDs extracts just the IDs from a list of PeerInfos.
func PeerInfoIDs(pis []PeerInfo) []ID {
	ps := make([]ID, len(pis))
	for i, pi := range pis {
		ps[i] = pi.ID
	}
	return ps
}

View File

@ -1,90 +1,77 @@
package peer
import (
"errors"
"testing"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
func setupPeer(ps Peerstore, id string, addr string) (Peer, error) {
tcp, err := ma.NewMultiaddr(addr)
func IDS(t *testing.T, ids string) ID {
id, err := IDB58Decode(ids)
if err != nil {
return nil, err
t.Fatal(err)
}
p := ps.WithIDString(id)
p.AddAddress(tcp)
return p, nil
return id
}
func TestPeerstore(t *testing.T) {
// MA is a test helper: it parses m as a multiaddr,
// failing the test immediately on error.
func MA(t *testing.T, m string) ma.Multiaddr {
	maddr, err := ma.NewMultiaddr(m)
	if err != nil {
		t.Fatal(err)
	}
	return maddr
}
func TestAddresses(t *testing.T) {
ps := NewPeerstore()
p11, _ := setupPeer(ps, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31", "/ip4/127.0.0.1/tcp/1234")
p21, _ := setupPeer(ps, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32", "/ip4/127.0.0.1/tcp/2345")
// p31, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", "/ip4/127.0.0.1/tcp/3456")
// p41, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34", "/ip4/127.0.0.1/tcp/4567")
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
p13, err := ps.Add(p11)
if err != nil {
t.Error(err)
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma21 := MA(t, "/ip4/1.2.3.2/tcp/1111")
ma22 := MA(t, "/ip4/1.2.3.2/tcp/2222")
ma31 := MA(t, "/ip4/1.2.3.3/tcp/1111")
ma32 := MA(t, "/ip4/1.2.3.3/tcp/2222")
ma33 := MA(t, "/ip4/1.2.3.3/tcp/3333")
ps.AddAddress(id1, ma11)
ps.AddAddress(id2, ma21)
ps.AddAddress(id2, ma22)
ps.AddAddress(id3, ma31)
ps.AddAddress(id3, ma32)
ps.AddAddress(id3, ma33)
test := func(exp, act []ma.Multiaddr) {
if len(exp) != len(act) {
t.Fatal("lengths not the same")
}
for _, a := range exp {
found := false
for _, b := range act {
if a.Equal(b) {
found = true
break
}
}
if !found {
t.Fatal("expected address %s not found", a)
}
}
}
if p13 != p11 {
t.Error("these should be the same")
}
p12, err := ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"))
if err != nil {
t.Error(err)
}
if p11 != p12 {
t.Error(errors.New("peers should be the same"))
}
p23, err := ps.Add(p21)
if err != nil {
t.Error(err)
}
if p23 != p21 {
t.Error("These should be the same")
}
p22, err := ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32"))
if err != nil {
t.Error(err)
}
if p21 != p22 {
t.Error(errors.New("peers should be the same"))
}
_, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"))
if err != nil {
t.Error(errors.New("should not have an error here"))
}
err = ps.Delete(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"))
if err != nil {
t.Error(err)
}
// reconstruct!
_, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"))
if err != nil {
t.Error(errors.New("should not have an error anyway. reconstruct!"))
}
p22, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32"))
if err != nil {
t.Error(err)
}
if p21 != p22 {
t.Error(errors.New("peers should be the same"))
}
// test the Addresses return value
test([]ma.Multiaddr{ma11}, ps.Addresses(id1))
test([]ma.Multiaddr{ma21, ma22}, ps.Addresses(id2))
test([]ma.Multiaddr{ma31, ma32, ma33}, ps.Addresses(id3))
// test also the PeerInfo return
test([]ma.Multiaddr{ma11}, ps.PeerInfo(id1).Addrs)
test([]ma.Multiaddr{ma21, ma22}, ps.PeerInfo(id2).Addrs)
test([]ma.Multiaddr{ma31, ma32, ma33}, ps.PeerInfo(id3).Addrs)
}

View File

@ -13,7 +13,7 @@ import (
// peerMetric tracks a peer and its distance to something else.
type peerMetric struct {
// the peer
peer peer.Peer
peer peer.ID
// big.Int for XOR metric
metric *big.Int
@ -64,11 +64,11 @@ func (pq *distancePQ) Len() int {
return len(pq.heap)
}
func (pq *distancePQ) Enqueue(p peer.Peer) {
func (pq *distancePQ) Enqueue(p peer.ID) {
pq.Lock()
defer pq.Unlock()
distance := ks.XORKeySpace.Key(p.ID()).Distance(pq.from)
distance := ks.XORKeySpace.Key([]byte(p)).Distance(pq.from)
heap.Push(&pq.heap, &peerMetric{
peer: p,
@ -76,7 +76,7 @@ func (pq *distancePQ) Enqueue(p peer.Peer) {
})
}
func (pq *distancePQ) Dequeue() peer.Peer {
func (pq *distancePQ) Dequeue() peer.ID {
pq.Lock()
defer pq.Unlock()

View File

@ -11,8 +11,8 @@ type PeerQueue interface {
Len() int
// Enqueue adds this node to the queue.
Enqueue(peer.Peer)
Enqueue(peer.ID)
// Dequeue retrieves the highest (smallest int) priority node
Dequeue() peer.Peer
Dequeue() peer.ID
}

View File

@ -8,22 +8,18 @@ import (
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
)
func newPeer(id string) peer.Peer {
return testutil.NewPeerWithIDString(id)
}
func TestQueue(t *testing.T) {
p1 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
p2 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")
p3 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
p4 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34")
p5 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
p1 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") // these aren't valid, because need to hex-decode.
p2 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32") // these aren't valid, because need to hex-decode.
p3 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") // these aren't valid, because need to hex-decode.
p4 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34") // these aren't valid, because need to hex-decode.
p5 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") // these aren't valid, because need to hex-decode.
// but they work.
// these are the peer.IDs' XORKeySpace Key values:
// [228 47 151 130 156 102 222 232 218 31 132 94 170 208 80 253 120 103 55 35 91 237 48 157 81 245 57 247 66 150 9 40]
@ -67,10 +63,10 @@ func TestQueue(t *testing.T) {
}
func newPeerTime(t time.Time) peer.Peer {
func newPeerTime(t time.Time) peer.ID {
s := fmt.Sprintf("hmmm time: %v", t)
h := u.Hash([]byte(s))
return testutil.NewPeerWithID(peer.ID(h))
return peer.ID(h)
}
func TestSyncQueue(t *testing.T) {

View File

@ -9,8 +9,8 @@ import (
// ChanQueue makes any PeerQueue synchronizable through channels.
type ChanQueue struct {
Queue PeerQueue
EnqChan chan<- peer.Peer
DeqChan <-chan peer.Peer
EnqChan chan<- peer.ID
DeqChan <-chan peer.ID
}
// NewChanQueue creates a ChanQueue by wrapping pq.
@ -23,8 +23,8 @@ func NewChanQueue(ctx context.Context, pq PeerQueue) *ChanQueue {
func (cq *ChanQueue) process(ctx context.Context) {
// construct the channels here to be able to use them bidirectionally
enqChan := make(chan peer.Peer, 10)
deqChan := make(chan peer.Peer, 10)
enqChan := make(chan peer.ID)
deqChan := make(chan peer.ID)
cq.EnqChan = enqChan
cq.DeqChan = deqChan
@ -32,8 +32,8 @@ func (cq *ChanQueue) process(ctx context.Context) {
go func() {
defer close(deqChan)
var next peer.Peer
var item peer.Peer
var next peer.ID
var item peer.ID
var more bool
for {
@ -60,10 +60,10 @@ func (cq *ChanQueue) process(ctx context.Context) {
cq.Queue.Enqueue(item)
cq.Queue.Enqueue(next)
next = nil
next = ""
case deqChan <- next:
next = nil
next = ""
case <-ctx.Done():
return

View File

@ -34,11 +34,10 @@ const doPinging = false
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
network inet.Network // the network services we need
self peer.Peer // Local peer (yourself)
peerstore peer.Peerstore // Other peers
self peer.ID // Local peer (yourself)
peerstore peer.Peerstore // Peer Registry
datastore ds.Datastore // Local data
dslock sync.Mutex
datastore ds.ThreadSafeDatastore // Local data
routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes
providers *ProviderManager
@ -53,19 +52,19 @@ type IpfsDHT struct {
}
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, n inet.Network, dstore ds.Datastore) *IpfsDHT {
func NewDHT(ctx context.Context, p peer.ID, n inet.Network, dstore ds.ThreadSafeDatastore) *IpfsDHT {
dht := new(IpfsDHT)
dht.datastore = dstore
dht.self = p
dht.peerstore = ps
dht.peerstore = n.Peerstore()
dht.ContextGroup = ctxgroup.WithContext(ctx)
dht.network = n
n.SetHandler(inet.ProtocolDHT, dht.handleNewStream)
dht.providers = NewProviderManager(dht.Context(), p.ID())
dht.providers = NewProviderManager(dht.Context(), p)
dht.AddChildGroup(dht.providers)
dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Minute)
dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(p), time.Minute, dht.peerstore)
dht.birth = time.Now()
dht.Validators = make(map[string]ValidatorFunc)
@ -78,8 +77,13 @@ func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, n inet.Network,
return dht
}
// LocalPeer returns the peer.Peer of the dht.
func (dht *IpfsDHT) LocalPeer() peer.ID {
return dht.self
}
// Connect to a new peer at the given address, ping and add to the routing table
func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) error {
func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.ID) error {
if err := dht.network.DialPeer(ctx, npeer); err != nil {
return err
}
@ -95,7 +99,8 @@ func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) error {
}
// putValueToNetwork stores the given key/value pair at the peer 'p'
func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
// meaning: it sends a PUT_VALUE message to p
func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.ID,
key string, rec *pb.Record) error {
pmes := pb.NewMessage(pb.Message_PUT_VALUE, string(key), 0)
@ -113,25 +118,30 @@ func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
// putProvider sends a message to peer 'p' saying that the local node
// can provide the value of 'key'
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) error {
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, key string) error {
pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(key), 0)
// add self as the provider
pmes.ProviderPeers = pb.PeersToPBPeers(dht.network, []peer.Peer{dht.self})
pi := dht.peerstore.PeerInfo(dht.self)
pmes.ProviderPeers = pb.PeerInfosToPBPeers(dht.network, []peer.PeerInfo{pi})
err := dht.sendMessage(ctx, p, pmes)
if err != nil {
return err
}
log.Debugf("%s putProvider: %s for %s", dht.self, p, u.Key(key))
log.Debugf("%s putProvider: %s for %s (%s)", dht.self, p, u.Key(key), pi.Addrs)
return nil
}
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
key u.Key) ([]byte, []peer.Peer, error) {
// getValueOrPeers queries a particular peer p for the value for
// key. It returns either the value or a list of closer peers.
// NOTE: it will update the dht's peerstore with any new addresses
// it finds for the given peer.
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID,
key u.Key) ([]byte, []peer.PeerInfo, error) {
pmes, err := dht.getValueSingle(ctx, p, key)
if err != nil {
@ -142,8 +152,8 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
// Success! We were given the value
log.Debug("getValueOrPeers: got value")
// make sure record is still valid
err = dht.verifyRecord(record)
// make sure record is valid.
err = dht.verifyRecordOnline(ctx, record)
if err != nil {
log.Error("Received invalid record!")
return nil, nil, err
@ -151,24 +161,8 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
return record.GetValue(), nil, nil
}
// TODO decide on providers. This probably shouldn't be happening.
if prv := pmes.GetProviderPeers(); prv != nil && len(prv) > 0 {
val, err := dht.getFromPeerList(ctx, key, prv)
if err != nil {
return nil, nil, err
}
log.Debug("getValueOrPeers: get from providers")
return val, nil, nil
}
// Perhaps we were given closer peers
peers, errs := pb.PBPeersToPeers(dht.peerstore, pmes.GetCloserPeers())
for _, err := range errs {
if err != nil {
log.Error(err)
}
}
peers := pb.PBPeersToPeerInfos(pmes.GetCloserPeers())
if len(peers) > 0 {
log.Debug("getValueOrPeers: peers")
return nil, peers, nil
@ -179,51 +173,16 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
}
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer,
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.ID,
key u.Key) (*pb.Message, error) {
pmes := pb.NewMessage(pb.Message_GET_VALUE, string(key), 0)
return dht.sendRequest(ctx, p, pmes)
}
// TODO: Im not certain on this implementation, we get a list of peers/providers
// from someone what do we do with it? Connect to each of them? randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection and request the value from it?
func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key,
peerlist []*pb.Message_Peer) ([]byte, error) {
for _, pinfo := range peerlist {
p, err := dht.ensureConnectedToPeer(ctx, pinfo)
if err != nil {
log.Errorf("getFromPeers error: %s", err)
continue
}
pmes, err := dht.getValueSingle(ctx, p, key)
if err != nil {
log.Errorf("getFromPeers error: %s\n", err)
continue
}
if record := pmes.GetRecord(); record != nil {
// Success! We were given the value
err := dht.verifyRecord(record)
if err != nil {
return nil, err
}
dht.providers.AddProvider(key, p)
return record.GetValue(), nil
}
}
return nil, routing.ErrNotFound
}
// getLocal attempts to retrieve the value from the datastore
func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
dht.dslock.Lock()
defer dht.dslock.Unlock()
log.Debug("getLocal %s", key)
v, err := dht.datastore.Get(key.DsKey())
if err != nil {
@ -243,7 +202,7 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
// TODO: 'if paranoid'
if u.Debug {
err = dht.verifyRecord(rec)
err = dht.verifyRecordLocally(rec)
if err != nil {
log.Errorf("local record verify failed: %s", err)
return nil, err
@ -269,41 +228,40 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
// Update signals the routingTable to Update its last-seen status
// on the given peer.
func (dht *IpfsDHT) Update(ctx context.Context, p peer.Peer) {
func (dht *IpfsDHT) Update(ctx context.Context, p peer.ID) {
log.Event(ctx, "updatePeer", p)
dht.routingTable.Update(p)
}
// FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.Peer, *kb.RoutingTable) {
func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.PeerInfo, *kb.RoutingTable) {
p := dht.routingTable.Find(id)
if p != nil {
return p, dht.routingTable
if p != "" {
return dht.peerstore.PeerInfo(p), dht.routingTable
}
return nil, nil
return peer.PeerInfo{}, nil
}
// findPeerSingle asks peer 'p' if they know where the peer with id 'id' is
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID) (*pb.Message, error) {
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.ID, id peer.ID) (*pb.Message, error) {
pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), 0)
return dht.sendRequest(ctx, p, pmes)
}
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key) (*pb.Message, error) {
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key u.Key) (*pb.Message, error) {
pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, string(key), 0)
return dht.sendRequest(ctx, p, pmes)
}
func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.Peer {
peers, errs := pb.PBPeersToPeers(dht.peerstore, pbps)
for _, err := range errs {
log.Errorf("error converting peer: %v", err)
}
func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.ID {
peers := pb.PBPeersToPeerInfos(pbps)
var provArr []peer.ID
for _, pi := range peers {
p := pi.ID
var provArr []peer.Peer
for _, p := range peers {
// Dont add outselves to the list
if p.ID().Equal(dht.self.ID()) {
if p == dht.self {
continue
}
@ -316,14 +274,14 @@ func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.Peer
}
// nearestPeersToQuery returns the routing tables closest peers.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
key := u.Key(pmes.GetKey())
closer := dht.routingTable.NearestPeers(kb.ConvertKey(key), count)
return closer
}
// betterPeerToQuery returns nearestPeersToQuery, but iff closer than self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.ID {
closer := dht.nearestPeersToQuery(pmes, count)
// no node? nil
@ -333,17 +291,17 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer
// == to self? thats bad
for _, p := range closer {
if p.ID().Equal(dht.self.ID()) {
if p == dht.self {
log.Error("Attempted to return self! this shouldnt happen...")
return nil
}
}
var filtered []peer.Peer
var filtered []peer.ID
for _, p := range closer {
// must all be closer than self
key := u.Key(pmes.GetKey())
if !kb.Closer(dht.self.ID(), p.ID(), key) {
if !kb.Closer(dht.self, p, key) {
filtered = append(filtered, p)
}
}
@ -352,30 +310,13 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer
return filtered
}
// getPeer searches the peerstore for a peer with the given peer ID
func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
p, err := dht.peerstore.FindOrCreate(id)
if err != nil {
err = fmt.Errorf("Failed to get peer from peerstore: %s", err)
log.Error(err)
return nil, err
}
return p, nil
}
func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, pbp *pb.Message_Peer) (peer.Peer, error) {
p, err := pb.PBPeerToPeer(dht.peerstore, pbp)
if err != nil {
return nil, err
}
if dht.self.ID().Equal(p.ID()) {
return nil, errors.New("attempting to ensure connection to self")
func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, p peer.ID) error {
if p == dht.self {
return errors.New("attempting to ensure connection to self")
}
// dial connection
err = dht.network.DialPeer(ctx, p)
return p, err
return dht.network.DialPeer(ctx, p)
}
//TODO: this should be smarter about which keys it selects.
@ -421,14 +362,24 @@ func (dht *IpfsDHT) PingRoutine(t time.Duration) {
// Bootstrap builds up list of peers by requesting random peer IDs
func (dht *IpfsDHT) Bootstrap(ctx context.Context) {
id := make([]byte, 16)
rand.Read(id)
p, err := dht.FindPeer(ctx, peer.ID(id))
if err != nil {
log.Errorf("Bootstrap peer error: %s", err)
}
err = dht.network.DialPeer(ctx, p)
if err != nil {
log.Errorf("Bootstrap peer error: %s", err)
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
id := make([]byte, 16)
rand.Read(id)
pi, err := dht.FindPeer(ctx, peer.ID(id))
if err != nil {
// NOTE: this is not an error. this is expected!
log.Errorf("Bootstrap peer error: %s", err)
}
// woah, we got a peer under a random id? it _cannot_ be valid.
log.Errorf("dht seemingly found a peer at a random bootstrap id (%s)...", pi)
}()
}
wg.Wait()
}

View File

@ -67,7 +67,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
// sendRequest sends out a request, but also makes sure to
// measure the RTT for latency measurements.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
log.Debugf("%s dht starting stream", dht.self)
s, err := dht.network.NewStream(inet.ProtocolDHT, p)
@ -98,13 +98,13 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Messa
return nil, errors.New("no response to request")
}
p.SetLatency(time.Since(start))
dht.peerstore.RecordLatency(p, time.Since(start))
log.Event(ctx, "dhtReceivedMessage", dht.self, p, rpmes)
return rpmes, nil
}
// sendMessage sends out a message
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.Peer, pmes *pb.Message) error {
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
log.Debugf("%s dht starting stream", dht.self)
s, err := dht.network.NewStream(inet.ProtocolDHT, p)

View File

@ -2,44 +2,47 @@ package dht
import (
"bytes"
"math/rand"
"sort"
"testing"
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
ci "github.com/jbenet/go-ipfs/crypto"
// ci "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
testutil "github.com/jbenet/go-ipfs/util/testutil"
"fmt"
"time"
)
func randMultiaddr(t *testing.T) ma.Multiaddr {
func setupDHT(ctx context.Context, t *testing.T, addr ma.Multiaddr) *IpfsDHT {
s := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000+rand.Intn(40000))
a, err := ma.NewMultiaddr(s)
sk, pk, err := testutil.RandKeyPair(512)
if err != nil {
t.Fatal(err)
}
p, err := peer.IDFromPublicKey(pk)
if err != nil {
t.Fatal(err)
}
return a
}
func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
peerstore := peer.NewPeerstore()
peerstore.AddPrivKey(p, sk)
peerstore.AddPubKey(p, pk)
peerstore.AddAddress(p, addr)
n, err := inet.NewNetwork(ctx, p.Addresses(), p, peerstore)
n, err := inet.NewNetwork(ctx, []ma.Multiaddr{addr}, p, peerstore)
if err != nil {
t.Fatal(err)
}
d := NewDHT(ctx, p, peerstore, n, ds.NewMapDatastore())
dss := dssync.MutexWrap(ds.NewMapDatastore())
d := NewDHT(ctx, p, n, dss)
d.Validators["v"] = func(u.Key, []byte) error {
return nil
@ -47,77 +50,53 @@ func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
return d
}
func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.Peer, []*IpfsDHT) {
var addrs []ma.Multiaddr
for i := 0; i < n; i++ {
r := rand.Intn(40000)
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000+r))
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, a)
}
var peers []peer.Peer
for i := 0; i < n; i++ {
p := makePeer(addrs[i])
peers = append(peers, p)
}
func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.ID, []*IpfsDHT) {
addrs := make([]ma.Multiaddr, n)
dhts := make([]*IpfsDHT, n)
peers := make([]peer.ID, n)
for i := 0; i < n; i++ {
dhts[i] = setupDHT(ctx, t, peers[i])
addrs[i] = testutil.RandLocalTCPAddress()
dhts[i] = setupDHT(ctx, t, addrs[i])
peers[i] = dhts[i].self
}
return addrs, peers, dhts
}
func makePeerString(t *testing.T, addr string) peer.Peer {
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
idB := b.self
addrB := b.peerstore.Addresses(idB)
if len(addrB) == 0 {
t.Fatal("peers setup incorrectly: no local address")
}
a.peerstore.AddAddresses(idB, addrB)
if err := a.Connect(ctx, idB); err != nil {
t.Fatal(err)
}
return makePeer(maddr)
}
func makePeer(addr ma.Multiaddr) peer.Peer {
sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
if err != nil {
panic(err)
}
p, err := testutil.NewPeerWithKeyPair(sk, pk)
if err != nil {
panic(err)
}
p.AddAddress(addr)
return p
}
func TestPing(t *testing.T) {
// t.Skip("skipping test to debug another")
ctx := context.Background()
addrA := randMultiaddr(t)
addrB := randMultiaddr(t)
addrA := testutil.RandLocalTCPAddress()
addrB := testutil.RandLocalTCPAddress()
peerA := makePeer(addrA)
peerB := makePeer(addrB)
dhtA := setupDHT(ctx, t, addrA)
dhtB := setupDHT(ctx, t, addrB)
dhtA := setupDHT(ctx, t, peerA)
dhtB := setupDHT(ctx, t, peerB)
peerA := dhtA.self
peerB := dhtB.self
defer dhtA.Close()
defer dhtB.Close()
defer dhtA.network.Close()
defer dhtB.network.Close()
if err := dhtA.Connect(ctx, peerB); err != nil {
t.Fatal(err)
}
// if err := dhtB.Connect(ctx, peerA); err != nil {
// t.Fatal(err)
// }
connect(t, ctx, dhtA, dhtB)
//Test that we can ping the node
ctxT, _ := context.WithTimeout(ctx, 100*time.Millisecond)
@ -136,14 +115,16 @@ func TestValueGetSet(t *testing.T) {
ctx := context.Background()
addrA := randMultiaddr(t)
addrB := randMultiaddr(t)
addrA := testutil.RandLocalTCPAddress()
addrB := testutil.RandLocalTCPAddress()
peerA := makePeer(addrA)
peerB := makePeer(addrB)
dhtA := setupDHT(ctx, t, addrA)
dhtB := setupDHT(ctx, t, addrB)
dhtA := setupDHT(ctx, t, peerA)
dhtB := setupDHT(ctx, t, peerB)
defer dhtA.Close()
defer dhtB.Close()
defer dhtA.network.Close()
defer dhtB.network.Close()
vf := func(u.Key, []byte) error {
return nil
@ -151,15 +132,7 @@ func TestValueGetSet(t *testing.T) {
dhtA.Validators["v"] = vf
dhtB.Validators["v"] = vf
defer dhtA.Close()
defer dhtB.Close()
defer dhtA.network.Close()
defer dhtB.network.Close()
err := dhtA.Connect(ctx, peerB)
if err != nil {
t.Fatal(err)
}
connect(t, ctx, dhtA, dhtB)
ctxT, _ := context.WithTimeout(ctx, time.Second)
dhtA.PutValue(ctxT, "/v/hello", []byte("world"))
@ -189,7 +162,7 @@ func TestProvides(t *testing.T) {
// t.Skip("skipping test to debug another")
ctx := context.Background()
_, peers, dhts := setupDHTS(ctx, 4, t)
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@ -197,22 +170,11 @@ func TestProvides(t *testing.T) {
}
}()
err := dhts[0].Connect(ctx, peers[1])
if err != nil {
t.Fatal(err)
}
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
err = dhts[1].Connect(ctx, peers[2])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
err := dhts[3].putLocal(u.Key("hello"), []byte("world"))
if err != nil {
t.Fatal(err)
}
@ -227,18 +189,21 @@ func TestProvides(t *testing.T) {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 60)
// what is this timeout for? was 60ms before.
time.Sleep(time.Millisecond * 6)
ctxT, _ := context.WithTimeout(ctx, time.Second)
provchan := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 1)
after := time.After(time.Second)
select {
case prov := <-provchan:
if prov == nil {
if prov.ID == "" {
t.Fatal("Got back nil provider")
}
case <-after:
if prov.ID != dhts[3].self {
t.Fatal("Got back nil provider")
}
case <-ctxT.Done():
t.Fatal("Did not get a provider back.")
}
}
@ -250,7 +215,7 @@ func TestProvidesAsync(t *testing.T) {
ctx := context.Background()
_, peers, dhts := setupDHTS(ctx, 4, t)
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@ -258,22 +223,11 @@ func TestProvidesAsync(t *testing.T) {
}
}()
err := dhts[0].Connect(ctx, peers[1])
if err != nil {
t.Fatal(err)
}
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
err = dhts[1].Connect(ctx, peers[2])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
err := dhts[3].putLocal(u.Key("hello"), []byte("world"))
if err != nil {
t.Fatal(err)
}
@ -297,10 +251,10 @@ func TestProvidesAsync(t *testing.T) {
if !ok {
t.Fatal("Provider channel was closed...")
}
if p == nil {
if p.ID == "" {
t.Fatal("Got back nil provider!")
}
if !p.ID().Equal(dhts[3].self.ID()) {
if p.ID != dhts[3].self {
t.Fatalf("got a provider, but not the right one. %s", p)
}
case <-ctxT.Done():
@ -315,7 +269,7 @@ func TestLayeredGet(t *testing.T) {
ctx := context.Background()
_, peers, dhts := setupDHTS(ctx, 4, t)
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@ -323,22 +277,11 @@ func TestLayeredGet(t *testing.T) {
}
}()
err := dhts[0].Connect(ctx, peers[1])
if err != nil {
t.Fatalf("Failed to connect: %s", err)
}
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
err = dhts[1].Connect(ctx, peers[2])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
err = dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
err := dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
if err != nil {
t.Fatal(err)
}
@ -377,32 +320,21 @@ func TestFindPeer(t *testing.T) {
}
}()
err := dhts[0].Connect(ctx, peers[1])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[2])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
ctxT, _ := context.WithTimeout(ctx, time.Second)
p, err := dhts[0].FindPeer(ctxT, peers[2].ID())
p, err := dhts[0].FindPeer(ctxT, peers[2])
if err != nil {
t.Fatal(err)
}
if p == nil {
if p.ID == "" {
t.Fatal("Failed to find peer.")
}
if !p.ID().Equal(peers[2].ID()) {
if p.ID != peers[2] {
t.Fatal("Didnt find expected peer.")
}
}
@ -426,25 +358,10 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
// topology:
// 0-1, 1-2, 1-3, 2-3
err := dhts[0].Connect(ctx, peers[1])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[2])
if err != nil {
t.Fatal(err)
}
err = dhts[1].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
err = dhts[2].Connect(ctx, peers[3])
if err != nil {
t.Fatal(err)
}
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
connect(t, ctx, dhts[2], dhts[3])
// fmt.Println("0 is", peers[0])
// fmt.Println("1 is", peers[1])
@ -452,13 +369,13 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
// fmt.Println("3 is", peers[3])
ctxT, _ := context.WithTimeout(ctx, time.Second)
pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2].ID())
pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2])
if err != nil {
t.Fatal(err)
}
// shouldFind := []peer.Peer{peers[1], peers[3]}
found := []peer.Peer{}
// shouldFind := []peer.ID{peers[1], peers[3]}
found := []peer.PeerInfo{}
for nextp := range pchan {
found = append(found, nextp)
}
@ -475,7 +392,7 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
}
}
func testPeerListsMatch(t *testing.T, p1, p2 []peer.Peer) {
func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) {
if len(p1) != len(p2) {
t.Fatal("did not find as many peers as should have", p1, p2)
@ -485,11 +402,11 @@ func testPeerListsMatch(t *testing.T, p1, p2 []peer.Peer) {
ids2 := make([]string, len(p2))
for i, p := range p1 {
ids1[i] = p.ID().Pretty()
ids1[i] = string(p)
}
for i, p := range p2 {
ids2[i] = p.ID().Pretty()
ids2[i] = string(p)
}
sort.Sort(sort.StringSlice(ids1))
@ -514,39 +431,41 @@ func TestConnectCollision(t *testing.T) {
ctx := context.Background()
addrA := randMultiaddr(t)
addrB := randMultiaddr(t)
addrA := testutil.RandLocalTCPAddress()
addrB := testutil.RandLocalTCPAddress()
peerA := makePeer(addrA)
peerB := makePeer(addrB)
dhtA := setupDHT(ctx, t, addrA)
dhtB := setupDHT(ctx, t, addrB)
dhtA := setupDHT(ctx, t, peerA)
dhtB := setupDHT(ctx, t, peerB)
peerA := dhtA.self
peerB := dhtB.self
done := make(chan struct{})
errs := make(chan error)
go func() {
dhtA.peerstore.AddAddress(peerB, addrB)
err := dhtA.Connect(ctx, peerB)
if err != nil {
t.Fatal(err)
}
done <- struct{}{}
errs <- err
}()
go func() {
dhtB.peerstore.AddAddress(peerA, addrA)
err := dhtB.Connect(ctx, peerA)
if err != nil {
t.Fatal(err)
}
done <- struct{}{}
errs <- err
}()
timeout := time.After(time.Second)
select {
case <-done:
case e := <-errs:
if e != nil {
t.Fatal(e)
}
case <-timeout:
t.Fatal("Timeout received!")
}
select {
case <-done:
case e := <-errs:
if e != nil {
t.Fatal(e)
}
case <-timeout:
t.Fatal("Timeout received!")
}
@ -555,7 +474,5 @@ func TestConnectCollision(t *testing.T) {
dhtB.Close()
dhtA.network.Close()
dhtB.network.Close()
<-time.After(200 * time.Millisecond)
}
}

View File

@ -32,12 +32,12 @@ func (di *diagInfo) Marshal() []byte {
func (dht *IpfsDHT) getDiagInfo() *diagInfo {
di := new(diagInfo)
di.CodeVersion = "github.com/jbenet/go-ipfs"
di.ID = dht.self.ID()
di.ID = dht.self
di.LifeSpan = time.Since(dht.birth)
di.Keys = nil // Currently no way to query datastore
for _, p := range dht.routingTable.ListPeers() {
d := connDiagInfo{p.GetLatency(), p.ID()}
d := connDiagInfo{dht.peerstore.LatencyEWMA(p), p}
di.Connections = append(di.Connections, d)
}
return di

Some files were not shown because too many files have changed in this diff Show More