diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 959cb3580..666d918e0 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -141,7 +141,7 @@ }, { "ImportPath": "github.com/jbenet/go-peerstream", - "Rev": "5023d0d6b3efeb50c2c30535d011bdcb2351e212" + "Rev": "1c71a3e04eeef9297a12ecdff75a0b28ffa8bf90" }, { "ImportPath": "github.com/jbenet/go-random", diff --git a/Godeps/_workspace/src/github.com/jbenet/go-peerstream/conn.go b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/conn.go index 63c0a5e1e..e5325e331 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-peerstream/conn.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/conn.go @@ -159,7 +159,6 @@ func (s *Swarm) addConn(netConn net.Conn, server bool) (*Conn, error) { // first, check if we already have it... for c := range s.conns { if c.netConn == netConn { - s.connLock.Unlock() return c, nil } } @@ -167,7 +166,6 @@ func (s *Swarm) addConn(netConn net.Conn, server bool) (*Conn, error) { // create a new spdystream connection ssConn, err := ss.NewConnection(netConn, server) if err != nil { - s.connLock.Unlock() return nil, err } diff --git a/Godeps/_workspace/src/github.com/jbenet/go-peerstream/example/blockhandler/blockhandler.go b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/example/blockhandler/blockhandler.go new file mode 100644 index 000000000..391aa0fbf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/example/blockhandler/blockhandler.go @@ -0,0 +1,77 @@ +package main + +import ( + "bufio" + "fmt" + "net" + "os" + "time" + + ps "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream" +) + +func die(err error) { + fmt.Fprintf(os.Stderr, "error: %s\n") + os.Exit(1) +} + +func main() { + // create a new Swarm + swarm := ps.NewSwarm() + defer swarm.Close() + + // tell swarm what to do with a new incoming streams. + // EchoHandler just echos back anything they write. 
+ swarm.SetStreamHandler(ps.EchoHandler) + + l, err := net.Listen("tcp", "localhost:8001") + if err != nil { + die(err) + } + + if _, err := swarm.AddListener(l); err != nil { + die(err) + } + + nc, err := net.Dial("tcp", "localhost:8001") + if err != nil { + die(err) + } + + c, err := swarm.AddConn(nc) + if err != nil { + die(err) + } + + nRcvStream := 0 + bio := bufio.NewReader(os.Stdin) + swarm.SetStreamHandler(func(s *ps.Stream) { + log("handling new stream %d", nRcvStream) + nRcvStream++ + + line, err := bio.ReadString('\n') + if err != nil { + die(err) + } + _ = line + // line = "read: " + line + // s.Write([]byte(line)) + s.Close() + }) + + nSndStream := 0 + for { + <-time.After(200 * time.Millisecond) + s, err := swarm.NewStreamWithConn(c) + if err != nil { + die(err) + } + log("sender got new stream %d", nSndStream) + nSndStream++ + s.Wait() + } +} + +func log(s string, ifs ...interface{}) { + fmt.Fprintf(os.Stderr, s+"\n", ifs...) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-peerstream/swarm.go b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/swarm.go index 21f1313da..b6e8b4146 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-peerstream/swarm.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-peerstream/swarm.go @@ -110,28 +110,34 @@ func (s *Swarm) SelectConn() SelectConn { // Conns returns all the connections associated with this Swarm. func (s *Swarm) Conns() []*Conn { + s.connLock.RLock() conns := make([]*Conn, 0, len(s.conns)) for c := range s.conns { conns = append(conns, c) } + s.connLock.RUnlock() return conns } // Listeners returns all the listeners associated with this Swarm. func (s *Swarm) Listeners() []*Listener { + s.listenerLock.RLock() out := make([]*Listener, 0, len(s.listeners)) for c := range s.listeners { out = append(out, c) } + s.listenerLock.RUnlock() return out } // Streams returns all the streams associated with this Swarm. 
func (s *Swarm) Streams() []*Stream { + s.streamLock.RLock() out := make([]*Stream, 0, len(s.streams)) for c := range s.streams { out = append(out, c) } + s.streamLock.RUnlock() return out } diff --git a/Makefile b/Makefile index d8f2d80fb..79785f25e 100644 --- a/Makefile +++ b/Makefile @@ -18,8 +18,6 @@ test: test_go test_sharness test_expensive: test_go_expensive test_sharness_expensive test_docker: - cd ./src/github.com/jbenet/go-ipfs - docker build -t zaqwsx_ipfs-test-img . cd dockertest/ && make test_go: diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 9014cab46..1a7ea6b7a 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -41,7 +41,6 @@ func New(bs blockstore.Blockstore, rem exchange.Interface) (*BlockService, error // TODO pass a context into this if the remote.HasBlock is going to remain here. func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) { k := b.Key() - log.Debugf("blockservice: storing [%s] in datastore", k) err := s.Blockstore.Put(b) if err != nil { return k, err diff --git a/blockservice/mock.go b/blockservice/mock.go index 277519746..57432178e 100644 --- a/blockservice/mock.go +++ b/blockservice/mock.go @@ -11,9 +11,8 @@ import ( // Mocks returns |n| connected mock Blockservices func Mocks(t *testing.T, n int) []*BlockService { - net := tn.VirtualNetwork(delay.Fixed(0)) - rs := mockrouting.NewServer() - sg := bitswap.NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + sg := bitswap.NewSessionGenerator(net) instances := sg.Instances(n) diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go index 78054c8ff..bc609659f 100644 --- a/cmd/ipfs/init.go +++ b/cmd/ipfs/init.go @@ -266,7 +266,7 @@ func identityConfig(nbits int) (config.Identity, error) { } ident.PrivKey = base64.StdEncoding.EncodeToString(skbytes) - id, err := peer.IDFromPubKey(pk) + id, err := peer.IDFromPublicKey(pk) if err != nil { return ident, err } diff --git 
a/cmd/seccat/seccat.go b/cmd/seccat/seccat.go index 861a59ed8..625f64ec0 100644 --- a/cmd/seccat/seccat.go +++ b/cmd/seccat/seccat.go @@ -109,24 +109,28 @@ func main() { } } -func setupPeer(a args) (peer.Peer, peer.Peerstore, error) { +func setupPeer(a args) (peer.ID, peer.Peerstore, error) { if a.keybits < 1024 { - return nil, nil, errors.New("Bitsize less than 1024 is considered unsafe.") + return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.") } out("generating key pair...") sk, pk, err := ci.GenerateKeyPair(ci.RSA, a.keybits) if err != nil { - return nil, nil, err + return "", nil, err + } + + p, err := peer.IDFromPublicKey(pk) + if err != nil { + return "", nil, err } ps := peer.NewPeerstore() - peer, err := ps.WithKeyPair(sk, pk) - if err != nil { - return nil, nil, err - } - out("local peer id: %s", peer.ID()) - return peer, ps, nil + ps.AddPrivKey(p, sk) + ps.AddPubKey(p, pk) + + out("local peer id: %s", p) + return p, ps, nil } func connect(args args) error { @@ -149,12 +153,13 @@ func connect(args args) error { rwc := &logRW{n: "conn", rw: conn} // OK, let's setup the channel. 
- sg := secio.SessionGenerator{Local: p, Peerstore: ps} + sk := ps.PrivKey(p) + sg := secio.SessionGenerator{LocalID: p, PrivateKey: sk} sess, err := sg.NewSession(nil, rwc) if err != nil { return err } - out("remote peer id: %s", sess.RemotePeer().ID()) + out("remote peer id: %s", sess.RemotePeer()) netcat(sess.ReadWriter().(io.ReadWriteCloser)) return nil } diff --git a/config/config.go b/config/config.go index 56eb58347..d165f9886 100644 --- a/config/config.go +++ b/config/config.go @@ -2,12 +2,11 @@ package config import ( - "crypto" - "crypto/x509" "encoding/base64" "os" "path/filepath" + ic "github.com/jbenet/go-ipfs/crypto" u "github.com/jbenet/go-ipfs/util" "github.com/jbenet/go-ipfs/util/debugerror" ) @@ -132,7 +131,7 @@ func Filename(configroot string) (string, error) { } // DecodePrivateKey is a helper to decode the users PrivateKey -func (i *Identity) DecodePrivateKey(passphrase string) (crypto.PrivateKey, error) { +func (i *Identity) DecodePrivateKey(passphrase string) (ic.PrivKey, error) { pkb, err := base64.StdEncoding.DecodeString(i.PrivKey) if err != nil { return nil, err @@ -140,7 +139,7 @@ func (i *Identity) DecodePrivateKey(passphrase string) (crypto.PrivateKey, error // currently storing key unencrypted. in the future we need to encrypt it. // TODO(security) - return x509.ParsePKCS1PrivateKey(pkb) + return ic.UnmarshalPrivateKey(pkb) } // Load reads given file and returns the read config, or error. 
diff --git a/core/bootstrap.go b/core/bootstrap.go index ffb560a05..1539a761f 100644 --- a/core/bootstrap.go +++ b/core/bootstrap.go @@ -1,17 +1,21 @@ package core import ( + "errors" + "fmt" "math/rand" "sync" "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" config "github.com/jbenet/go-ipfs/config" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" dht "github.com/jbenet/go-ipfs/routing/dht" + lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables" math2 "github.com/jbenet/go-ipfs/util/math2" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) const ( @@ -50,34 +54,59 @@ func bootstrap(ctx context.Context, connectedPeers := n.Peers() if len(connectedPeers) >= recoveryThreshold { + log.Event(ctx, "bootstrapSkip", n.LocalPeer()) + log.Debugf("%s bootstrap skipped -- connected to %d (> %d) nodes", + n.LocalPeer(), len(connectedPeers), recoveryThreshold) + return nil } numCxnsToCreate := recoveryThreshold - len(connectedPeers) - var bootstrapPeers []peer.Peer + log.Event(ctx, "bootstrapStart", n.LocalPeer()) + log.Debugf("%s bootstrapping to %d more nodes", n.LocalPeer(), numCxnsToCreate) + + var bootstrapPeers []peer.PeerInfo for _, bootstrap := range boots { - p, err := toPeer(ps, bootstrap) + p, err := toPeer(bootstrap) if err != nil { + log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err)) + log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err) return err } bootstrapPeers = append(bootstrapPeers, p) } - var notConnected []peer.Peer + var notConnected []peer.PeerInfo for _, p := range bootstrapPeers { - if n.Connectedness(p) != inet.Connected { + if n.Connectedness(p.ID) != inet.Connected { notConnected = append(notConnected, p) } } + if len(notConnected) < 1 { + 
s := "must bootstrap to %d more nodes, but already connected to all candidates" + err := fmt.Errorf(s, numCxnsToCreate) + log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err)) + log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err) + return err + } + var randomSubset = randomSubsetOfPeers(notConnected, numCxnsToCreate) - if err := connect(ctx, r, randomSubset); err != nil { + + log.Debugf("%s bootstrapping to %d nodes: %s", n.LocalPeer(), numCxnsToCreate, randomSubset) + if err := connect(ctx, ps, r, randomSubset); err != nil { + log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err)) + log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err) return err } return nil } -func connect(ctx context.Context, r *dht.IpfsDHT, peers []peer.Peer) error { +func connect(ctx context.Context, ps peer.Peerstore, r *dht.IpfsDHT, peers []peer.PeerInfo) error { + if len(peers) < 1 { + return errors.New("bootstrap set empty") + } + var wg sync.WaitGroup for _, p := range peers { @@ -86,42 +115,45 @@ func connect(ctx context.Context, r *dht.IpfsDHT, peers []peer.Peer) error { // fail/abort due to an expiring context. 
wg.Add(1) - go func(p peer.Peer) { + go func(p peer.PeerInfo) { defer wg.Done() - err := r.Connect(ctx, p) + log.Event(ctx, "bootstrapDial", r.LocalPeer(), p.ID) + log.Debugf("%s bootstrapping to %s", r.LocalPeer(), p.ID) + + ps.AddAddresses(p.ID, p.Addrs) + err := r.Connect(ctx, p.ID) if err != nil { - log.Event(ctx, "bootstrapFailed", p) - log.Criticalf("failed to bootstrap with %v", p) + log.Event(ctx, "bootstrapFailed", p.ID) + log.Criticalf("failed to bootstrap with %v", p.ID) return } - log.Event(ctx, "bootstrapSuccess", p) - log.Infof("bootstrapped with %v", p) + log.Event(ctx, "bootstrapSuccess", p.ID) + log.Infof("bootstrapped with %v", p.ID) }(p) } wg.Wait() return nil } -func toPeer(ps peer.Peerstore, bootstrap *config.BootstrapPeer) (peer.Peer, error) { - id, err := peer.DecodePrettyID(bootstrap.PeerID) +func toPeer(bootstrap *config.BootstrapPeer) (p peer.PeerInfo, err error) { + id, err := peer.IDB58Decode(bootstrap.PeerID) if err != nil { - return nil, err - } - p, err := ps.FindOrCreate(id) - if err != nil { - return nil, err + return } maddr, err := ma.NewMultiaddr(bootstrap.Address) if err != nil { - return nil, err + return } - p.AddAddress(maddr) - return p, nil + p = peer.PeerInfo{ + ID: id, + Addrs: []ma.Multiaddr{maddr}, + } + return } -func randomSubsetOfPeers(in []peer.Peer, max int) []peer.Peer { +func randomSubsetOfPeers(in []peer.PeerInfo, max int) []peer.PeerInfo { n := math2.IntMin(max, len(in)) - var out []peer.Peer + var out []peer.PeerInfo for _, val := range rand.Perm(n) { out = append(out, in[val]) } diff --git a/core/bootstrap_test.go b/core/bootstrap_test.go index 356b65399..636a8f808 100644 --- a/core/bootstrap_test.go +++ b/core/bootstrap_test.go @@ -8,10 +8,15 @@ import ( ) func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) { - var ps []peer.Peer + var ps []peer.PeerInfo sizeofSlice := 100 for i := 0; i < sizeofSlice; i++ { - ps = append(ps, testutil.RandPeer()) + pid, err := testutil.RandPeerID() + if err != nil 
{ + t.Fatal(err) + } + + ps = append(ps, peer.PeerInfo{ID: pid}) } out := randomSubsetOfPeers(ps, 2*sizeofSlice) if len(out) != len(ps) { diff --git a/core/commands/id.go b/core/commands/id.go index 25bc9fdc3..96e462a09 100644 --- a/core/commands/id.go +++ b/core/commands/id.go @@ -11,6 +11,7 @@ import ( b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" cmds "github.com/jbenet/go-ipfs/commands" + ic "github.com/jbenet/go-ipfs/crypto" "github.com/jbenet/go-ipfs/peer" kb "github.com/jbenet/go-ipfs/routing/kbucket" u "github.com/jbenet/go-ipfs/util" @@ -49,7 +50,7 @@ if no peer is specified, prints out local peers info. } if len(req.Arguments()) == 0 { - return printPeer(node.Identity) + return printPeer(node.Peerstore, node.Identity) } pid := req.Arguments()[0] @@ -72,7 +73,7 @@ if no peer is specified, prints out local peers info. if err != nil { return nil, err } - return printPeer(p) + return printPeer(node.Peerstore, p.ID) }, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) ([]byte, error) { @@ -87,27 +88,36 @@ if no peer is specified, prints out local peers info. 
Type: &IdOutput{}, } -func printPeer(p peer.Peer) (interface{}, error) { - if p == nil { +func printPeer(ps peer.Peerstore, p peer.ID) (interface{}, error) { + if p == "" { return nil, errors.New("Attempted to print nil peer!") } - info := new(IdOutput) - info.ID = p.ID().String() - if p.PubKey() != nil { - pkb, err := p.PubKey().Bytes() + info := new(IdOutput) + info.ID = p.Pretty() + + if pk := ps.PubKey(p); pk != nil { + pkb, err := ic.MarshalPublicKey(pk) if err != nil { return nil, err } info.PublicKey = base64.StdEncoding.EncodeToString(pkb) } - for _, a := range p.Addresses() { + + for _, a := range ps.Addresses(p) { info.Addresses = append(info.Addresses, a.String()) } - agent, protocol := p.GetVersions() - info.AgentVersion = agent - info.ProtocolVersion = protocol + if v, err := ps.Get(p, "ProtocolVersion"); err == nil { + if vs, ok := v.(string); ok { + info.AgentVersion = vs + } + } + if v, err := ps.Get(p, "AgentVersion"); err == nil { + if vs, ok := v.(string); ok { + info.ProtocolVersion = vs + } + } return info, nil } diff --git a/core/commands/publish.go b/core/commands/publish.go index f7165394e..97455b9c7 100644 --- a/core/commands/publish.go +++ b/core/commands/publish.go @@ -57,7 +57,7 @@ Publish a to another public key: return nil, errNotOnline } - if n.Identity == nil { + if n.Identity == "" { return nil, errors.New("Identity not loaded!") } @@ -75,8 +75,7 @@ Publish a to another public key: } // TODO n.Keychain.Get(name).PrivKey - k := n.Identity.PrivKey() - return publish(n, k, ref) + return publish(n, n.PrivateKey, ref) }, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) ([]byte, error) { diff --git a/core/commands/resolve.go b/core/commands/resolve.go index 70ee9735e..dad72d755 100644 --- a/core/commands/resolve.go +++ b/core/commands/resolve.go @@ -52,10 +52,10 @@ Resolve te value of another name: } if len(req.Arguments()) == 0 { - if n.Identity == nil { + if n.Identity == "" { return nil, errors.New("Identity not 
loaded!") } - name = n.Identity.ID().String() + name = n.Identity.Pretty() } else { name = req.Arguments()[0] diff --git a/core/commands/swarm.go b/core/commands/swarm.go index c7fd7218c..9b583f3ae 100644 --- a/core/commands/swarm.go +++ b/core/commands/swarm.go @@ -58,7 +58,7 @@ ipfs swarm peers lists the set of peers this node is connected to. conns := n.Network.Conns() addrs := make([]string, len(conns)) for i, c := range conns { - pid := c.RemotePeer().ID() + pid := c.RemotePeer() addr := c.RemoteMultiaddr() addrs[i] = fmt.Sprintf("%s/%s", addr, pid) } @@ -106,7 +106,7 @@ ipfs swarm connect /ip4/104.131.131.82/tcp/4001/QmaCpDMGvV2BGHeYERUEnRQAwe3N8Szb output := make([]string, len(peers)) for i, p := range peers { - output[i] = "connect " + p.ID().String() + output[i] = "connect " + p.Pretty() err := n.Network.DialPeer(ctx, p) if err != nil { @@ -149,7 +149,7 @@ func splitAddresses(addrs []string) (maddrs []ma.Multiaddr, pids []peer.ID, err if err != nil { return nil, nil, cmds.ClientError("invalid peer address: " + err.Error()) } - id, err := peer.DecodePrettyID(path.Base(addr)) + id, err := peer.IDB58Decode(path.Base(addr)) if err != nil { return nil, nil, err } @@ -161,21 +161,14 @@ func splitAddresses(addrs []string) (maddrs []ma.Multiaddr, pids []peer.ID, err // peersWithAddresses is a function that takes in a slice of string peer addresses // (multiaddr + peerid) and returns a slice of properly constructed peers -func peersWithAddresses(ps peer.Peerstore, addrs []string) ([]peer.Peer, error) { +func peersWithAddresses(ps peer.Peerstore, addrs []string) ([]peer.ID, error) { maddrs, pids, err := splitAddresses(addrs) if err != nil { return nil, err } - peers := make([]peer.Peer, len(pids)) - for i, pid := range pids { - p, err := ps.FindOrCreate(pid) - if err != nil { - return nil, err - } - - p.AddAddress(maddrs[i]) - peers[i] = p + for i, p := range pids { + ps.AddAddress(p, maddrs[i]) } - return peers, nil + return pids, nil } diff --git a/core/core.go 
b/core/core.go index f389e635a..7cabef281 100644 --- a/core/core.go +++ b/core/core.go @@ -1,7 +1,6 @@ package core import ( - "encoding/base64" "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -12,6 +11,7 @@ import ( bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bserv "github.com/jbenet/go-ipfs/blockservice" config "github.com/jbenet/go-ipfs/config" + ic "github.com/jbenet/go-ipfs/crypto" diag "github.com/jbenet/go-ipfs/diagnostics" exchange "github.com/jbenet/go-ipfs/exchange" bitswap "github.com/jbenet/go-ipfs/exchange/bitswap" @@ -21,7 +21,6 @@ import ( merkledag "github.com/jbenet/go-ipfs/merkledag" namesys "github.com/jbenet/go-ipfs/namesys" inet "github.com/jbenet/go-ipfs/net" - handshake "github.com/jbenet/go-ipfs/net/handshake" path "github.com/jbenet/go-ipfs/path" peer "github.com/jbenet/go-ipfs/peer" pin "github.com/jbenet/go-ipfs/pin" @@ -42,7 +41,8 @@ type IpfsNode struct { // Self Config *config.Config // the node's configuration - Identity peer.Peer // the local node's identity + Identity peer.ID // the local node's identity + PrivateKey ic.PrivKey // the local node's private Key onlineMode bool // alternatively, offline // Local node @@ -97,13 +97,18 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN return nil, debugerror.Wrap(err) } - // setup peerstore + local peer identity - n.Peerstore = peer.NewPeerstore() - n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online) + // setup local peer identity + n.Identity, n.PrivateKey, err = initIdentity(&n.Config.Identity, online) if err != nil { return nil, debugerror.Wrap(err) } + // setup Peerstore + n.Peerstore = peer.NewPeerstore() + if n.PrivateKey != nil { + n.Peerstore.AddPrivKey(n.Identity, n.PrivateKey) + } + blockstore, err := bstore.WriteCached(bstore.NewBlockstore(n.Datastore), kSizeBlockstoreWriteCache) n.Exchange = offline.Exchange(blockstore) @@ -122,11 +127,21 @@ func NewIpfsNode(ctx 
context.Context, cfg *config.Config, online bool) (n *IpfsN } n.AddChildGroup(n.Network.CtxGroup()) + // explicitly set these as our listen addrs. + // (why not do it inside inet.NewNetwork? because this way we can + // listen on addresses without necessarily advertising those publicly.) + addrs, err := n.Network.InterfaceListenAddresses() + if err != nil { + return nil, debugerror.Wrap(err) + } + + n.Peerstore.AddAddresses(n.Identity, addrs) + // setup diagnostics service n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network) // setup routing service - dhtRouting := dht.NewDHT(ctx, n.Identity, n.Peerstore, n.Network, n.Datastore) + dhtRouting := dht.NewDHT(ctx, n.Identity, n.Network, n.Datastore) dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord // TODO(brian): perform this inside NewDHT factory method @@ -135,9 +150,9 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN // setup exchange service const alwaysSendToPeer = true // use YesManStrategy - bitswapNetwork := bsnet.NewFromIpfsNetwork(n.Network) + bitswapNetwork := bsnet.NewFromIpfsNetwork(n.Network, n.Routing) - n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, n.Routing, blockstore, alwaysSendToPeer) + n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, blockstore, alwaysSendToPeer) // TODO consider moving connection supervision into the Network. We've // discussed improvements to this Node constructor. 
One improvement @@ -178,42 +193,47 @@ func (n *IpfsNode) OnlineMode() bool { return n.onlineMode } -func initIdentity(cfg *config.Identity, peers peer.Peerstore, online bool) (peer.Peer, error) { +func initIdentity(cfg *config.Identity, online bool) (peer.ID, ic.PrivKey, error) { + if cfg.PeerID == "" { - return nil, debugerror.New("Identity was not set in config (was ipfs init run?)") + return "", nil, debugerror.New("Identity was not set in config (was ipfs init run?)") } if len(cfg.PeerID) == 0 { - return nil, debugerror.New("No peer ID in config! (was ipfs init run?)") + return "", nil, debugerror.New("No peer ID in config! (was ipfs init run?)") } - // get peer from peerstore (so it is constructed there) id := peer.ID(b58.Decode(cfg.PeerID)) - self, err := peers.FindOrCreate(id) - if err != nil { - return nil, err - } - self.SetType(peer.Local) - self, err = peers.Add(self) - if err != nil { - return nil, err - } - - self.SetVersions(handshake.ClientVersion, handshake.IpfsVersion.String()) // when not online, don't need to parse private keys (yet) - if online { - skb, err := base64.StdEncoding.DecodeString(cfg.PrivKey) - if err != nil { - return nil, err - } - - if err := self.LoadAndVerifyKeyPair(skb); err != nil { - return nil, err - } + if !online { + return id, nil, nil } - return self, nil + sk, err := loadPrivateKey(cfg, id) + if err != nil { + return "", nil, err + } + + return id, sk, nil +} + +func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) { + sk, err := cfg.DecodePrivateKey("passphrase todo!") + if err != nil { + return nil, err + } + + id2, err := peer.IDFromPrivateKey(sk) + if err != nil { + return nil, err + } + + if id2 != id { + return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2) + } + + return sk, nil } func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) { diff --git a/core/core_test.go b/core/core_test.go index 8ea0b6e8c..619adbb50 100644 --- a/core/core_test.go +++ 
b/core/core_test.go @@ -3,9 +3,8 @@ package core import ( "testing" - config "github.com/jbenet/go-ipfs/config" - "github.com/jbenet/go-ipfs/peer" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + config "github.com/jbenet/go-ipfs/config" ) func TestInitialization(t *testing.T) { @@ -60,22 +59,6 @@ func TestInitialization(t *testing.T) { } } -func TestPeerIsLocal(t *testing.T) { - t.Log("Ensure that peer is Local after initializing identity") - - online := false - peers := peer.NewPeerstore() - - cfg := testIdentity - p, err := initIdentity(&cfg, peers, online) - if err != nil { - t.Fatal(err) - } - if p.GetType() != peer.Local { - t.Fail() - } -} - var testIdentity = config.Identity{ PeerID: "QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt", PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXs
TY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnCQoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpWSN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=", diff --git a/core/mock.go b/core/mock.go index 
9c893a6dc..83f1a5a81 100644 --- a/core/mock.go +++ b/core/mock.go @@ -25,24 +25,23 @@ func NewMockNode() (*IpfsNode, error) { return nil, err } + p, err := peer.IDFromPublicKey(pk) + if err != nil { + return nil, err + } + + nd.Identity = p + nd.PrivateKey = sk nd.Peerstore = peer.NewPeerstore() - - p, err := nd.Peerstore.WithKeyPair(sk, pk) - if err != nil { - return nil, err - } - - nd.Identity, err = nd.Peerstore.Add(p) - if err != nil { - return nil, err - } + nd.Peerstore.AddPrivKey(p, sk) + nd.Peerstore.AddPubKey(p, pk) // Temp Datastore dstore := ds.NewMapDatastore() nd.Datastore = ds2.CloserWrap(syncds.MutexWrap(dstore)) // Routing - dht := mdht.NewServer().ClientWithDatastore(nd.Identity, nd.Datastore) + dht := mdht.NewServer().ClientWithDatastore(peer.PeerInfo{ID: p}, nd.Datastore) nd.Routing = dht // Bitswap diff --git a/crypto/key.go b/crypto/key.go index e81ef5a13..67a316527 100644 --- a/crypto/key.go +++ b/crypto/key.go @@ -5,6 +5,7 @@ package crypto import ( "bytes" + "encoding/base64" "errors" "fmt" @@ -82,7 +83,7 @@ func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) { return nil, nil, err } pk := &priv.PublicKey - return &RsaPrivateKey{priv}, &RsaPublicKey{pk}, nil + return &RsaPrivateKey{sk: priv}, &RsaPublicKey{pk}, nil default: return nil, nil, ErrBadKeyType } @@ -239,6 +240,20 @@ func UnmarshalPublicKey(data []byte) (PubKey, error) { } } +// MarshalPublicKey converts a public key object into a protobuf serialized +// public key +func MarshalPublicKey(k PubKey) ([]byte, error) { + b, err := MarshalRsaPublicKey(k.(*RsaPublicKey)) + if err != nil { + return nil, err + } + pmes := new(pb.PublicKey) + typ := pb.KeyType_RSA // for now only type. 
+ pmes.Type = &typ + pmes.Data = b + return proto.Marshal(pmes) +} + // UnmarshalPrivateKey converts a protobuf serialized private key into its // representative object func UnmarshalPrivateKey(data []byte) (PrivKey, error) { @@ -256,6 +271,26 @@ func UnmarshalPrivateKey(data []byte) (PrivKey, error) { } } +// MarshalPrivateKey converts a key object into its protobuf serialized form. +func MarshalPrivateKey(k PrivKey) ([]byte, error) { + b := MarshalRsaPrivateKey(k.(*RsaPrivateKey)) + pmes := new(pb.PrivateKey) + typ := pb.KeyType_RSA // for now only type. + pmes.Type = &typ + pmes.Data = b + return proto.Marshal(pmes) +} + +// ConfigDecodeKey decodes from b64 (for config file), and unmarshals. +func ConfigDecodeKey(b string) ([]byte, error) { + return base64.StdEncoding.DecodeString(b) +} + +// ConfigEncodeKey encodes to b64 (for config file), and marshals. +func ConfigEncodeKey(b []byte) string { + return base64.StdEncoding.EncodeToString(b) +} + // KeyEqual checks whether two func KeyEqual(k1, k2 Key) bool { if k1 == k2 { diff --git a/crypto/key_test.go b/crypto/key_test.go index 112b99bee..1300be8f8 100644 --- a/crypto/key_test.go +++ b/crypto/key_test.go @@ -1,6 +1,9 @@ package crypto -import "testing" +import ( + "bytes" + "testing" +) func TestRsaKeys(t *testing.T) { sk, pk, err := GenerateKeyPair(RSA, 512) @@ -33,26 +36,44 @@ func testKeySignature(t *testing.T, sk PrivKey) { } func testKeyEncoding(t *testing.T, sk PrivKey) { - skb, err := sk.Bytes() + skbm, err := MarshalPrivateKey(sk) if err != nil { t.Fatal(err) } - _, err = UnmarshalPrivateKey(skb) + sk2, err := UnmarshalPrivateKey(skbm) if err != nil { t.Fatal(err) } + skbm2, err := MarshalPrivateKey(sk2) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(skbm, skbm2) { + t.Error("skb -> marshal -> unmarshal -> skb failed.\n", skbm, "\n", skbm2) + } + pk := sk.GetPublic() - pkb, err := pk.Bytes() + pkbm, err := MarshalPublicKey(pk) if err != nil { t.Fatal(err) } - _, err = UnmarshalPublicKey(pkb) 
+ _, err = UnmarshalPublicKey(pkbm) if err != nil { t.Fatal(err) } + + pkbm2, err := MarshalPublicKey(pk) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(pkbm, pkbm2) { + t.Error("skb -> marshal -> unmarshal -> skb failed.\n", pkbm, "\n", pkbm2) + } } func testKeyEquals(t *testing.T, k Key) { diff --git a/crypto/rsa.go b/crypto/rsa.go index 1ef99776b..9f939807d 100644 --- a/crypto/rsa.go +++ b/crypto/rsa.go @@ -14,7 +14,8 @@ import ( ) type RsaPrivateKey struct { - k *rsa.PrivateKey + sk *rsa.PrivateKey + pk *rsa.PublicKey } type RsaPublicKey struct { @@ -64,19 +65,22 @@ func (sk *RsaPrivateKey) GenSecret() []byte { func (sk *RsaPrivateKey) Sign(message []byte) ([]byte, error) { hashed := sha256.Sum256(message) - return rsa.SignPKCS1v15(rand.Reader, sk.k, crypto.SHA256, hashed[:]) + return rsa.SignPKCS1v15(rand.Reader, sk.sk, crypto.SHA256, hashed[:]) } func (sk *RsaPrivateKey) GetPublic() PubKey { - return &RsaPublicKey{&sk.k.PublicKey} + if sk.pk == nil { + sk.pk = &sk.sk.PublicKey + } + return &RsaPublicKey{sk.pk} } func (sk *RsaPrivateKey) Decrypt(b []byte) ([]byte, error) { - return rsa.DecryptPKCS1v15(rand.Reader, sk.k, b) + return rsa.DecryptPKCS1v15(rand.Reader, sk.sk, b) } func (sk *RsaPrivateKey) Bytes() ([]byte, error) { - b := x509.MarshalPKCS1PrivateKey(sk.k) + b := x509.MarshalPKCS1PrivateKey(sk.sk) pbmes := new(pb.PrivateKey) typ := pb.KeyType_RSA pbmes.Type = &typ @@ -98,7 +102,11 @@ func UnmarshalRsaPrivateKey(b []byte) (*RsaPrivateKey, error) { if err != nil { return nil, err } - return &RsaPrivateKey{sk}, nil + return &RsaPrivateKey{sk: sk}, nil +} + +func MarshalRsaPrivateKey(k *RsaPrivateKey) []byte { + return x509.MarshalPKCS1PrivateKey(k.sk) } func UnmarshalRsaPublicKey(b []byte) (*RsaPublicKey, error) { @@ -112,3 +120,7 @@ func UnmarshalRsaPublicKey(b []byte) (*RsaPublicKey, error) { } return &RsaPublicKey{pk}, nil } + +func MarshalRsaPublicKey(k *RsaPublicKey) ([]byte, error) { + return x509.MarshalPKIXPublicKey(k.k) +} diff --git 
a/crypto/secio/interface.go b/crypto/secio/interface.go index 906b99939..2ae52d740 100644 --- a/crypto/secio/interface.go +++ b/crypto/secio/interface.go @@ -4,6 +4,8 @@ package secio import ( "io" + ci "github.com/jbenet/go-ipfs/crypto" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" @@ -12,8 +14,8 @@ import ( // SessionGenerator constructs secure communication sessions for a peer. type SessionGenerator struct { - Local peer.Peer - Peerstore peer.Peerstore + LocalID peer.ID + PrivateKey ci.PrivKey } // NewSession takes an insecure io.ReadWriter, performs a TLS-like @@ -23,12 +25,15 @@ type SessionGenerator struct { func (sg *SessionGenerator) NewSession(ctx context.Context, insecure io.ReadWriter) (Session, error) { + ss, err := newSecureSession(sg.LocalID, sg.PrivateKey) + if err != nil { + return nil, err + } + if ctx == nil { ctx = context.Background() } ctx, cancel := context.WithCancel(ctx) - - ss := newSecureSession(sg.Local, sg.Peerstore) if err := ss.handshake(ctx, insecure); err != nil { cancel() return nil, err @@ -42,10 +47,17 @@ type Session interface { ReadWriter() msgio.ReadWriteCloser // LocalPeer retrieves the local peer. - LocalPeer() peer.Peer + LocalPeer() peer.ID + + // LocalPrivateKey retrieves the local private key + LocalPrivateKey() ci.PrivKey // RemotePeer retrieves the remote peer. - RemotePeer() peer.Peer + RemotePeer() peer.ID + + // RemotePublicKey retrieves the remote's public key + // which was received during the handshake. + RemotePublicKey() ci.PubKey // Close closes the secure session Close() error @@ -57,15 +69,25 @@ func (s *secureSession) ReadWriter() msgio.ReadWriteCloser { } // LocalPeer retrieves the local peer. 
-func (s *secureSession) LocalPeer() peer.Peer { +func (s *secureSession) LocalPeer() peer.ID { return s.localPeer } +// LocalPrivateKey retrieves the local peer's PrivateKey +func (s *secureSession) LocalPrivateKey() ci.PrivKey { + return s.localKey +} + // RemotePeer retrieves the remote peer. -func (s *secureSession) RemotePeer() peer.Peer { +func (s *secureSession) RemotePeer() peer.ID { return s.remotePeer } +// RemotePublicKey retrieves the remote peer's public key. +func (s *secureSession) RemotePublicKey() ci.PubKey { + return s.remote.permanentPubKey +} + // Close closes the secure session func (s *secureSession) Close() error { return s.secure.Close() diff --git a/crypto/spipe/internal/pb/Makefile b/crypto/secio/pb/Makefile similarity index 100% rename from crypto/spipe/internal/pb/Makefile rename to crypto/secio/pb/Makefile diff --git a/crypto/spipe/internal/pb/spipe.pb.go b/crypto/secio/pb/spipe.pb.go similarity index 100% rename from crypto/spipe/internal/pb/spipe.pb.go rename to crypto/secio/pb/spipe.pb.go diff --git a/crypto/spipe/internal/pb/spipe.proto b/crypto/secio/pb/spipe.proto similarity index 100% rename from crypto/spipe/internal/pb/spipe.proto rename to crypto/secio/pb/spipe.proto diff --git a/crypto/secio/protocol.go b/crypto/secio/protocol.go index 8a0bdd417..9dc492ff2 100644 --- a/crypto/secio/protocol.go +++ b/crypto/secio/protocol.go @@ -11,7 +11,7 @@ import ( msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" ci "github.com/jbenet/go-ipfs/crypto" - pb "github.com/jbenet/go-ipfs/crypto/spipe/internal/pb" + pb "github.com/jbenet/go-ipfs/crypto/secio/pb" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" eventlog "github.com/jbenet/go-ipfs/util/eventlog" @@ -36,9 +36,9 @@ type secureSession struct { insecure io.ReadWriter insecureM msgio.ReadWriter - peers peer.Peerstore - localPeer peer.Peer - remotePeer peer.Peer + localKey ci.PrivKey + localPeer peer.ID + remotePeer peer.ID local encParams 
remote encParams @@ -46,8 +46,19 @@ type secureSession struct { sharedSecret []byte } -func newSecureSession(local peer.Peer, peers peer.Peerstore) *secureSession { - return &secureSession{peers: peers, localPeer: local} +func newSecureSession(local peer.ID, key ci.PrivKey) (*secureSession, error) { + s := &secureSession{localPeer: local, localKey: key} + + switch { + case s.localPeer == "": + return nil, errors.New("no local id provided") + case s.localKey == nil: + return nil, errors.New("no local private key provided") + case !s.localPeer.MatchesPrivateKey(s.localKey): + return nil, fmt.Errorf("peer.ID does not match PrivateKey") + } + + return s, nil } // handsahke performs initial communication over insecure channel to share @@ -71,7 +82,7 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e log.Debugf("handshake: %s <--start--> %s", s.localPeer, s.remotePeer) log.Event(ctx, "secureHandshakeStart", s.localPeer) - s.local.permanentPubKey = s.localPeer.PubKey() + s.local.permanentPubKey = s.localKey.GetPublic() myPubKeyBytes, err := s.local.permanentPubKey.Bytes() if err != nil { return err @@ -84,6 +95,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e proposeOut.Ciphers = &SupportedCiphers proposeOut.Hashes = &SupportedHashes + // log.Debugf("1.0 Propose: nonce:%s exchanges:%s ciphers:%s hashes:%s", + // nonceOut, SupportedExchanges, SupportedCiphers, SupportedHashes) + // Send Propose packet (respects ctx) proposeOutBytes, err := writeMsgCtx(ctx, s.insecureM, proposeOut) if err != nil { @@ -97,6 +111,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e return err } + // log.Debugf("1.0.1 Propose recv: nonce:%s exchanges:%s ciphers:%s hashes:%s", + // proposeIn.GetRand(), proposeIn.GetExchanges(), proposeIn.GetCiphers(), proposeIn.GetHashes()) + // ============================================================================= // step 1.1 Identify -- get identity 
from their key @@ -106,12 +123,13 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e return err } - // get or construct peer - s.remotePeer, err = getOrConstructPeer(s.peers, s.remote.permanentPubKey) + // get peer id + s.remotePeer, err = peer.IDFromPublicKey(s.remote.permanentPubKey) if err != nil { return err } - // log.Debugf("%s Remote Peer Identified as %s", s.localPeer, s.remotePeer) + + log.Debugf("1.1 Identify: %s Remote Peer Identified as %s", s.localPeer, s.remotePeer) // ============================================================================= // step 1.2 Selection -- select/agree on best encryption parameters @@ -141,6 +159,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e s.remote.cipherT = s.local.cipherT s.remote.hashT = s.local.hashT + // log.Debugf("1.2 selection: exchange:%s cipher:%s hash:%s", + // s.local.curveT, s.local.cipherT, s.local.hashT) + // ============================================================================= // step 2. Exchange -- exchange (signed) ephemeral keys. verify signatures. 
@@ -155,9 +176,10 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e selectionOut.Write(s.local.ephemeralPubKey) selectionOutBytes := selectionOut.Bytes() + // log.Debugf("2.0 exchange: %v", selectionOutBytes) exchangeOut := new(pb.Exchange) exchangeOut.Epubkey = s.local.ephemeralPubKey - exchangeOut.Signature, err = s.localPeer.PrivKey().Sign(selectionOutBytes) + exchangeOut.Signature, err = s.localKey.Sign(selectionOutBytes) if err != nil { return err } @@ -184,16 +206,21 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e selectionIn.Write(proposeOutBytes) selectionIn.Write(s.remote.ephemeralPubKey) selectionInBytes := selectionIn.Bytes() + // log.Debugf("2.0.1 exchange recv: %v", selectionInBytes) // u.POut("Remote Peer Identified as %s\n", s.remote) - sigOK, err := s.remotePeer.PubKey().Verify(selectionInBytes, exchangeIn.GetSignature()) + sigOK, err := s.remote.permanentPubKey.Verify(selectionInBytes, exchangeIn.GetSignature()) if err != nil { + // log.Error("2.1 Verify: failed: %s", err) return err } if !sigOK { - return errors.New("Bad signature!") + err := errors.New("Bad signature!") + // log.Error("2.1 Verify: failed: %s", err) + return err } + // log.Debugf("2.1 Verify: signature verified.") // ============================================================================= // step 2.2. Keys -- generate keys for mac + encryption @@ -223,6 +250,9 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e s.local.keys = k1 s.remote.keys = k2 + // log.Debug("2.2 keys:\n\tshared: %v\n\tk1: %v\n\tk2: %v", + // s.sharedSecret, s.local.keys, s.remote.keys) + // ============================================================================= // step 2.3. 
MAC + Cipher -- prepare MAC + cipher @@ -234,6 +264,8 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e return err } + // log.Debug("2.3 mac + cipher.") + // ============================================================================= // step 3. Finish -- send expected message (the nonces), verify encryption works @@ -242,6 +274,7 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e r := NewETMReader(s.insecure, s.remote.cipher, s.remote.mac) s.secure = msgio.Combine(w, r).(msgio.ReadWriteCloser) + // log.Debug("3.0 finish. sending: %v", proposeIn.GetRand()) // send their Nonce. if _, err := s.secure.Write(proposeIn.GetRand()); err != nil { return fmt.Errorf("Failed to write Finish nonce: %s", err) @@ -252,6 +285,8 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e if _, err := io.ReadFull(s.secure, nonceOut2); err != nil { return fmt.Errorf("Failed to read Finish nonce: %s", err) } + + // log.Debug("3.0 finish.\n\texpect: %v\n\tactual: %v", nonceOut, nonceOut2) if !bytes.Equal(nonceOut, nonceOut2) { return fmt.Errorf("Failed to read our encrypted nonce: %s != %s", nonceOut2, nonceOut) } @@ -261,25 +296,3 @@ func (s *secureSession) handshake(ctx context.Context, insecure io.ReadWriter) e log.Event(ctx, "secureHandshakeFinish", s.localPeer, s.remotePeer) return nil } - -// getOrConstructPeer attempts to fetch a peer from a peerstore. -// if succeeds, verify ID and PubKey match. -// else, construct it. -func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (peer.Peer, error) { - - rid, err := peer.IDFromPubKey(rpk) - if err != nil { - return nil, err - } - - npeer, err := peers.FindOrCreate(rid) - if err != nil { - return nil, err // unexpected error happened. 
- } - - // public key verification happens in Peer.VerifyAndSetPubKey - if err := npeer.VerifyAndSetPubKey(rpk); err != nil { - return nil, err // pubkey mismatch or other problem - } - return npeer, nil -} diff --git a/crypto/spipe/handshake.go b/crypto/spipe/handshake.go deleted file mode 100644 index 362063159..000000000 --- a/crypto/spipe/handshake.go +++ /dev/null @@ -1,389 +0,0 @@ -// package spipe handles establishing secure communication between two peers. -package spipe - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "hash" - - bfish "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish" - - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - - ci "github.com/jbenet/go-ipfs/crypto" - pb "github.com/jbenet/go-ipfs/crypto/spipe/internal/pb" - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" -) - -var log = u.Logger("handshake") - -// List of supported ECDH curves -var SupportedExchanges = "P-256,P-224,P-384,P-521" - -// List of supported Ciphers -var SupportedCiphers = "AES-256,AES-128,Blowfish" - -// List of supported Hashes -var SupportedHashes = "SHA256,SHA512" - -// ErrUnsupportedKeyType is returned when a private key cast/type switch fails. -var ErrUnsupportedKeyType = errors.New("unsupported key type") - -// ErrClosed signals the closing of a connection. -var ErrClosed = errors.New("connection closed") - -// handsahke performs initial communication over insecure channel to share -// keys, IDs, and initiate communication. -func (s *SecurePipe) handshake() error { - // Generate and send Hello packet. 
- // Hello = (rand, PublicKey, Supported) - nonce := make([]byte, 16) - _, err := rand.Read(nonce) - if err != nil { - return err - } - - log.Debugf("handshake: %s <--> %s", s.local, s.remote) - myPubKey, err := s.local.PubKey().Bytes() - if err != nil { - return err - } - - proposeMsg := new(pb.Propose) - proposeMsg.Rand = nonce - proposeMsg.Pubkey = myPubKey - proposeMsg.Exchanges = &SupportedExchanges - proposeMsg.Ciphers = &SupportedCiphers - proposeMsg.Hashes = &SupportedHashes - - encoded, err := proto.Marshal(proposeMsg) - if err != nil { - return err - } - - // Send our Propose packet - select { - case s.insecure.Out <- encoded: - case <-s.ctx.Done(): - return ErrClosed - } - - // Parse their Propose packet and generate an Exchange packet. - // Exchange = (EphemeralPubKey, Signature) - var resp []byte - select { - case <-s.ctx.Done(): - return ErrClosed - case resp = <-s.insecure.In: - } - - // u.POut("received encoded handshake\n") - proposeResp := new(pb.Propose) - err = proto.Unmarshal(resp, proposeResp) - if err != nil { - return err - } - - // get remote identity - remotePubKey, err := ci.UnmarshalPublicKey(proposeResp.GetPubkey()) - if err != nil { - return err - } - - // get or construct peer - s.remote, err = getOrConstructPeer(s.peers, remotePubKey) - if err != nil { - return err - } - log.Debugf("%s Remote Peer Identified as %s", s.local, s.remote) - - exchange, err := SelectBest(SupportedExchanges, proposeResp.GetExchanges()) - if err != nil { - return err - } - - cipherType, err := SelectBest(SupportedCiphers, proposeResp.GetCiphers()) - if err != nil { - return err - } - - hashType, err := SelectBest(SupportedHashes, proposeResp.GetHashes()) - if err != nil { - return err - } - - // u.POut("Selected %s %s %s\n", exchange, cipherType, hashType) - epubkey, genSharedKey, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey - - var handshake bytes.Buffer // Gather corpus to sign. 
- handshake.Write(encoded) - handshake.Write(resp) - handshake.Write(epubkey) - - exPacket := new(pb.Exchange) - - exPacket.Epubkey = epubkey - exPacket.Signature, err = s.local.PrivKey().Sign(handshake.Bytes()) - if err != nil { - return err - } - - exEncoded, err := proto.Marshal(exPacket) - - // send out Exchange packet - select { - case s.insecure.Out <- exEncoded: - case <-s.ctx.Done(): - return ErrClosed - } - - // Parse their Exchange packet and generate a Finish packet. - // Finish = E('Finish') - var resp1 []byte - select { - case <-s.ctx.Done(): - return ErrClosed - case resp1 = <-s.insecure.In: - } - - exchangeResp := new(pb.Exchange) - err = proto.Unmarshal(resp1, exchangeResp) - if err != nil { - return err - } - - var theirHandshake bytes.Buffer - theirHandshake.Write(resp) - theirHandshake.Write(encoded) - theirHandshake.Write(exchangeResp.GetEpubkey()) - - // u.POut("Remote Peer Identified as %s\n", s.remote) - ok, err := s.remote.PubKey().Verify(theirHandshake.Bytes(), exchangeResp.GetSignature()) - if err != nil { - return err - } - - if !ok { - return errors.New("Bad signature!") - } - - secret, err := genSharedKey(exchangeResp.GetEpubkey()) - if err != nil { - return err - } - - k1, k2 := ci.KeyStretcher(cipherType, hashType, secret) - cmp := bytes.Compare(myPubKey, proposeResp.GetPubkey()) - switch cmp { - case 1: - case -1: - k1, k2 = k2, k1 // swap - case 0: // really shouldnt kappen. 
- copy(k2.IV, k1.IV) - copy(k2.MacKey, k1.MacKey) - copy(k2.CipherKey, k1.CipherKey) - } - go s.handleSecureIn(hashType, cipherType, k2.IV, k2.CipherKey, k2.MacKey) - go s.handleSecureOut(hashType, cipherType, k1.IV, k1.CipherKey, k1.MacKey) - - finished := []byte("Finished") - - // send finished msg - select { - case <-s.ctx.Done(): - return ErrClosed - case s.Out <- finished: - } - - // recv finished msg - var resp2 []byte - select { - case <-s.ctx.Done(): - return ErrClosed - case resp2 = <-s.In: - } - - if bytes.Compare(resp2, finished) != 0 { - return fmt.Errorf("Negotiation failed, got: %s", resp2) - } - - log.Debugf("%s handshake: Got node id: %s", s.local, s.remote) - return nil -} - -func makeMac(hashType string, key []byte) (hash.Hash, int) { - switch hashType { - case "SHA1": - return hmac.New(sha1.New, key), sha1.Size - case "SHA512": - return hmac.New(sha512.New, key), sha512.Size - default: - return hmac.New(sha256.New, key), sha256.Size - } -} - -func makeCipher(cipherType string, CKey []byte) (cipher.Block, error) { - switch cipherType { - case "AES-128", "AES-256": - return aes.NewCipher(CKey) - case "Blowfish": - return bfish.NewCipher(CKey) - default: - return nil, fmt.Errorf("Unrecognized cipher string: %s", cipherType) - } -} - -func (s *SecurePipe) handleSecureIn(hashType, cipherType string, tIV, tCKey, tMKey []byte) { - theirBlock, err := makeCipher(cipherType, tCKey) - if err != nil { - log.Criticalf("Invalid Cipher: %s", err) - s.cancel() - return - } - theirCipher := cipher.NewCTR(theirBlock, tIV) - - theirMac, macSize := makeMac(hashType, tMKey) - - for { - var data []byte - ok := true - - select { - case <-s.ctx.Done(): - ok = false // return out - case data, ok = <-s.insecure.In: - } - - if !ok { - close(s.Duplex.In) - return - } - - // log.Debug("[peer %s] secure in [from = %s] %d", s.local, s.remote, len(data)) - if len(data) <= macSize { - continue - } - - mark := len(data) - macSize - - theirMac.Write(data[0:mark]) - expected := 
theirMac.Sum(nil) - theirMac.Reset() - - hmacOk := hmac.Equal(data[mark:], expected) - if !hmacOk { - continue - } - - theirCipher.XORKeyStream(data, data[0:mark]) - - s.Duplex.In <- data[:mark] - } -} - -func (s *SecurePipe) handleSecureOut(hashType, cipherType string, mIV, mCKey, mMKey []byte) { - myBlock, err := makeCipher(cipherType, mCKey) - if err != nil { - log.Criticalf("Invalid Cipher: %s", err) - s.cancel() - return - } - myCipher := cipher.NewCTR(myBlock, mIV) - - myMac, macSize := makeMac(hashType, mMKey) - - for { - var data []byte - ok := true - - select { - case <-s.ctx.Done(): - ok = false // return out - case data, ok = <-s.Out: - } - - if !ok { - close(s.insecure.Out) - return - } - - if len(data) == 0 { - continue - } - - buff := make([]byte, len(data)+macSize) - - myCipher.XORKeyStream(buff, data) - - myMac.Write(buff[0:len(data)]) - copy(buff[len(data):], myMac.Sum(nil)) - myMac.Reset() - - // log.Debug("[peer %s] secure out [to = %s] %d", s.local, s.remote, len(buff)) - s.insecure.Out <- buff - } -} - -// Determines which algorithm to use. Note: f(a, b) = f(b, a) -func SelectBest(myPrefs, theirPrefs string) (string, error) { - // Person with greatest hash gets first choice. - myHash := u.Hash([]byte(myPrefs)) - theirHash := u.Hash([]byte(theirPrefs)) - - cmp := bytes.Compare(myHash, theirHash) - var firstChoiceArr, secChoiceArr []string - - if cmp == -1 { - firstChoiceArr = strings.Split(theirPrefs, ",") - secChoiceArr = strings.Split(myPrefs, ",") - } else if cmp == 1 { - firstChoiceArr = strings.Split(myPrefs, ",") - secChoiceArr = strings.Split(theirPrefs, ",") - } else { // Exact same preferences. 
- myPrefsArr := strings.Split(myPrefs, ",") - return myPrefsArr[0], nil - } - - for _, secChoice := range secChoiceArr { - for _, firstChoice := range firstChoiceArr { - if firstChoice == secChoice { - return firstChoice, nil - } - } - } - - return "", errors.New("No algorithms in common!") -} - -// getOrConstructPeer attempts to fetch a peer from a peerstore. -// if succeeds, verify ID and PubKey match. -// else, construct it. -func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (peer.Peer, error) { - - rid, err := peer.IDFromPubKey(rpk) - if err != nil { - return nil, err - } - - npeer, err := peers.FindOrCreate(rid) - if err != nil { - return nil, err // unexpected error happened. - } - - // public key verification happens in Peer.VerifyAndSetPubKey - if err := npeer.VerifyAndSetPubKey(rpk); err != nil { - return nil, err // pubkey mismatch or other problem - } - return npeer, nil -} diff --git a/crypto/spipe/pipe.go b/crypto/spipe/pipe.go deleted file mode 100644 index 3f8f57b62..000000000 --- a/crypto/spipe/pipe.go +++ /dev/null @@ -1,78 +0,0 @@ -package spipe - -import ( - "errors" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - peer "github.com/jbenet/go-ipfs/peer" - - pipes "github.com/jbenet/go-ipfs/util/pipes" -) - -// SecurePipe objects represent a bi-directional message channel. -type SecurePipe struct { - pipes.Duplex - insecure pipes.Duplex - - local peer.Peer - remote peer.Peer - peers peer.Peerstore - - params params - - ctx context.Context - cancel context.CancelFunc -} - -// options in a secure pipe -type params struct { -} - -// NewSecurePipe constructs a pipe with channels of a given buffer size. 
-func NewSecurePipe(ctx context.Context, bufsize int, local peer.Peer, - peers peer.Peerstore, insecure pipes.Duplex) (*SecurePipe, error) { - - ctx, cancel := context.WithCancel(ctx) - - sp := &SecurePipe{ - Duplex: pipes.Duplex{ - In: make(chan []byte, bufsize), - Out: make(chan []byte, bufsize), - }, - local: local, - peers: peers, - insecure: insecure, - - ctx: ctx, - cancel: cancel, - } - - if err := sp.handshake(); err != nil { - sp.Close() - return nil, err - } - - return sp, nil -} - -// LocalPeer retrieves the local peer. -func (s *SecurePipe) LocalPeer() peer.Peer { - return s.local -} - -// RemotePeer retrieves the local peer. -func (s *SecurePipe) RemotePeer() peer.Peer { - return s.remote -} - -// Close closes the secure pipe -func (s *SecurePipe) Close() error { - select { - case <-s.ctx.Done(): - return errors.New("already closed") - default: - } - - s.cancel() - return nil -} diff --git a/crypto/spipe/spipe_test.go b/crypto/spipe/spipe_test.go deleted file mode 100644 index 56ccbea08..000000000 --- a/crypto/spipe/spipe_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package spipe - -import ( - "testing" - - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - - ci "github.com/jbenet/go-ipfs/crypto" - "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/util" - "github.com/jbenet/go-ipfs/util/pipes" - testutil "github.com/jbenet/go-ipfs/util/testutil" -) - -func getPeer(tb testing.TB) peer.Peer { - privk, pubk, err := ci.GenerateKeyPair(ci.RSA, 1024) - if err != nil { - tb.Fatal(err) - } - - p, err := testutil.NewPeerWithKeyPair(privk, pubk) - if err != nil { - tb.Fatal(err) - } - - return p -} - -func bindDuplexNoCopy(a, b pipes.Duplex) { - go func() { - for m := range b.Out { - a.In <- m - } - }() - for m := range a.Out { - b.In <- m - } -} - -var globuf = make([]byte, 4*1024*1024) - -func bindDuplexWithCopy(a, b pipes.Duplex) { - dup := func(byt []byte) []byte { - n := globuf[:len(byt)] - copy(n, byt) - return n - 
} - go func() { - for m := range b.Out { - a.In <- dup(m) - } - }() - for m := range a.Out { - b.In <- dup(m) - } -} - -func BenchmarkDataEncryptDefault(b *testing.B) { - SupportedExchanges = "P-256,P-224,P-384,P-521" - SupportedCiphers = "AES-256,AES-128" - SupportedHashes = "SHA256,SHA512,SHA1" - - runEncryptBenchmark(b) -} - -func BenchmarkDataEncryptLite(b *testing.B) { - SupportedExchanges = "P-256" - SupportedCiphers = "AES-128" - SupportedHashes = "SHA1" - - runEncryptBenchmark(b) -} - -func BenchmarkDataEncryptBlowfish(b *testing.B) { - SupportedExchanges = "P-256" - SupportedCiphers = "Blowfish" - SupportedHashes = "SHA1" - - runEncryptBenchmark(b) -} - -func runEncryptBenchmark(b *testing.B) { - pstore := peer.NewPeerstore() - ctx := context.TODO() - bufsize := 1024 * 1024 - - pa := getPeer(b) - pb := getPeer(b) - duplexa := pipes.NewDuplex(16) - duplexb := pipes.NewDuplex(16) - - go bindDuplexNoCopy(duplexa, duplexb) - - var spb *SecurePipe - done := make(chan struct{}) - go func() { - var err error - spb, err = NewSecurePipe(ctx, bufsize, pb, pstore, duplexb) - if err != nil { - b.Fatal(err) - } - done <- struct{}{} - }() - - spa, err := NewSecurePipe(ctx, bufsize, pa, pstore, duplexa) - if err != nil { - b.Fatal(err) - } - - <-done - - go func() { - for _ = range spa.In { - // Throw it all away, - // all of your hopes and dreams - // piped out to /dev/null... - done <- struct{}{} - } - }() - - data := make([]byte, 1024*512) - util.NewTimeSeededRand().Read(data) - // Begin actual benchmarking - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.SetBytes(int64(len(data))) - spb.Out <- data - <-done - } - -} - -func BenchmarkDataTransfer(b *testing.B) { - duplexa := pipes.NewDuplex(16) - duplexb := pipes.NewDuplex(16) - - go bindDuplexWithCopy(duplexa, duplexb) - - done := make(chan struct{}) - go func() { - for _ = range duplexa.In { - // Throw it all away, - // all of your hopes and dreams - // piped out to /dev/null... 
- done <- struct{}{} - } - }() - - data := make([]byte, 1024*512) - util.NewTimeSeededRand().Read(data) - // Begin actual benchmarking - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.SetBytes(int64(len(data))) - duplexb.Out <- data - <-done - } - -} diff --git a/diagnostics/diag.go b/diagnostics/diag.go index 67b9f55e5..6664af6e7 100644 --- a/diagnostics/diag.go +++ b/diagnostics/diag.go @@ -13,8 +13,8 @@ import ( "crypto/rand" - ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" pb "github.com/jbenet/go-ipfs/diagnostics/internal/pb" @@ -31,7 +31,7 @@ const ResponseTimeout = time.Second * 10 // requests type Diagnostics struct { network net.Network - self peer.Peer + self peer.ID diagLock sync.Mutex diagMap map[string]time.Time @@ -39,7 +39,7 @@ type Diagnostics struct { } // NewDiagnostics instantiates a new diagnostics service running on the given network -func NewDiagnostics(self peer.Peer, inet net.Network) *Diagnostics { +func NewDiagnostics(self peer.ID, inet net.Network) *Diagnostics { d := &Diagnostics{ network: inet, self: self, @@ -91,20 +91,20 @@ func (di *DiagInfo) Marshal() []byte { return b } -func (d *Diagnostics) getPeers() []peer.Peer { +func (d *Diagnostics) getPeers() []peer.ID { return d.network.Peers() } func (d *Diagnostics) getDiagInfo() *DiagInfo { di := new(DiagInfo) di.CodeVersion = "github.com/jbenet/go-ipfs" - di.ID = d.self.ID().Pretty() + di.ID = d.self.Pretty() di.LifeSpan = time.Since(d.birth) di.Keys = nil // Currently no way to query datastore di.BwIn, di.BwOut = d.network.BandwidthTotals() for _, p := range d.getPeers() { - d := connDiagInfo{p.GetLatency(), p.ID().Pretty()} + d := connDiagInfo{d.network.Peerstore().LatencyEWMA(p), 
p.Pretty()} di.Connections = append(di.Connections, d) } return di @@ -142,7 +142,7 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error) for _, p := range peers { log.Debugf("Sending getDiagnostic to: %s", p) sends++ - go func(p peer.Peer) { + go func(p peer.ID) { data, err := d.getDiagnosticFromPeer(ctx, p, pmes) if err != nil { log.Errorf("GetDiagnostic error: %v", err) @@ -181,7 +181,7 @@ func appendDiagnostics(data []byte, cur []*DiagInfo) []*DiagInfo { } // TODO: this method no longer needed. -func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.Peer, mes *pb.Message) ([]byte, error) { +func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, mes *pb.Message) ([]byte, error) { rpmes, err := d.sendRequest(ctx, p, mes) if err != nil { return nil, err @@ -195,7 +195,7 @@ func newMessage(diagID string) *pb.Message { return pmes } -func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (d *Diagnostics) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { s, err := d.network.NewStream(net.ProtocolDiag, p) if err != nil { @@ -225,7 +225,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes return rpmes, nil } -func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (d *Diagnostics) handleDiagnostic(p peer.ID, pmes *pb.Message) (*pb.Message, error) { log.Debugf("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID()) resp := newMessage(pmes.GetDiagID()) @@ -250,7 +250,7 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Messa for _, p := range d.getPeers() { log.Debugf("Sending diagnostic request to peer: %s", p) sendcount++ - go func(p peer.Peer) { + go func(p peer.ID) { out, err := d.getDiagnosticFromPeer(ctx, p, pmes) if err != nil { log.Errorf("getDiagnostic error: %v", err) @@ -288,7 +288,7 @@ func (d 
*Diagnostics) HandleMessage(ctx context.Context, s net.Stream) error { // Print out diagnostic log.Infof("[peer: %s] Got message from [%s]\n", - d.self.ID().Pretty(), s.Conn().RemotePeer().ID().Pretty()) + d.self.Pretty(), s.Conn().RemotePeer()) // dispatch handler. p := s.Conn().RemotePeer() diff --git a/dockertest/Makefile b/dockertest/Makefile index 69cc91f08..694e8e491 100644 --- a/dockertest/Makefile +++ b/dockertest/Makefile @@ -1,22 +1,14 @@ RANDOMSRC = Godeps/_workspace/src/github.com/jbenet/go-random/random -IPFS_DOCKER_IMAGE = zaqwsx_ipfs-test-img +IMAGE_NAME = ipfs-test-latest test: clean setup - fig build --no-cache - fig up --no-color | tee build/fig.log - make save_logs # save the ipfs logs for inspection - # fig up won't report the error using an error code, so we grep the - # fig.log file to find out whether the call succeeded - tail build/fig.log | grep "exited with code 0" + ./run-test-on-img.sh $(IMAGE_NAME) setup: docker_ipfs_image data/filetiny data/filerand save_logs: sh bin/save_logs.sh -docker_ipfs_image: - docker images | grep $(IPFS_DOCKER_IMAGE) - data/filetiny: Makefile cp Makefile ./data/filetiny # simple @@ -26,6 +18,12 @@ data/filerand: bin/random bin/random: go build -o ./bin/random ../$(RANDOMSRC) +# just build it every time... this part isn't +# even the lengthy part, and it decreases pain. +docker_ipfs_image: + cd .. && docker build -t $(IMAGE_NAME) . + docker images | grep $(IMAGE_NAME) + clean: sh bin/clean.sh fig stop diff --git a/dockertest/bootstrap/Dockerfile b/dockertest/bootstrap/Dockerfile index 2b645857f..562b38ded 100644 --- a/dockertest/bootstrap/Dockerfile +++ b/dockertest/bootstrap/Dockerfile @@ -5,4 +5,6 @@ ADD . 
/tmp/id RUN mv -f /tmp/id/config /root/.go-ipfs/config RUN ipfs id +ENV IPFS_LOGGING_FMT nocolor + EXPOSE 4011 4012/udp diff --git a/dockertest/build/.gitignore b/dockertest/build/.gitignore index d1871380a..f8f13cfaf 100644 --- a/dockertest/build/.gitignore +++ b/dockertest/build/.gitignore @@ -1,2 +1 @@ -*.log -go-random +.built_img diff --git a/dockertest/client/Dockerfile b/dockertest/client/Dockerfile index 55b04f320..88d15dd0c 100644 --- a/dockertest/client/Dockerfile +++ b/dockertest/client/Dockerfile @@ -7,5 +7,7 @@ RUN ipfs id EXPOSE 4031 4032/udp +ENV IPFS_LOGGING_FMT nocolor + ENTRYPOINT ["/bin/bash"] CMD ["/tmp/id/run.sh"] diff --git a/dockertest/run-test-on-img.sh b/dockertest/run-test-on-img.sh new file mode 100755 index 000000000..d53cde34b --- /dev/null +++ b/dockertest/run-test-on-img.sh @@ -0,0 +1,33 @@ +#!/bin/sh +if [ "$#" -ne 1 ]; then + echo "usage: $0 " + echo "runs this test on image matching " + exit 1 +fi + +# this tag is used by the dockerfiles in +# {data, server, client, bootstrap} +tag=zaqwsx_ipfs-test-img + +# could use set -v, but i dont want to see the comments... 
+ +img=$(docker images | grep $1 | awk '{print $3}') +echo "using docker image: $img ($1)" + +echo docker tag -f $img $tag +docker tag -f $img $tag + +echo "fig build --no-cache" +fig build --no-cache + +echo "fig up --no-color | tee build/fig.log" +fig up --no-color | tee build/fig.log + +# save the ipfs logs for inspection +echo "make save_logs" +make save_logs + +# fig up won't report the error using an error code, so we grep the +# fig.log file to find out whether the call succeeded +echo 'tail build/fig.log | grep "exited with code 0"' +tail build/fig.log | grep "exited with code 0" diff --git a/dockertest/server/Dockerfile b/dockertest/server/Dockerfile index 5590c7823..03cdbc595 100644 --- a/dockertest/server/Dockerfile +++ b/dockertest/server/Dockerfile @@ -8,5 +8,7 @@ RUN chmod +x /tmp/test/run.sh EXPOSE 4021 4022/udp +ENV IPFS_LOGGING_FMT nocolor + ENTRYPOINT ["/bin/bash"] CMD ["/tmp/test/run.sh"] diff --git a/epictest/addcat_test.go b/epictest/addcat_test.go index 09ba58f79..7cf4d04e8 100644 --- a/epictest/addcat_test.go +++ b/epictest/addcat_test.go @@ -87,11 +87,12 @@ func RandomBytes(n int64) []byte { func AddCatBytes(data []byte, conf Config) error { sessionGenerator := bitswap.NewSessionGenerator( - tn.VirtualNetwork(delay.Fixed(conf.NetworkLatency)), // TODO rename VirtualNetwork - mockrouting.NewServerWithDelay(mockrouting.DelayConfig{ - Query: delay.Fixed(conf.RoutingLatency), - ValueVisibility: delay.Fixed(conf.RoutingLatency), - }), + tn.VirtualNetwork( + mockrouting.NewServerWithDelay(mockrouting.DelayConfig{ + Query: delay.Fixed(conf.RoutingLatency), + ValueVisibility: delay.Fixed(conf.RoutingLatency), + }), + delay.Fixed(conf.NetworkLatency)), // TODO rename VirtualNetwork ) defer sessionGenerator.Close() diff --git a/exchange/bitswap/bitswap.go b/exchange/bitswap/bitswap.go index 912ed1210..58c7a3584 100644 --- a/exchange/bitswap/bitswap.go +++ b/exchange/bitswap/bitswap.go @@ -8,6 +8,7 @@ import ( "time" context 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" @@ -18,8 +19,10 @@ import ( wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + errors "github.com/jbenet/go-ipfs/util/debugerror" + "github.com/jbenet/go-ipfs/util/delay" eventlog "github.com/jbenet/go-ipfs/util/eventlog" - pset "github.com/jbenet/go-ipfs/util/peerset" + pset "github.com/jbenet/go-ipfs/util/peerset" // TODO move this to peerstore ) var log = eventlog.Logger("bitswap") @@ -36,14 +39,14 @@ const ( ) var ( - rebroadcastDelay = time.Second * 10 + rebroadcastDelay = delay.Fixed(time.Second * 10) ) // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. // Runs until context is cancelled. -func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, +func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, nice bool) exchange.Interface { ctx, cancelFunc := context.WithCancel(parent) @@ -60,8 +63,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout cancelFunc: cancelFunc, notifications: notif, engine: decision.NewEngine(ctx, bstore), - routing: routing, - sender: network, + network: network, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan []u.Key, sizeBatchRequestChan), } @@ -75,16 +77,13 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout // bitswap instances implement the bitswap protocol. 
type bitswap struct { - // sender delivers messages on behalf of the session - sender bsnet.BitSwapNetwork + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork // blockstore is the local database // NB: ensure threadsafety blockstore blockstore.Blockstore - // routing interface for communication - routing bsnet.Routing - notifications notifications.PubSub // Requests for a set of related blocks @@ -162,10 +161,10 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - return bs.routing.Provide(ctx, blk.Key()) + return bs.network.Provide(ctx, blk.Key()) } -func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { +func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.ID) error { if peers == nil { panic("Cant send wantlist to nil peerchan") } @@ -177,25 +176,12 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e for peerToQuery := range peers { log.Event(ctx, "PeerToQuery", peerToQuery) wg.Add(1) - go func(p peer.Peer) { + go func(p peer.ID) { defer wg.Done() - - log.Event(ctx, "DialPeer", p) - err := bs.sender.DialPeer(ctx, p) - if err != nil { - log.Errorf("Error sender.DialPeer(%s): %s", p, err) + if err := bs.send(ctx, p, message); err != nil { + log.Error(err) return } - - err = bs.sender.SendMessage(ctx, p, message) - if err != nil { - log.Errorf("Error sender.SendMessage(%s) = %s", p, err) - return - } - // FIXME ensure accounting is handled correctly when - // communication fails. May require slightly different API to - // get better guarantees. May need shared sequence numbers. 
- bs.engine.MessageSent(p, message) }(peerToQuery) } wg.Wait() @@ -212,7 +198,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli message.AddEntry(e.Key, e.Priority) } - ps := pset.New() + set := pset.New() // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} @@ -221,10 +207,9 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli go func(k u.Key) { defer wg.Done() child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) - + providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - if ps.TryAdd(prov) { //Do once per peer + if set.TryAdd(prov) { //Do once per peer bs.send(ctx, prov, message) } } @@ -249,7 +234,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) - broadcastSignal := time.After(rebroadcastDelay) + broadcastSignal := time.After(rebroadcastDelay.Get()) defer cancel() for { @@ -257,7 +242,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { case <-broadcastSignal: // Resend unfulfilled wantlist keys bs.sendWantlistToProviders(ctx, bs.wantlist) - broadcastSignal = time.After(rebroadcastDelay) + broadcastSignal = time.After(rebroadcastDelay.Get()) case ks := <-bs.batchRequests: if len(ks) == 0 { log.Warning("Received batch request for zero blocks") @@ -274,8 +259,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { // it. Later, this assumption may not hold as true if we implement // newer bitswap strategies. 
child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.routing.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) - + providers := bs.network.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) @@ -287,19 +271,19 @@ func (bs *bitswap) clientWorker(parent context.Context) { } // TODO(brian): handle errors -func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { +func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) { log.Debugf("ReceiveMessage from %s", p) - if p == nil { + if p == "" { log.Error("Received message from nil peer!") // TODO propagate the error upward - return nil, nil + return "", nil } if incoming == nil { log.Error("Got nil bitswap message!") // TODO propagate the error upward - return nil, nil + return "", nil } // This call records changes to wantlists, blocks received, @@ -321,7 +305,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm bs.cancelBlocks(ctx, keys) // TODO: consider changing this function to not return anything - return nil, nil + return "", nil } func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { @@ -349,9 +333,14 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent -func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) error { - if err := bs.sender.SendMessage(ctx, p, m); err != nil { - return err +func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { + log.Event(ctx, "DialPeer", p) + err := bs.network.DialPeer(ctx, p) + if err != nil { + return errors.Wrap(err) + } + if err := bs.network.SendMessage(ctx, p, m); err != nil { + return 
errors.Wrap(err) } return bs.engine.MessageSent(p, m) } diff --git a/exchange/bitswap/bitswap_test.go b/exchange/bitswap/bitswap_test.go index 2c04b0508..6da4aaeff 100644 --- a/exchange/bitswap/bitswap_test.go +++ b/exchange/bitswap/bitswap_test.go @@ -7,13 +7,14 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" + peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work @@ -23,9 +24,8 @@ const kNetworkDelay = 0 * time.Millisecond func TestClose(t *testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") - vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rout := mockrouting.NewServer() - sesgen := NewSessionGenerator(vnet, rout) + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sesgen := NewSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -38,9 +38,8 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - g := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + g := NewSessionGenerator(net) defer g.Close() self := g.Next() @@ -54,15 +53,16 @@ func TestGetBlockTimeout(t *testing.T) { } } -func TestProviderForKeyButNetworkCannotFind(t *testing.T) { +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() - g := NewSessionGenerator(net, rs) + net 
:= tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) + g := NewSessionGenerator(net) defer g.Close() block := blocks.NewBlock([]byte("block")) - rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network + pinfo := peer.PeerInfo{ID: peer.ID("testing")} + rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() defer solo.Exchange.Close() @@ -79,10 +79,9 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := NewSessionGenerator(net, rs) + g := NewSessionGenerator(net) defer g.Close() hasBlock := g.Next() @@ -134,9 +133,8 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -150,10 +148,9 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { var blkeys []u.Key first := instances[0] for _, b := range blocks { - first.Blockstore().Put(b) + first.Blockstore().Put(b) // TODO remove. don't need to do this. 
bitswap owns block blkeys = append(blkeys, b.Key()) first.Exchange.HasBlock(context.Background(), b) - rs.Client(first.Peer).Provide(context.Background(), b.Key()) } t.Log("Distribute!") @@ -200,15 +197,13 @@ func TestSendToWantingPeer(t *testing.T) { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() - oldVal := rebroadcastDelay - rebroadcastDelay = time.Second / 2 - defer func() { rebroadcastDelay = oldVal }() + prev := rebroadcastDelay.Set(time.Second / 2) + defer func() { rebroadcastDelay.Set(prev) }() peerA := sg.Next() peerB := sg.Next() @@ -247,9 +242,8 @@ func TestSendToWantingPeer(t *testing.T) { } func TestBasicBitswap(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") diff --git a/exchange/bitswap/decision/engine.go b/exchange/bitswap/decision/engine.go index ea4539437..da5ccfe6d 100644 --- a/exchange/bitswap/decision/engine.go +++ b/exchange/bitswap/decision/engine.go @@ -50,7 +50,7 @@ const ( // Envelope contains a message for a Peer type Envelope struct { // Peer is the intended recipient - Peer peer.Peer + Peer peer.ID // Message is the payload Message bsmsg.BitSwapMessage } @@ -75,12 +75,12 @@ type Engine struct { lock sync.RWMutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. 
- ledgerMap map[u.Key]*ledger + ledgerMap map[peer.ID]*ledger } func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ - ledgerMap: make(map[u.Key]*ledger), + ledgerMap: make(map[peer.ID]*ledger), bs: bs, peerRequestQueue: newTaskQueue(), outbox: make(chan Envelope, sizeOutboxChan), @@ -126,11 +126,11 @@ func (e *Engine) Outbox() <-chan Envelope { } // Returns a slice of Peers with whom the local node has active sessions -func (e *Engine) Peers() []peer.Peer { +func (e *Engine) Peers() []peer.ID { e.lock.RLock() defer e.lock.RUnlock() - response := make([]peer.Peer, 0) + response := make([]peer.ID, 0) for _, ledger := range e.ledgerMap { response = append(response, ledger.Partner) } @@ -139,7 +139,7 @@ func (e *Engine) Peers() []peer.Peer { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. -func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { newWorkExists := false defer func() { if newWorkExists { @@ -189,7 +189,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { e.lock.Lock() defer e.lock.Unlock() @@ -203,22 +203,22 @@ func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { return nil } -func (e *Engine) numBytesSentTo(p peer.Peer) uint64 { +func (e *Engine) numBytesSentTo(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent } -func (e *Engine) numBytesReceivedFrom(p peer.Peer) uint64 { +func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (e *Engine) findOrCreate(p peer.Peer) *ledger { - l, ok := e.ledgerMap[p.Key()] +func (e *Engine) findOrCreate(p peer.ID) *ledger { + l, ok := e.ledgerMap[p] if !ok { l = newLedger(p) - e.ledgerMap[p.Key()] = l + e.ledgerMap[p] = l } return l } diff --git a/exchange/bitswap/decision/engine_test.go b/exchange/bitswap/decision/engine_test.go index 148937573..0196863b3 100644 --- a/exchange/bitswap/decision/engine_test.go +++ b/exchange/bitswap/decision/engine_test.go @@ -7,21 +7,21 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndEngine struct { - peer.Peer + Peer peer.ID Engine *Engine } func newPeerAndLedgermanager(idStr string) peerAndEngine { return peerAndEngine{ - Peer: 
testutil.NewPeerWithIDString(idStr), + Peer: peer.ID(idStr), //Strategy: New(true), Engine: NewEngine(context.TODO(), blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))), @@ -70,7 +70,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { sanfrancisco.Engine.MessageSent(seattle.Peer, m) seattle.Engine.MessageReceived(sanfrancisco.Peer, m) - if seattle.Peer.Key() == sanfrancisco.Peer.Key() { + if seattle.Peer == sanfrancisco.Peer { t.Fatal("Sanity Check: Peers have same Key!") } @@ -83,9 +83,9 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { } } -func peerIsPartner(p peer.Peer, e *Engine) bool { +func peerIsPartner(p peer.ID, e *Engine) bool { for _, partner := range e.Peers() { - if partner.Key() == p.Key() { + if partner == p { return true } } diff --git a/exchange/bitswap/decision/ledger.go b/exchange/bitswap/decision/ledger.go index eea87af1f..f2b824603 100644 --- a/exchange/bitswap/decision/ledger.go +++ b/exchange/bitswap/decision/ledger.go @@ -12,7 +12,7 @@ import ( // access/lookups. type keySet map[u.Key]struct{} -func newLedger(p peer.Peer) *ledger { +func newLedger(p peer.ID) *ledger { return &ledger{ wantList: wl.New(), Partner: p, @@ -24,7 +24,7 @@ func newLedger(p peer.Peer) *ledger { // NOT threadsafe type ledger struct { // Partner is the remote Peer. - Partner peer.Peer + Partner peer.ID // Accounting tracks bytes sent and recieved. 
Accounting debtRatio diff --git a/exchange/bitswap/decision/taskqueue.go b/exchange/bitswap/decision/taskqueue.go index a76c56e9b..c86a73371 100644 --- a/exchange/bitswap/decision/taskqueue.go +++ b/exchange/bitswap/decision/taskqueue.go @@ -26,12 +26,12 @@ func newTaskQueue() *taskQueue { type task struct { Entry wantlist.Entry - Target peer.Peer + Target peer.ID Trash bool } // Push currently adds a new task to the end of the list -func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) { +func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) { tl.lock.Lock() defer tl.lock.Unlock() if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok { @@ -69,7 +69,7 @@ func (tl *taskQueue) Pop() *task { } // Remove lazily removes a task from the queue -func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { +func (tl *taskQueue) Remove(k u.Key, p peer.ID) { tl.lock.Lock() t, ok := tl.taskmap[taskKey(p, k)] if ok { @@ -79,6 +79,6 @@ func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { } // taskKey returns a key that uniquely identifies a task. -func taskKey(p peer.Peer, k u.Key) string { - return string(p.Key() + k) +func taskKey(p peer.ID, k u.Key) string { + return string(p) + string(k) } diff --git a/exchange/bitswap/network/interface.go b/exchange/bitswap/network/interface.go index 44557b064..1bc14ca88 100644 --- a/exchange/bitswap/network/interface.go +++ b/exchange/bitswap/network/interface.go @@ -12,37 +12,39 @@ import ( type BitSwapNetwork interface { // DialPeer ensures there is a connection to peer. - DialPeer(context.Context, peer.Peer) error + DialPeer(context.Context, peer.ID) error // SendMessage sends a BitSwap message to a peer. SendMessage( context.Context, - peer.Peer, + peer.ID, bsmsg.BitSwapMessage) error // SendRequest sends a BitSwap message to a peer and waits for a response. 
SendRequest( context.Context, - peer.Peer, + peer.ID, bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) // SetDelegate registers the Reciver to handle messages received from the // network. SetDelegate(Receiver) + + Routing } // Implement Receiver to receive messages from the BitSwapNetwork type Receiver interface { ReceiveMessage( - ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) ( - destination peer.Peer, outgoing bsmsg.BitSwapMessage) + ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) ( + destination peer.ID, outgoing bsmsg.BitSwapMessage) ReceiveError(error) } type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer + FindProvidersAsync(context.Context, u.Key, int) <-chan peer.ID // Provide provides the key to the network Provide(context.Context, u.Key) error diff --git a/exchange/bitswap/network/ipfs_impl.go b/exchange/bitswap/network/ipfs_impl.go index 3e6e54787..5388c8e6d 100644 --- a/exchange/bitswap/network/ipfs_impl.go +++ b/exchange/bitswap/network/ipfs_impl.go @@ -2,10 +2,10 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" + routing "github.com/jbenet/go-ipfs/routing" util "github.com/jbenet/go-ipfs/util" ) @@ -13,9 +13,10 @@ var log = util.Logger("bitswap_network") // NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS // Dialer & Service -func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork { +func NewFromIpfsNetwork(n inet.Network, r routing.IpfsRouting) BitSwapNetwork { bitswapNetwork := impl{ network: n, + routing: r, } n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream) return &bitswapNetwork @@ -25,11 +26,74 @@ func NewFromIpfsNetwork(n 
inet.Network) BitSwapNetwork { // NetMessage objects, into the bitswap network interface. type impl struct { network inet.Network + routing routing.IpfsRouting // inbound messages from the network are forwarded to the receiver receiver Receiver } +func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { + return bsnet.network.DialPeer(ctx, p) +} + +func (bsnet *impl) SendMessage( + ctx context.Context, + p peer.ID, + outgoing bsmsg.BitSwapMessage) error { + + s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) + if err != nil { + return err + } + defer s.Close() + + return outgoing.ToNet(s) +} + +func (bsnet *impl) SendRequest( + ctx context.Context, + p peer.ID, + outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { + + s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) + if err != nil { + return nil, err + } + defer s.Close() + + if err := outgoing.ToNet(s); err != nil { + return nil, err + } + + return bsmsg.FromNet(s) +} + +func (bsnet *impl) SetDelegate(r Receiver) { + bsnet.receiver = r +} + +// FindProvidersAsync returns a channel of providers for the given key +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { + out := make(chan peer.ID) + go func() { + defer close(out) + providers := bsnet.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + bsnet.network.Peerstore().AddAddresses(info.ID, info.Addrs) + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out +} + +// Provide provides the key to the network +func (bsnet *impl) Provide(ctx context.Context, k util.Key) error { + return bsnet.routing.Provide(ctx, k) +} + // handleNewStream receives a new stream from the network. 
func (bsnet *impl) handleNewStream(s inet.Stream) { @@ -52,43 +116,3 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { }() } - -func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error { - return bsnet.network.DialPeer(ctx, p) -} - -func (bsnet *impl) SendMessage( - ctx context.Context, - p peer.Peer, - outgoing bsmsg.BitSwapMessage) error { - - s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) - if err != nil { - return err - } - defer s.Close() - - return outgoing.ToNet(s) -} - -func (bsnet *impl) SendRequest( - ctx context.Context, - p peer.Peer, - outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - - s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) - if err != nil { - return nil, err - } - defer s.Close() - - if err := outgoing.ToNet(s); err != nil { - return nil, err - } - - return bsmsg.FromNet(s) -} - -func (bsnet *impl) SetDelegate(r Receiver) { - bsnet.receiver = r -} diff --git a/exchange/bitswap/testnet/network.go b/exchange/bitswap/testnet/network.go index b8f61b413..3201ad5c4 100644 --- a/exchange/bitswap/testnet/network.go +++ b/exchange/bitswap/testnet/network.go @@ -1,11 +1,13 @@ package bitswap import ( - "bytes" "errors" "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/routing" + "github.com/jbenet/go-ipfs/routing/mock" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" @@ -14,49 +16,52 @@ import ( ) type Network interface { - Adapter(peer.Peer) bsnet.BitSwapNetwork + Adapter(peer.ID) bsnet.BitSwapNetwork - HasPeer(peer.Peer) bool + HasPeer(peer.ID) bool SendMessage( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) error SendRequest( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) ( 
incoming bsmsg.BitSwapMessage, err error) } // network impl -func VirtualNetwork(d delay.D) Network { +func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ - clients: make(map[util.Key]bsnet.Receiver), - delay: d, + clients: make(map[peer.ID]bsnet.Receiver), + delay: d, + routingserver: rs, } } type network struct { - clients map[util.Key]bsnet.Receiver - delay delay.D + clients map[peer.ID]bsnet.Receiver + routingserver mockrouting.Server + delay delay.D } -func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork { +func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { client := &networkClient{ local: p, network: n, + routing: n.routingserver.Client(peer.PeerInfo{ID: p}), } - n.clients[p.Key()] = client + n.clients[p] = client return client } -func (n *network) HasPeer(p peer.Peer) bool { - _, found := n.clients[p.Key()] +func (n *network) HasPeer(p peer.ID) bool { + _, found := n.clients[p] return found } @@ -64,11 +69,11 @@ func (n *network) HasPeer(p peer.Peer) bool { // TODO what does the network layer do with errors received from services? 
func (n *network) SendMessage( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) error { - receiver, ok := n.clients[to.Key()] + receiver, ok := n.clients[to] if !ok { return errors.New("Cannot locate peer on network") } @@ -82,8 +87,8 @@ func (n *network) SendMessage( } func (n *network) deliver( - r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error { - if message == nil || from == nil { + r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error { + if message == nil || from == "" { return errors.New("Invalid input") } @@ -91,15 +96,15 @@ func (n *network) deliver( nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) - if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { return errors.New("Malformed client request") } - if nextPeer == nil && nextMsg == nil { // no response to send + if nextPeer == "" && nextMsg == nil { // no response to send return nil } - nextReceiver, ok := n.clients[nextPeer.Key()] + nextReceiver, ok := n.clients[nextPeer] if !ok { return errors.New("Cannot locate peer on network") } @@ -110,32 +115,32 @@ func (n *network) deliver( // TODO func (n *network) SendRequest( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) ( incoming bsmsg.BitSwapMessage, err error) { - r, ok := n.clients[to.Key()] + r, ok := n.clients[to] if !ok { return nil, errors.New("Cannot locate peer on network") } nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) // TODO dedupe code - if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { r.ReceiveError(errors.New("Malformed client request")) return nil, nil } // TODO dedupe code - if nextPeer == nil && nextMsg == nil { + if 
nextPeer == "" && nextMsg == nil { return nil, nil } // TODO test when receiver doesn't immediately respond to the initiator of the request - if !bytes.Equal(nextPeer.ID(), from.ID()) { + if nextPeer != from { go func() { - nextReceiver, ok := n.clients[nextPeer.Key()] + nextReceiver, ok := n.clients[nextPeer] if !ok { // TODO log the error? } @@ -147,26 +152,54 @@ func (n *network) SendRequest( } type networkClient struct { - local peer.Peer + local peer.ID bsnet.Receiver network Network + routing routing.IpfsRouting } func (nc *networkClient) SendMessage( ctx context.Context, - to peer.Peer, + to peer.ID, message bsmsg.BitSwapMessage) error { return nc.network.SendMessage(ctx, nc.local, to, message) } func (nc *networkClient) SendRequest( ctx context.Context, - to peer.Peer, + to peer.ID, message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) { return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(ctx context.Context, p peer.Peer) error { +// FindProvidersAsync returns a channel of providers for the given key +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { + + // NB: this function duplicates the PeerInfo -> ID transformation in the + // bitswap network adapter. Not to worry. This network client will be + // deprecated once the ipfsnet.Mock is added. The code below is only + // temporary. + + out := make(chan peer.ID) + go func() { + defer close(out) + providers := nc.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out +} + +// Provide provides the key to the network +func (nc *networkClient) Provide(ctx context.Context, k util.Key) error { + return nc.routing.Provide(ctx, k) +} + +func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { // no need to do anything because dialing isn't a thing in this test net. 
if !nc.network.HasPeer(p) { return fmt.Errorf("Peer not in network: %s", p) diff --git a/exchange/bitswap/testnet/network_test.go b/exchange/bitswap/testnet/network_test.go index 7a9f48e2d..0728f63d6 100644 --- a/exchange/bitswap/testnet/network_test.go +++ b/exchange/bitswap/testnet/network_test.go @@ -5,30 +5,31 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" delay "github.com/jbenet/go-ipfs/util/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" ) func TestSendRequestToCooperativePeer(t *testing.T) { - net := VirtualNetwork(delay.Fixed(0)) + net := VirtualNetwork(mockrouting.NewServer(),delay.Fixed(0)) - idOfRecipient := []byte("recipient") + idOfRecipient := peer.ID("recipient") t.Log("Get two network adapters") - initiator := net.Adapter(testutil.NewPeerWithIDString("initiator")) - recipient := net.Adapter(testutil.NewPeerWithID(idOfRecipient)) + initiator := net.Adapter(peer.ID("initiator")) + recipient := net.Adapter(idOfRecipient) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( ctx context.Context, - from peer.Peer, + from peer.ID, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { t.Log("Recipient received a message from the network") @@ -45,13 +46,17 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), testutil.NewPeerWithID(idOfRecipient), message) + context.Background(), idOfRecipient, message) if err != nil { t.Fatal(err) } t.Log("Check the contents of the response from recipient") + if response == nil { + 
t.Fatal("Should have received a response") + } + for _, blockFromRecipient := range response.Blocks() { if string(blockFromRecipient.Data) == expectedStr { return @@ -61,10 +66,10 @@ func TestSendRequestToCooperativePeer(t *testing.T) { } func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := VirtualNetwork(delay.Fixed(0)) - idOfResponder := []byte("responder") - waiter := net.Adapter(testutil.NewPeerWithIDString("waiter")) - responder := net.Adapter(testutil.NewPeerWithID(idOfResponder)) + net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + idOfResponder := peer.ID("responder") + waiter := net.Adapter(peer.ID("waiter")) + responder := net.Adapter(idOfResponder) var wg sync.WaitGroup @@ -74,9 +79,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.SetDelegate(lambda(func( ctx context.Context, - fromWaiter peer.Peer, + fromWaiter peer.ID, msgFromWaiter bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) @@ -86,9 +91,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { waiter.SetDelegate(lambda(func( ctx context.Context, - fromResponder peer.Peer, + fromResponder peer.ID, msgFromResponder bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false @@ -103,13 +108,13 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { t.Fatal("Message not received from the responder") } - return nil, nil + return "", nil })) messageSentAsync := bsmsg.New() messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( - context.Background(), testutil.NewPeerWithID(idOfResponder), messageSentAsync) + context.Background(), idOfResponder, messageSentAsync) if errSending != nil { t.Fatal(errSending) } @@ -117,8 
+122,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { wg.Wait() // until waiter delegate function is executed } -type receiverFunc func(ctx context.Context, p peer.Peer, - incoming bsmsg.BitSwapMessage) (peer.Peer, bsmsg.BitSwapMessage) +type receiverFunc func(ctx context.Context, p peer.ID, + incoming bsmsg.BitSwapMessage) (peer.ID, bsmsg.BitSwapMessage) // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -128,13 +133,13 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) { return lam.f(ctx, p, incoming) } diff --git a/exchange/bitswap/testutils.go b/exchange/bitswap/testutils.go index 48cb11a45..70c1bd7a5 100644 --- a/exchange/bitswap/testutils.go +++ b/exchange/bitswap/testutils.go @@ -10,18 +10,16 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" ) func NewSessionGenerator( - net tn.Network, rs mockrouting.Server) SessionGenerator { + net tn.Network) SessionGenerator { ctx, cancel := context.WithCancel(context.TODO()) return SessionGenerator{ ps: peer.NewPeerstore(), net: net, - rs: rs, seq: 0, ctx: ctx, // TODO take ctx as param to Next, Instances cancel: cancel, @@ -31,7 +29,6 @@ func NewSessionGenerator( type SessionGenerator struct { seq int net tn.Network - rs 
mockrouting.Server ps peer.Peerstore ctx context.Context cancel context.CancelFunc @@ -44,7 +41,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.ctx, g.net, g.rs, g.ps, []byte(string(g.seq))) + return session(g.ctx, g.net, g.ps, peer.ID(g.seq)) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -57,7 +54,7 @@ func (g *SessionGenerator) Instances(n int) []Instance { } type Instance struct { - Peer peer.Peer + Peer peer.ID Exchange exchange.Interface blockstore blockstore.Blockstore @@ -77,11 +74,9 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance { - p := ps.WithID(id) +func session(ctx context.Context, net tn.Network, ps peer.Peerstore, p peer.ID) Instance { adapter := net.Adapter(p) - htc := rs.Client(p) bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 @@ -93,7 +88,7 @@ func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer const alwaysSendToPeer = true - bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) + bs := New(ctx, p, adapter, bstore, alwaysSendToPeer) return Instance{ Peer: p, diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go index 7d8f0e8d1..1f65be07b 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -69,7 +69,7 @@ func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M } } - fs, err := NewIpns(node, "") + fs, err := NewIpns(node, node.PrivateKey, "") if err != nil { t.Fatal(err) } @@ -226,7 +226,7 @@ func TestFastRepublish(t *testing.T) { node, mnt := setupIpnsTest(t, nil) - h, err := node.Identity.PrivKey().GetPublic().Hash() + h, err := 
node.PrivateKey.GetPublic().Hash() if err != nil { t.Fatal(err) } diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index 1b53820da..7eabe74c3 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -37,8 +37,8 @@ type FileSystem struct { } // NewFileSystem constructs new fs using given core.IpfsNode instance. -func NewIpns(ipfs *core.IpfsNode, ipfspath string) (*FileSystem, error) { - root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey()}, ipfspath) +func NewIpns(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath string) (*FileSystem, error) { + root, err := CreateRoot(ipfs, []ci.PrivKey{sk}, ipfspath) if err != nil { return nil, err } diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index b73e154de..7365d929a 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -48,7 +48,7 @@ func internalMount(ipfs *core.IpfsNode, fpath string, ipfspath string) error { } defer c.Close() - fsys, err := NewIpns(ipfs, ipfspath) + fsys, err := NewIpns(ipfs, ipfs.PrivateKey, ipfspath) if err != nil { return err } diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index 3d8916b03..9a638ca2a 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -184,9 +184,7 @@ type dagService struct { // Add adds a node to the dagService, storing the block in the BlockService func (n *dagService) Add(nd *Node) (u.Key, error) { - k, _ := nd.Key() - log.Debugf("DagService Add [%s]", k) - if n == nil { + if n == nil { // FIXME remove this assertion. 
protect with constructor invariant return "", fmt.Errorf("dagService is nil") } diff --git a/namesys/resolve_test.go b/namesys/resolve_test.go index 1d487f9a7..fb29490f3 100644 --- a/namesys/resolve_test.go +++ b/namesys/resolve_test.go @@ -4,14 +4,18 @@ import ( "testing" ci "github.com/jbenet/go-ipfs/crypto" + peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestRoutingResolve(t *testing.T) { - local := testutil.NewPeerWithIDString("testID") - d := mockrouting.NewServer().Client(local) + local, err := testutil.RandPeerID() + if err != nil { + t.Fatal(err) + } + d := mockrouting.NewServer().Client(peer.PeerInfo{ID: local}) resolver := NewRoutingResolver(d) publisher := NewRoutingPublisher(d) diff --git a/net/backpressure/backpressure_test.go b/net/backpressure/backpressure_test.go index c7bea5626..4dd29d167 100644 --- a/net/backpressure/backpressure_test.go +++ b/net/backpressure/backpressure_test.go @@ -17,15 +17,19 @@ import ( var log = eventlog.Logger("backpressure") -func GenNetwork(ctx context.Context) (inet.Network, error) { - p, err := testutil.PeerWithKeysAndAddress(testutil.RandLocalTCPAddress()) - if err != nil { - return nil, err - } - - listen := p.Addresses() +func GenNetwork(t *testing.T, ctx context.Context) (inet.Network, error) { + p := testutil.RandPeerNetParams(t) ps := peer.NewPeerstore() - return inet.NewNetwork(ctx, listen, p, ps) + ps.AddAddress(p.ID, p.Addr) + ps.AddPubKey(p.ID, p.PubKey) + ps.AddPrivKey(p.ID, p.PrivKey) + return inet.NewNetwork(ctx, ps.Addresses(p.ID), p.ID, ps) +} + +func divulgeAddresses(a, b inet.Network) { + id := a.LocalPeer() + addrs := a.Peerstore().Addresses(id) + b.Peerstore().AddAddresses(id, addrs) } // TestBackpressureStreamHandler tests whether mux handler @@ -83,7 +87,7 @@ a problem. 
} // the sender opens streams as fast as possible - sender := func(net inet.Network, remote peer.Peer) { + sender := func(net inet.Network, remote peer.ID) { var s inet.Stream var err error defer func() { @@ -145,11 +149,11 @@ a problem. // ok that's enough setup. let's do it! ctx := context.Background() - n1, err := GenNetwork(ctx) + n1, err := GenNetwork(t, ctx) if err != nil { t.Fatal(err) } - n2, err := GenNetwork(ctx) + n2, err := GenNetwork(t, ctx) if err != nil { t.Fatal(err) } @@ -287,15 +291,18 @@ func TestStBackpressureStreamWrite(t *testing.T) { // setup the networks ctx := context.Background() - n1, err := GenNetwork(ctx) + n1, err := GenNetwork(t, ctx) if err != nil { t.Fatal(err) } - n2, err := GenNetwork(ctx) + n2, err := GenNetwork(t, ctx) if err != nil { t.Fatal(err) } + divulgeAddresses(n1, n2) + divulgeAddresses(n2, n1) + // setup sender handler on 1 n1.SetHandler(inet.ProtocolTesting, sender) diff --git a/net/conn/conn.go b/net/conn/conn.go index 0d8c8e00e..7ee303dee 100644 --- a/net/conn/conn.go +++ b/net/conn/conn.go @@ -11,6 +11,7 @@ import ( ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" + ic "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" eventlog "github.com/jbenet/go-ipfs/util/eventlog" @@ -32,14 +33,14 @@ func ReleaseBuffer(b []byte) { // singleConn represents a single connection to another Peer (IPFS Node). 
type singleConn struct { - local peer.Peer - remote peer.Peer + local peer.ID + remote peer.ID maconn manet.Conn msgrw msgio.ReadWriteCloser } // newConn constructs a new connection -func newSingleConn(ctx context.Context, local, remote peer.Peer, maconn manet.Conn) (Conn, error) { +func newSingleConn(ctx context.Context, local, remote peer.ID, maconn manet.Conn) (Conn, error) { conn := &singleConn{ local: local, @@ -83,6 +84,14 @@ func (c *singleConn) RemoteAddr() net.Addr { return c.maconn.RemoteAddr() } +func (c *singleConn) LocalPrivateKey() ic.PrivKey { + return nil +} + +func (c *singleConn) RemotePublicKey() ic.PubKey { + return nil +} + func (c *singleConn) SetDeadline(t time.Time) error { return c.maconn.SetDeadline(t) } @@ -105,12 +114,12 @@ func (c *singleConn) RemoteMultiaddr() ma.Multiaddr { } // LocalPeer is the Peer on this side -func (c *singleConn) LocalPeer() peer.Peer { +func (c *singleConn) LocalPeer() peer.ID { return c.local } // RemotePeer is the Peer on the remote side -func (c *singleConn) RemotePeer() peer.Peer { +func (c *singleConn) RemotePeer() peer.ID { return c.remote } @@ -145,8 +154,8 @@ func (c *singleConn) ReleaseMsg(m []byte) { // ID returns the ID of a given Conn. 
func ID(c Conn) string { - l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID()) - r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID()) + l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().Pretty()) + r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().Pretty()) lh := u.Hash([]byte(l)) rh := u.Hash([]byte(r)) ch := u.XOR(lh, rh) diff --git a/net/conn/conn_test.go b/net/conn/conn_test.go index cda64088e..ccbbade6a 100644 --- a/net/conn/conn_test.go +++ b/net/conn/conn_test.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "runtime" - "strconv" "sync" "testing" "time" @@ -14,6 +13,7 @@ import ( ) func testOneSendRecv(t *testing.T, c1, c2 Conn) { + log.Debugf("testOneSendRecv from %s to %s", c1.LocalPeer(), c2.LocalPeer()) m1 := []byte("hello") if err := c1.WriteMsg(m1); err != nil { t.Fatal(err) @@ -41,8 +41,9 @@ func testNotOneSendRecv(t *testing.T, c1, c2 Conn) { func TestClose(t *testing.T) { // t.Skip("Skipping in favor of another test") - ctx := context.Background() - c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c1, c2, _, _ := setupSingleConn(t, ctx) testOneSendRecv(t, c1, c2) testOneSendRecv(t, c2, c1) @@ -56,6 +57,7 @@ func TestClose(t *testing.T) { } func TestCloseLeak(t *testing.T) { + // t.Skip("Skipping in favor of another test") if testing.Short() { t.SkipNow() } @@ -66,11 +68,9 @@ func TestCloseLeak(t *testing.T) { var wg sync.WaitGroup - runPair := func(p1, p2, num int) { - a1 := strconv.Itoa(p1) - a2 := strconv.Itoa(p2) + runPair := func(num int) { ctx, cancel := context.WithCancel(context.Background()) - c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2) + c1, c2, _, _ := setupSingleConn(t, ctx) for i := 0; i < num; i++ { b1 := []byte(fmt.Sprintf("beep%d", i)) @@ -102,15 +102,15 @@ func TestCloseLeak(t *testing.T) { wg.Done() } - var cons = 1 - var msgs = 100 - 
fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs) + var cons = 5 + var msgs = 50 + log.Debugf("Running %d connections * %d msgs.\n", cons, msgs) for i := 0; i < cons; i++ { wg.Add(1) - go runPair(2000+i, 2001+i, msgs) + go runPair(msgs) } - fmt.Printf("Waiting...\n") + log.Debugf("Waiting...\n") wg.Wait() // done! diff --git a/net/conn/dial.go b/net/conn/dial.go index aa41d088a..5eed05d06 100644 --- a/net/conn/dial.go +++ b/net/conn/dial.go @@ -1,6 +1,7 @@ package conn import ( + "fmt" "strings" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -11,49 +12,32 @@ import ( debugerror "github.com/jbenet/go-ipfs/util/debugerror" ) -// Dial connects to a particular peer, over a given network -// Example: d.Dial(ctx, "udp", peer) -func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Conn, error) { - raddr := remote.NetAddress(network) - if raddr == nil { - return nil, debugerror.Errorf("No remote address for network %s", network) - } - return d.DialAddr(ctx, raddr, remote) +// String returns the string rep of d. 
+func (d *Dialer) String() string {
+	return fmt.Sprintf("<Dialer %s %s>", d.LocalPeer, d.LocalAddrs[0])
 }
 
-// DialAddr connects to a peer over a particular address
+// Dial connects to a peer over a particular address
 // Ensures raddr is part of peer.Addresses()
 // Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
-func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.Peer) (Conn, error) {
-
-	found := false
-	for _, addr := range remote.Addresses() {
-		if addr.Equal(raddr) {
-			found = true
-		}
-	}
-	if !found {
-		return nil, debugerror.Errorf("address %s is not in peer %s", raddr, remote)
-	}
+func (d *Dialer) Dial(ctx context.Context, raddr ma.Multiaddr, remote peer.ID) (Conn, error) {
 
 	network, _, err := manet.DialArgs(raddr)
 	if err != nil {
 		return nil, err
 	}
 
-	laddr := d.LocalPeer.NetAddress(network)
-	if laddr == nil {
-		return nil, debugerror.Errorf("No local address for network %s", network)
-	}
-
 	if strings.HasPrefix(raddr.String(), "/ip4/0.0.0.0") {
 		return nil, debugerror.Errorf("Attempted to connect to zero address: %s", raddr)
 	}
 
-	remote.SetType(peer.Remote)
-	remote, err = d.Peerstore.Add(remote)
-	if err != nil {
-		log.Errorf("Error putting peer into peerstore: %s", remote)
+	var laddr ma.Multiaddr
+	if len(d.LocalAddrs) > 0 {
+		// laddr := MultiaddrNetMatch(raddr, d.LocalAddrs)
+		laddr = NetAddress(network, d.LocalAddrs)
+		if laddr == nil {
+			return nil, debugerror.Errorf("No local address for network %s", network)
+		}
 	}
 
 	// TODO: try to get reusing addr/ports to work. 
@@ -69,7 +53,7 @@ func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.P select { case <-ctx.Done(): maconn.Close() - return nil, err + return nil, ctx.Err() default: } @@ -78,17 +62,58 @@ func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.P return nil, err } - if d.WithoutSecureTransport { + if d.PrivateKey == nil { + log.Warning("dialer %s dialing INSECURELY %s at %s!", d, remote, raddr) return c, nil } select { case <-ctx.Done(): c.Close() - return nil, err + return nil, ctx.Err() default: } // return c, nil - return newSecureConn(ctx, c, d.Peerstore) + return newSecureConn(ctx, d.PrivateKey, c) +} + +// MultiaddrProtocolsMatch returns whether two multiaddrs match in protocol stacks. +func MultiaddrProtocolsMatch(a, b ma.Multiaddr) bool { + ap := a.Protocols() + bp := b.Protocols() + + if len(ap) != len(bp) { + return false + } + + for i, api := range ap { + if api != bp[i] { + return false + } + } + + return true +} + +// MultiaddrNetMatch returns the first Multiaddr found to match network. +func MultiaddrNetMatch(tgt ma.Multiaddr, srcs []ma.Multiaddr) ma.Multiaddr { + for _, a := range srcs { + if MultiaddrProtocolsMatch(tgt, a) { + return a + } + } + return nil +} + +// NetAddress returns the first Multiaddr found for a given network. 
+func NetAddress(n string, addrs []ma.Multiaddr) ma.Multiaddr { + for _, a := range addrs { + for _, p := range a.Protocols() { + if p.Name == n { + return a + } + } + } + return nil } diff --git a/net/conn/dial_test.go b/net/conn/dial_test.go index 85377fb7a..125eb6425 100644 --- a/net/conn/dial_test.go +++ b/net/conn/dial_test.go @@ -2,45 +2,35 @@ package conn import ( "io" + "net" "testing" + "time" - ci "github.com/jbenet/go-ipfs/crypto" - peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" + tu "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -func setupPeer(addr string) (peer.Peer, error) { - tcp, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, err - } - - sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) - if err != nil { - return nil, err - } - - p, err := testutil.NewPeerWithKeyPair(sk, pk) - if err != nil { - return nil, err - } - p.AddAddress(tcp) - return p, nil -} - func echoListen(ctx context.Context, listener Listener) { for { c, err := listener.Accept() if err != nil { + select { case <-ctx.Done(): return default: } + + if ne, ok := err.(net.Error); ok && ne.Temporary() { + <-time.After(time.Microsecond * 10) + continue + } + + log.Debugf("echoListen: listener appears to be closing") + return } + go echo(c.(Conn)) } } @@ -49,106 +39,86 @@ func echo(c Conn) { io.Copy(c, c) } -func setupSecureConn(t *testing.T, ctx context.Context, a1, a2 string) (a, b Conn) { - return setupConn(t, ctx, a1, a2, true) +func setupSecureConn(t *testing.T, ctx context.Context) (a, b Conn, p1, p2 tu.PeerNetParams) { + return setupConn(t, ctx, true) } -func setupSingleConn(t *testing.T, ctx context.Context, a1, a2 string) (a, b Conn) { - return setupConn(t, ctx, a1, a2, false) +func setupSingleConn(t *testing.T, ctx context.Context) (a, b Conn, p1, p2 
tu.PeerNetParams) { + return setupConn(t, ctx, false) } -func setupConn(t *testing.T, ctx context.Context, a1, a2 string, secure bool) (a, b Conn) { +func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p2 tu.PeerNetParams) { - p1, err := setupPeer(a1) - if err != nil { - t.Fatal("error setting up peer", err) + p1 = tu.RandPeerNetParams(t) + p2 = tu.RandPeerNetParams(t) + laddr := p1.Addr + + key1 := p1.PrivKey + key2 := p2.PrivKey + if !secure { + key1 = nil + key2 = nil } - - p2, err := setupPeer(a2) - if err != nil { - t.Fatal("error setting up peer", err) - } - - laddr := p1.NetAddress("tcp") - if laddr == nil { - t.Fatal("Listen address is nil.") - } - - ps1 := peer.NewPeerstore() - ps2 := peer.NewPeerstore() - ps1.Add(p1) - ps2.Add(p2) - - l1, err := Listen(ctx, laddr, p1, ps1) - l1.SetWithoutSecureTransport(!secure) + l1, err := Listen(ctx, laddr, p1.ID, key1) if err != nil { t.Fatal(err) } d2 := &Dialer{ - Peerstore: ps2, - LocalPeer: p2, - WithoutSecureTransport: !secure, + LocalPeer: p2.ID, + PrivateKey: key2, } var c2 Conn - done := make(chan struct{}) + done := make(chan error) go func() { - c2, err = d2.Dial(ctx, "tcp", p1) + var err error + c2, err = d2.Dial(ctx, p1.Addr, p1.ID) if err != nil { - t.Fatal("error dialing peer", err) + done <- err } - done <- struct{}{} + close(done) }() c1, err := l1.Accept() if err != nil { - t.Fatal("failed to accept") + t.Fatal("failed to accept", err) + } + if err := <-done; err != nil { + t.Fatal(err) } - <-done - return c1.(Conn), c2 + return c1.(Conn), c2, p1, p2 } -func TestDialer(t *testing.T) { +func testDialer(t *testing.T, secure bool) { // t.Skip("Skipping in favor of another test") - p1, err := setupPeer("/ip4/127.0.0.1/tcp/4234") - if err != nil { - t.Fatal("error setting up peer", err) - } + p1 := tu.RandPeerNetParams(t) + p2 := tu.RandPeerNetParams(t) - p2, err := setupPeer("/ip4/127.0.0.1/tcp/4235") - if err != nil { - t.Fatal("error setting up peer", err) + key1 := p1.PrivKey 
+ key2 := p2.PrivKey + if !secure { + key1 = nil + key2 = nil } ctx, cancel := context.WithCancel(context.Background()) - - laddr := p1.NetAddress("tcp") - if laddr == nil { - t.Fatal("Listen address is nil.") - } - - ps1 := peer.NewPeerstore() - ps2 := peer.NewPeerstore() - ps1.Add(p1) - ps2.Add(p2) - - l, err := Listen(ctx, laddr, p1, ps1) + l1, err := Listen(ctx, p1.Addr, p1.ID, key1) if err != nil { t.Fatal(err) } - go echoListen(ctx, l) - - d := &Dialer{ - Peerstore: ps2, - LocalPeer: p2, + d2 := &Dialer{ + LocalPeer: p2.ID, + PrivateKey: key2, } - c, err := d.Dial(ctx, "tcp", p1) + go echoListen(ctx, l1) + + c, err := d2.Dial(ctx, p1.Addr, p1.ID) if err != nil { t.Fatal("error dialing peer", err) } @@ -180,83 +150,16 @@ func TestDialer(t *testing.T) { // fmt.Println("closing") c.Close() - l.Close() + l1.Close() cancel() } -func TestDialAddr(t *testing.T) { +func TestDialerInsecure(t *testing.T) { // t.Skip("Skipping in favor of another test") - - p1, err := setupPeer("/ip4/127.0.0.1/tcp/4334") - if err != nil { - t.Fatal("error setting up peer", err) - } - - p2, err := setupPeer("/ip4/127.0.0.1/tcp/4335") - if err != nil { - t.Fatal("error setting up peer", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - - laddr := p1.NetAddress("tcp") - if laddr == nil { - t.Fatal("Listen address is nil.") - } - - ps1 := peer.NewPeerstore() - ps2 := peer.NewPeerstore() - ps1.Add(p1) - ps2.Add(p2) - - l, err := Listen(ctx, laddr, p1, ps1) - if err != nil { - t.Fatal(err) - } - - go echoListen(ctx, l) - - d := &Dialer{ - Peerstore: ps2, - LocalPeer: p2, - } - - raddr := p1.NetAddress("tcp") - if raddr == nil { - t.Fatal("Dial address is nil.") - } - - c, err := d.DialAddr(ctx, raddr, p1) - if err != nil { - t.Fatal("error dialing peer", err) - } - - // fmt.Println("sending") - c.WriteMsg([]byte("beep")) - c.WriteMsg([]byte("boop")) - - out, err := c.ReadMsg() - if err != nil { - t.Fatal(err) - } - // fmt.Println("recving", string(out)) - data := 
string(out) - if data != "beep" { - t.Error("unexpected conn output", data) - } - - out, err = c.ReadMsg() - if err != nil { - t.Fatal(err) - } - - data = string(out) - if string(out) != "boop" { - t.Error("unexpected conn output", data) - } - - // fmt.Println("closing") - c.Close() - l.Close() - cancel() + testDialer(t, false) +} + +func TestDialerSecure(t *testing.T) { + // t.Skip("Skipping in favor of another test") + testDialer(t, true) } diff --git a/net/conn/handshake.go b/net/conn/handshake.go index e815a7e65..3a995bc9a 100644 --- a/net/conn/handshake.go +++ b/net/conn/handshake.go @@ -2,13 +2,12 @@ package conn import ( "fmt" - "io" handshake "github.com/jbenet/go-ipfs/net/handshake" hspb "github.com/jbenet/go-ipfs/net/handshake/pb" - ggprotoio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ggprotoio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" ) // Handshake1 exchanges local and remote versions and compares them @@ -51,38 +50,3 @@ func Handshake1(ctx context.Context, c Conn) error { log.Debugf("%s version handshake compatible %s", lpeer, rpeer) return nil } - -// Handshake3 exchanges local and remote service information -func Handshake3(ctx context.Context, stream io.ReadWriter, c Conn) (*handshake.Handshake3Result, error) { - rpeer := c.RemotePeer() - lpeer := c.LocalPeer() - - // setup up protobuf io - maxSize := 4096 - r := ggprotoio.NewDelimitedReader(stream, maxSize) - w := ggprotoio.NewDelimitedWriter(stream) - localH := handshake.Handshake3Msg(lpeer, c.RemoteMultiaddr()) - remoteH := new(hspb.Handshake3) - - // setup + send the message to remote - if err := w.WriteMsg(localH); err != nil { - return nil, err - } - log.Debugf("Handshake3: sent to %s", rpeer) - log.Event(ctx, "handshake3Sent", lpeer, rpeer) - - // wait + listen for response - if err := r.ReadMsg(remoteH); err != nil { - 
return nil, fmt.Errorf("Handshake3 could not receive remote msg: %q", err) - } - log.Debugf("Handshake3: received from %s", rpeer) - log.Event(ctx, "handshake3Received", lpeer, rpeer) - - // actually update our state based on the new knowledge - res, err := handshake.Handshake3Update(lpeer, rpeer, remoteH) - if err != nil { - log.Errorf("Handshake3 failed to update %s", rpeer) - } - res.RemoteObservedAddress = c.RemoteMultiaddr() - return res, nil -} diff --git a/net/conn/interface.go b/net/conn/interface.go index c07a9e254..d42a9a97a 100644 --- a/net/conn/interface.go +++ b/net/conn/interface.go @@ -5,6 +5,7 @@ import ( "net" "time" + ic "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -16,17 +17,15 @@ import ( type Map map[u.Key]Conn type PeerConn interface { - // LocalMultiaddr is the Multiaddr on this side + // LocalPeer (this side) ID, PrivateKey, and Address + LocalPeer() peer.ID + LocalPrivateKey() ic.PrivKey LocalMultiaddr() ma.Multiaddr - // LocalPeer is the Peer on our side of the connection - LocalPeer() peer.Peer - - // RemoteMultiaddr is the Multiaddr on the remote side + // RemotePeer ID, PublicKey, and Address + RemotePeer() peer.ID + RemotePublicKey() ic.PubKey RemoteMultiaddr() ma.Multiaddr - - // RemotePeer is the Peer on the remote side - RemotePeer() peer.Peer } // Conn is a generic message-based Peer-to-Peer connection. @@ -54,16 +53,14 @@ type Conn interface { type Dialer struct { // LocalPeer is the identity of the local Peer. - LocalPeer peer.Peer + LocalPeer peer.ID - // Peerstore is the set of peers we know about locally. The Dialer needs it - // because when an incoming connection is identified, we should reuse the - // same peer objects (otherwise things get inconsistent). - Peerstore peer.Peerstore + // LocalAddrs is a set of local addresses to use. + LocalAddrs []ma.Multiaddr - // WithoutSecureTransport determines whether to initialize an insecure connection. 
- // Phrased negatively so default is Secure, and verbosely to be very clear. - WithoutSecureTransport bool + // PrivateKey used to initialize a secure connection. + // Warning: if PrivateKey is nil, connection will not be secured. + PrivateKey ic.PrivKey } // Listener is an object that can accept connections. It matches net.Listener @@ -72,11 +69,6 @@ type Listener interface { // Accept waits for and returns the next connection to the listener. Accept() (net.Conn, error) - // {Set}WithoutSecureTransport decides whether to start insecure connections. - // Phrased negatively so default is Secure, and verbosely to be very clear. - WithoutSecureTransport() bool - SetWithoutSecureTransport(bool) - // Addr is the local address Addr() net.Addr @@ -84,12 +76,7 @@ type Listener interface { Multiaddr() ma.Multiaddr // LocalPeer is the identity of the local Peer. - LocalPeer() peer.Peer - - // Peerstore is the set of peers we know about locally. The Listener needs it - // because when an incoming connection is identified, we should reuse the - // same peer objects (otherwise things get inconsistent). - Peerstore() peer.Peerstore + LocalPeer() peer.ID // Close closes the listener. // Any blocked Accept operations will be unblocked and return errors. diff --git a/net/conn/listen.go b/net/conn/listen.go index bae2168eb..17eb03dbe 100644 --- a/net/conn/listen.go +++ b/net/conn/listen.go @@ -5,31 +5,37 @@ import ( "net" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" + ic "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" ) // listener is an object that can accept connections. 
It implements Listener
type listener struct {
-	withoutSecureTransport bool
-
 	manet.Listener
 
-	// Local multiaddr to listen on
-	maddr ma.Multiaddr
+	maddr ma.Multiaddr // Local multiaddr to listen on
+	local peer.ID      // LocalPeer is the identity of the local Peer
+	privk ic.PrivKey   // private key to use to initialize secure conns
 
-	// LocalPeer is the identity of the local Peer.
-	local peer.Peer
+	cg ctxgroup.ContextGroup
+}
 
-	// Peerstore is the set of peers we know about locally
-	peers peer.Peerstore
+func (l *listener) teardown() error {
+	defer log.Debugf("listener closed: %s %s", l.local, l.maddr)
+	return l.Listener.Close()
 }
 
 func (l *listener) Close() error {
-	log.Infof("listener closing: %s %s", l.local, l.maddr)
-	return l.Listener.Close()
+	log.Debugf("listener closing: %s %s", l.local, l.maddr)
+	return l.cg.Close()
+}
+
+func (l *listener) String() string {
+	return fmt.Sprintf("<Listener %s %s>", l.local, l.maddr)
 }
 
 // Accept waits for and returns the next connection to the listener.
@@ -46,29 +52,22 @@ func (l *listener) Accept() (net.Conn, error) {
 		return nil, err
 	}
 
-	c, err := newSingleConn(ctx, l.local, nil, maconn)
+	c, err := newSingleConn(ctx, l.local, "", maconn)
 	if err != nil {
 		return nil, fmt.Errorf("Error accepting connection: %v", err)
 	}
 
-	if l.withoutSecureTransport {
+	if l.privk == nil {
+		log.Warning("listener %s listening INSECURELY!", l)
 		return c, nil
 	}
 
-	sc, err := newSecureConn(ctx, c, l.peers)
+	sc, err := newSecureConn(ctx, l.privk, c)
 	if err != nil {
 		return nil, fmt.Errorf("Error securing connection: %v", err)
 	}
 	return sc, nil
 }
 
-func (l *listener) WithoutSecureTransport() bool {
-	return l.withoutSecureTransport
-}
-
-func (l *listener) SetWithoutSecureTransport(b bool) {
-	l.withoutSecureTransport = b
-}
-
 func (l *listener) Addr() net.Addr {
 	return l.Listener.Addr()
 }
@@ -79,29 +78,22 @@ func (l *listener) Multiaddr() ma.Multiaddr {
 }
 
 // LocalPeer is the identity of the local Peer. 
-func (l *listener) LocalPeer() peer.Peer { +func (l *listener) LocalPeer() peer.ID { return l.local } -// Peerstore is the set of peers we know about locally. The Listener needs it -// because when an incoming connection is identified, we should reuse the -// same peer objects (otherwise things get inconsistent). -func (l *listener) Peerstore() peer.Peerstore { - return l.peers -} - func (l *listener) Loggable() map[string]interface{} { return map[string]interface{}{ "listener": map[string]interface{}{ - "peer": l.LocalPeer(), - "address": l.Multiaddr(), - "withoutSecureTransport": l.withoutSecureTransport, + "peer": l.LocalPeer(), + "address": l.Multiaddr(), + "secure": (l.privk != nil), }, } } // Listen listens on the particular multiaddr, with given peer and peerstore. -func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.Peerstore) (Listener, error) { +func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey) (Listener, error) { ml, err := manet.Listen(addr) if err != nil { @@ -111,10 +103,11 @@ func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer. 
l := &listener{ Listener: ml, maddr: addr, - peers: peers, local: local, - withoutSecureTransport: false, + privk: sk, + cg: ctxgroup.WithContext(ctx), } + l.cg.SetTeardown(l.teardown) log.Infof("swarm listening on %s\n", l.Multiaddr()) log.Event(ctx, "swarmListen", l) diff --git a/net/conn/secure_conn.go b/net/conn/secure_conn.go index 6437ea05f..dda1f5b55 100644 --- a/net/conn/secure_conn.go +++ b/net/conn/secure_conn.go @@ -8,8 +8,10 @@ import ( msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + ic "github.com/jbenet/go-ipfs/crypto" secio "github.com/jbenet/go-ipfs/crypto/secio" peer "github.com/jbenet/go-ipfs/peer" + errors "github.com/jbenet/go-ipfs/util/debugerror" ) // secureConn wraps another Conn object with an encrypted channel. @@ -26,10 +28,21 @@ type secureConn struct { } // newConn constructs a new connection -func newSecureConn(ctx context.Context, insecure Conn, peers peer.Peerstore) (Conn, error) { +func newSecureConn(ctx context.Context, sk ic.PrivKey, insecure Conn) (Conn, error) { + + if insecure == nil { + return nil, errors.New("insecure is nil") + } + if insecure.LocalPeer() == "" { + return nil, errors.New("insecure.LocalPeer() is nil") + } + if sk == nil { + panic("way") + return nil, errors.New("private key is nil") + } // NewSession performs the secure handshake, which takes multiple RTT - sessgen := secio.SessionGenerator{Local: insecure.LocalPeer(), Peerstore: peers} + sessgen := secio.SessionGenerator{LocalID: insecure.LocalPeer(), PrivateKey: sk} session, err := sessgen.NewSession(ctx, insecure) if err != nil { return nil, err @@ -92,15 +105,25 @@ func (c *secureConn) RemoteMultiaddr() ma.Multiaddr { } // LocalPeer is the Peer on this side -func (c *secureConn) LocalPeer() peer.Peer { +func (c *secureConn) LocalPeer() peer.ID { return c.session.LocalPeer() } // RemotePeer is the Peer on the remote side -func (c 
*secureConn) RemotePeer() peer.Peer { +func (c *secureConn) RemotePeer() peer.ID { return c.session.RemotePeer() } +// LocalPrivateKey is the public key of the peer on this side +func (c *secureConn) LocalPrivateKey() ic.PrivKey { + return c.session.LocalPrivateKey() +} + +// RemotePubKey is the public key of the peer on the remote side +func (c *secureConn) RemotePublicKey() ic.PubKey { + return c.session.RemotePublicKey() +} + // Read reads data, net.Conn style func (c *secureConn) Read(buf []byte) (int, error) { return c.secure.Read(buf) diff --git a/net/conn/secure_conn_test.go b/net/conn/secure_conn_test.go index 9618624c4..7b400da06 100644 --- a/net/conn/secure_conn_test.go +++ b/net/conn/secure_conn_test.go @@ -2,50 +2,78 @@ package conn import ( "bytes" - "fmt" "os" "runtime" - "strconv" "sync" "testing" "time" - peer "github.com/jbenet/go-ipfs/peer" + ic "github.com/jbenet/go-ipfs/crypto" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ) -func upgradeToSecureConn(t *testing.T, ctx context.Context, c Conn) (Conn, error) { +func upgradeToSecureConn(t *testing.T, ctx context.Context, sk ic.PrivKey, c Conn) (Conn, error) { if c, ok := c.(*secureConn); ok { return c, nil } // shouldn't happen, because dial + listen already return secure conns. 
- s, err := newSecureConn(ctx, c, peer.NewPeerstore()) + s, err := newSecureConn(ctx, sk, c) if err != nil { return nil, err } return s, nil } -func secureHandshake(t *testing.T, ctx context.Context, c Conn, done chan error) { - _, err := upgradeToSecureConn(t, ctx, c) +func secureHandshake(t *testing.T, ctx context.Context, sk ic.PrivKey, c Conn, done chan error) { + _, err := upgradeToSecureConn(t, ctx, sk, c) done <- err } +func TestSecureSimple(t *testing.T) { + // t.Skip("Skipping in favor of another test") + + numMsgs := 100 + if testing.Short() { + numMsgs = 10 + } + + ctx := context.Background() + c1, c2, p1, p2 := setupSingleConn(t, ctx) + + done := make(chan error) + go secureHandshake(t, ctx, p1.PrivKey, c1, done) + go secureHandshake(t, ctx, p2.PrivKey, c2, done) + + for i := 0; i < 2; i++ { + if err := <-done; err != nil { + t.Fatal(err) + } + } + + for i := 0; i < numMsgs; i++ { + testOneSendRecv(t, c1, c2) + testOneSendRecv(t, c2, c1) + } + + c1.Close() + c2.Close() +} + func TestSecureClose(t *testing.T) { // t.Skip("Skipping in favor of another test") ctx := context.Background() - c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/6634", "/ip4/127.0.0.1/tcp/6645") + c1, c2, p1, p2 := setupSingleConn(t, ctx) done := make(chan error) - go secureHandshake(t, ctx, c1, done) - go secureHandshake(t, ctx, c2, done) + go secureHandshake(t, ctx, p1.PrivKey, c1, done) + go secureHandshake(t, ctx, p2.PrivKey, c2, done) for i := 0; i < 2; i++ { if err := <-done; err != nil { - t.Error(err) + t.Fatal(err) } } @@ -64,13 +92,13 @@ func TestSecureCancelHandshake(t *testing.T) { // t.Skip("Skipping in favor of another test") ctx, cancel := context.WithCancel(context.Background()) - c1, c2 := setupSingleConn(t, ctx, "/ip4/127.0.0.1/tcp/6634", "/ip4/127.0.0.1/tcp/6645") + c1, c2, p1, p2 := setupSingleConn(t, ctx) done := make(chan error) - go secureHandshake(t, ctx, c1, done) - <-time.After(50 * time.Millisecond) + go secureHandshake(t, ctx, p1.PrivKey, c1, done) 
+ <-time.After(time.Millisecond) cancel() // cancel ctx - go secureHandshake(t, ctx, c2, done) + go secureHandshake(t, ctx, p2.PrivKey, c2, done) for i := 0; i < 2; i++ { if err := <-done; err == nil { @@ -79,6 +107,24 @@ func TestSecureCancelHandshake(t *testing.T) { } } +func TestSecureHandshakeFailsWithWrongKeys(t *testing.T) { + // t.Skip("Skipping in favor of another test") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c1, c2, p1, p2 := setupSingleConn(t, ctx) + + done := make(chan error) + go secureHandshake(t, ctx, p2.PrivKey, c1, done) + go secureHandshake(t, ctx, p1.PrivKey, c2, done) + + for i := 0; i < 2; i++ { + if err := <-done; err == nil { + t.Fatal("wrong keys should've errored out.") + } + } +} + func TestSecureCloseLeak(t *testing.T) { // t.Skip("Skipping in favor of another test") @@ -89,15 +135,11 @@ func TestSecureCloseLeak(t *testing.T) { t.Skip("this doesn't work well on travis") } - var wg sync.WaitGroup - - runPair := func(p1, p2, num int) { - a1 := strconv.Itoa(p1) - a2 := strconv.Itoa(p2) - ctx, cancel := context.WithCancel(context.Background()) - c1, c2 := setupSecureConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2) + runPair := func(c1, c2 Conn, num int) { + log.Debugf("runPair %d", num) for i := 0; i < num; i++ { + log.Debugf("runPair iteration %d", i) b1 := []byte("beep") c1.WriteMsg(b1) b2, err := c2.ReadMsg() @@ -120,22 +162,32 @@ func TestSecureCloseLeak(t *testing.T) { <-time.After(time.Microsecond * 5) } - - c1.Close() - c2.Close() - cancel() // close the listener - wg.Done() } - var cons = 20 - var msgs = 100 - fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs) + var cons = 5 + var msgs = 50 + log.Debugf("Running %d connections * %d msgs.\n", cons, msgs) + + var wg sync.WaitGroup for i := 0; i < cons; i++ { wg.Add(1) - go runPair(2000+i, 2001+i, msgs) + + ctx, cancel := context.WithCancel(context.Background()) + c1, c2, _, _ := setupSecureConn(t, ctx) + go func(c1, c2 
Conn) { + + defer func() { + c1.Close() + c2.Close() + cancel() + wg.Done() + }() + + runPair(c1, c2, msgs) + }(c1, c2) } - fmt.Printf("Waiting...\n") + log.Debugf("Waiting...\n") wg.Wait() // done! diff --git a/net/handshake/handshake1.go b/net/handshake/handshake1.go index 99f8e2ac7..17ca44f09 100644 --- a/net/handshake/handshake1.go +++ b/net/handshake/handshake1.go @@ -6,10 +6,13 @@ import ( config "github.com/jbenet/go-ipfs/config" pb "github.com/jbenet/go-ipfs/net/handshake/pb" + u "github.com/jbenet/go-ipfs/util" semver "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/coreos/go-semver/semver" ) +var log = u.Logger("handshake") + // IpfsVersion holds the current protocol version for a client running this code var IpfsVersion *semver.Version var ClientVersion = "go-ipfs/" + config.CurrentVersionNumber @@ -51,6 +54,13 @@ func Handshake1Compatible(handshakeA, handshakeB *pb.Handshake1) error { // NewHandshake1 creates a new Handshake1 from the two strings func NewHandshake1(protoVer, agentVer string) *pb.Handshake1 { + if protoVer == "" { + protoVer = IpfsVersion.String() + } + if agentVer == "" { + agentVer = ClientVersion + } + return &pb.Handshake1{ ProtocolVersion: &protoVer, AgentVersion: &agentVer, diff --git a/net/handshake/handshake3.go b/net/handshake/handshake3.go deleted file mode 100644 index b80735b0f..000000000 --- a/net/handshake/handshake3.go +++ /dev/null @@ -1,82 +0,0 @@ -package handshake - -import ( - "fmt" - - pb "github.com/jbenet/go-ipfs/net/handshake/pb" - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" - - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" -) - -var log = u.Logger("handshake") - -// Handshake3Msg constructs a Handshake3 msg. -func Handshake3Msg(localPeer peer.Peer, remoteAddr ma.Multiaddr) *pb.Handshake3 { - var msg pb.Handshake3 - // don't need publicKey after secure channel. 
- // msg.PublicKey = localPeer.PubKey().Bytes() - - // local listen addresses - addrs := localPeer.Addresses() - msg.ListenAddrs = make([][]byte, len(addrs)) - for i, a := range addrs { - msg.ListenAddrs[i] = a.Bytes() - } - - // observed remote address - msg.ObservedAddr = remoteAddr.Bytes() - - // services - // srv := localPeer.Services() - // msg.Services = make([]mux.ProtocolID, len(srv)) - // for i, pid := range srv { - // msg.Services[i] = pid - // } - - return &msg -} - -// Handshake3Update updates local knowledge with the information in the -// handshake3 msg we received from remote client. -func Handshake3Update(lpeer, rpeer peer.Peer, msg *pb.Handshake3) (*Handshake3Result, error) { - res := &Handshake3Result{} - - // our observed address - observedAddr, err := ma.NewMultiaddrBytes(msg.GetObservedAddr()) - if err != nil { - return res, err - } - if lpeer.AddAddress(observedAddr) { - log.Debugf("(nat) added new local, remote-observed address: %s", observedAddr) - } - res.LocalObservedAddress = observedAddr - - // remote's reported addresses - for _, a := range msg.GetListenAddrs() { - addr, err := ma.NewMultiaddrBytes(a) - if err != nil { - err = fmt.Errorf("remote peer address not a multiaddr: %s", err) - log.Errorf("Handshake3 error %s", err) - return res, err - } - rpeer.AddAddress(addr) - res.RemoteListenAddresses = append(res.RemoteListenAddresses, addr) - } - - return res, nil -} - -// Handshake3Result collects the knowledge gained in Handshake3. 
-type Handshake3Result struct { - - // The addresses reported by the remote client - RemoteListenAddresses []ma.Multiaddr - - // The address of the remote client we observed in this connection - RemoteObservedAddress ma.Multiaddr - - // The address the remote client observed from this connection - LocalObservedAddress ma.Multiaddr -} diff --git a/net/handshake/pb/handshake.pb.go b/net/handshake/pb/handshake.pb.go index 77ed5a41e..e4164788d 100644 --- a/net/handshake/pb/handshake.pb.go +++ b/net/handshake/pb/handshake.pb.go @@ -53,8 +53,16 @@ func (m *Handshake1) GetAgentVersion() string { // Handshake3 is delivered _after_ the secure channel is initialized type Handshake3 struct { + // can include all the values in handshake1, for protocol version, etc. + H1 *Handshake1 `protobuf:"bytes,5,opt,name=h1" json:"h1,omitempty"` + // publicKey is this node's public key (which also gives its node.ID) + // - may not need to be sent, as secure channel implies it has been sent. + // - then again, if we change / disable secure channel, may still want it. + PublicKey []byte `protobuf:"bytes,1,opt,name=publicKey" json:"publicKey,omitempty"` // listenAddrs are the multiaddrs the sender node listens for open connections on ListenAddrs [][]byte `protobuf:"bytes,2,rep,name=listenAddrs" json:"listenAddrs,omitempty"` + // protocols are the services this node is running + Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"` // oservedAddr is the multiaddr of the remote endpoint that the sender node perceives // this is useful information to convey to the other side, as it helps the remote endpoint // determine whether its connection to the local peer goes through NAT. 
@@ -66,6 +74,20 @@ func (m *Handshake3) Reset() { *m = Handshake3{} } func (m *Handshake3) String() string { return proto.CompactTextString(m) } func (*Handshake3) ProtoMessage() {} +func (m *Handshake3) GetH1() *Handshake1 { + if m != nil { + return m.H1 + } + return nil +} + +func (m *Handshake3) GetPublicKey() []byte { + if m != nil { + return m.PublicKey + } + return nil +} + func (m *Handshake3) GetListenAddrs() [][]byte { if m != nil { return m.ListenAddrs @@ -73,6 +95,13 @@ func (m *Handshake3) GetListenAddrs() [][]byte { return nil } +func (m *Handshake3) GetProtocols() []string { + if m != nil { + return m.Protocols + } + return nil +} + func (m *Handshake3) GetObservedAddr() []byte { if m != nil { return m.ObservedAddr diff --git a/net/handshake/pb/handshake.proto b/net/handshake/pb/handshake.proto index 1dc7cad93..8eb699559 100644 --- a/net/handshake/pb/handshake.proto +++ b/net/handshake/pb/handshake.proto @@ -17,19 +17,19 @@ message Handshake1 { // Handshake3 is delivered _after_ the secure channel is initialized message Handshake3 { + // can include all the values in handshake1, for protocol version, etc. + optional Handshake1 h1 = 5; + // publicKey is this node's public key (which also gives its node.ID) // - may not need to be sent, as secure channel implies it has been sent. // - then again, if we change / disable secure channel, may still want it. - // optional bytes publicKey = 1; + optional bytes publicKey = 1; // listenAddrs are the multiaddrs the sender node listens for open connections on repeated bytes listenAddrs = 2; - // TODO - // services list the services this node is running - // repeated mux.ProtocolID services = 3; - - // we'll have more fields here later. 
+ // protocols are the services this node is running + repeated string protocols = 3; // oservedAddr is the multiaddr of the remote endpoint that the sender node perceives // this is useful information to convey to the other side, as it helps the remote endpoint diff --git a/net/id.go b/net/id.go new file mode 100644 index 000000000..17dc76610 --- /dev/null +++ b/net/id.go @@ -0,0 +1,178 @@ +package net + +import ( + "sync" + + handshake "github.com/jbenet/go-ipfs/net/handshake" + pb "github.com/jbenet/go-ipfs/net/handshake/pb" + + ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" +) + +// IDService is a structure that implements ProtocolIdentify. +// It is a trivial service that gives the other peer some +// useful information about the local peer. A sort of hello. +// +// The IDService sends: +// * Our IPFS Protocol Version +// * Our IPFS Agent Version +// * Our public Listen Addresses +type IDService struct { + Network Network + + // connections undergoing identification + // for wait purposes + currid map[Conn]chan struct{} + currmu sync.RWMutex +} + +func NewIDService(n Network) *IDService { + s := &IDService{ + Network: n, + currid: make(map[Conn]chan struct{}), + } + n.SetHandler(ProtocolIdentify, s.RequestHandler) + return s +} + +func (ids *IDService) IdentifyConn(c Conn) { + ids.currmu.Lock() + if _, found := ids.currid[c]; found { + ids.currmu.Unlock() + log.Debugf("IdentifyConn called twice on: %s", c) + return // already identifying it. + } + ids.currid[c] = make(chan struct{}) + ids.currmu.Unlock() + + s, err := c.NewStreamWithProtocol(ProtocolIdentify) + if err != nil { + log.Error("network: unable to open initial stream for %s", ProtocolIdentify) + log.Event(ids.Network.CtxGroup().Context(), "IdentifyOpenFailed", c.RemotePeer()) + } + + // ok give the response to our handler. 
+ ids.ResponseHandler(s) + + ids.currmu.Lock() + ch, found := ids.currid[c] + delete(ids.currid, c) + ids.currmu.Unlock() + + if !found { + log.Errorf("IdentifyConn failed to find channel (programmer error) for %s", c) + return + } + + close(ch) // release everyone waiting. +} + +func (ids *IDService) RequestHandler(s Stream) { + defer s.Close() + c := s.Conn() + + w := ggio.NewDelimitedWriter(s) + mes := pb.Handshake3{} + ids.populateMessage(&mes, s.Conn()) + w.WriteMsg(&mes) + + log.Debugf("%s sent message to %s %s", ProtocolIdentify, + c.RemotePeer(), c.RemoteMultiaddr()) +} + +func (ids *IDService) ResponseHandler(s Stream) { + defer s.Close() + c := s.Conn() + + r := ggio.NewDelimitedReader(s, 2048) + mes := pb.Handshake3{} + if err := r.ReadMsg(&mes); err != nil { + log.Errorf("%s error receiving message from %s %s", ProtocolIdentify, + c.RemotePeer(), c.RemoteMultiaddr()) + return + } + ids.consumeMessage(&mes, c) + + log.Debugf("%s received message from %s %s", ProtocolIdentify, + c.RemotePeer(), c.RemoteMultiaddr()) +} + +func (ids *IDService) populateMessage(mes *pb.Handshake3, c Conn) { + + // set protocols this node is currently handling + protos := ids.Network.Protocols() + mes.Protocols = make([]string, len(protos)) + for i, p := range protos { + mes.Protocols[i] = string(p) + } + + // observed address so other side is informed of their + // "public" address, at least in relation to us. 
+ mes.ObservedAddr = c.RemoteMultiaddr().Bytes() + + // set listen addrs + laddrs, err := ids.Network.InterfaceListenAddresses() + if err != nil { + log.Error(err) + } else { + mes.ListenAddrs = make([][]byte, len(laddrs)) + for i, addr := range laddrs { + mes.ListenAddrs[i] = addr.Bytes() + } + log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs) + } + + // set protocol versions + mes.H1 = handshake.NewHandshake1("", "") +} + +func (ids *IDService) consumeMessage(mes *pb.Handshake3, c Conn) { + p := c.RemotePeer() + + // mes.Protocols + // mes.ObservedAddr + + // mes.ListenAddrs + laddrs := mes.GetListenAddrs() + lmaddrs := make([]ma.Multiaddr, 0, len(laddrs)) + for _, addr := range laddrs { + maddr, err := ma.NewMultiaddrBytes(addr) + if err != nil { + log.Errorf("%s failed to parse multiaddr from %s %s", ProtocolIdentify, p, + c.RemoteMultiaddr()) + continue + } + lmaddrs = append(lmaddrs, maddr) + } + + // update our peerstore with the addresses. + ids.Network.Peerstore().AddAddresses(p, lmaddrs) + log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs) + + // get protocol versions + pv := *mes.H1.ProtocolVersion + av := *mes.H1.AgentVersion + ids.Network.Peerstore().Put(p, "ProtocolVersion", pv) + ids.Network.Peerstore().Put(p, "AgentVersion", av) +} + +// IdentifyWait returns a channel which will be closed once +// "ProtocolIdentify" (handshake3) finishes on given conn. +// This happens async so the connection can start to be used +// even if handshake3 knowledge is not necesary. +// Users **MUST** call IdentifyWait _after_ IdentifyConn +func (ids *IDService) IdentifyWait(c Conn) <-chan struct{} { + ids.currmu.Lock() + ch, found := ids.currid[c] + ids.currmu.Unlock() + if found { + return ch + } + + // if not found, it means we are already done identifying it, or + // haven't even started. either way, return a new channel closed. 
+ ch = make(chan struct{}) + close(ch) + return ch +} diff --git a/net/id_test.go b/net/id_test.go new file mode 100644 index 000000000..70ae10e45 --- /dev/null +++ b/net/id_test.go @@ -0,0 +1,130 @@ +package net_test + +import ( + "testing" + "time" + + inet "github.com/jbenet/go-ipfs/net" + handshake "github.com/jbenet/go-ipfs/net/handshake" + peer "github.com/jbenet/go-ipfs/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" +) + +func GenNetwork(t *testing.T, ctx context.Context) inet.Network { + p := testutil.RandPeerNetParams(t) + ps := peer.NewPeerstore() + ps.AddAddress(p.ID, p.Addr) + ps.AddPubKey(p.ID, p.PubKey) + ps.AddPrivKey(p.ID, p.PrivKey) + n, err := inet.NewNetwork(ctx, ps.Addresses(p.ID), p.ID, ps) + if err != nil { + t.Fatal(err) + } + return n +} + +func DivulgeAddresses(a, b inet.Network) { + id := a.LocalPeer() + addrs := a.Peerstore().Addresses(id) + b.Peerstore().AddAddresses(id, addrs) +} + +func subtestIDService(t *testing.T, postDialWait time.Duration) { + + // the generated networks should have the id service wired in. + ctx := context.Background() + n1 := GenNetwork(t, ctx) + n2 := GenNetwork(t, ctx) + + n1p := n1.LocalPeer() + n2p := n2.LocalPeer() + + testKnowsAddrs(t, n1, n2p, []ma.Multiaddr{}) // nothing + testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{}) // nothing + + // have n2 tell n1, so we can dial... + DivulgeAddresses(n2, n1) + + testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them + testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{}) // nothing + + if err := n1.DialPeer(ctx, n2p); err != nil { + t.Fatalf("Failed to dial:", err) + } + + // we need to wait here if Dial returns before ID service is finished. + if postDialWait > 0 { + <-time.After(postDialWait) + } + + // the IDService should be opened automatically, by the network. 
+ // what we should see now is that both peers know about each others listen addresses. + testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them + testHasProtocolVersions(t, n1, n2p) + + // now, this wait we do have to do. it's the wait for the Listening side + // to be done identifying the connection. + c := n2.ConnsToPeer(n1.LocalPeer()) + if len(c) < 1 { + t.Fatal("should have connection by now at least.") + } + <-n2.IdentifyProtocol().IdentifyWait(c[0]) + + // and the protocol versions. + testKnowsAddrs(t, n2, n1p, n1.Peerstore().Addresses(n1p)) // has them + testHasProtocolVersions(t, n2, n1p) +} + +func testKnowsAddrs(t *testing.T, n inet.Network, p peer.ID, expected []ma.Multiaddr) { + actual := n.Peerstore().Addresses(p) + + if len(actual) != len(expected) { + t.Error("dont have the same addresses") + } + + have := map[string]struct{}{} + for _, addr := range actual { + have[addr.String()] = struct{}{} + } + for _, addr := range expected { + if _, found := have[addr.String()]; !found { + t.Errorf("%s did not have addr for %s: %s", n.LocalPeer(), p, addr) + // panic("ahhhhhhh") + } + } +} + +func testHasProtocolVersions(t *testing.T, n inet.Network, p peer.ID) { + v, err := n.Peerstore().Get(p, "ProtocolVersion") + if v == nil { + t.Error("no protocol version") + return + } + if v.(string) != handshake.IpfsVersion.String() { + t.Error("protocol mismatch", err) + } + v, err = n.Peerstore().Get(p, "AgentVersion") + if v.(string) != handshake.ClientVersion { + t.Error("agent version mismatch", err) + } +} + +// TestIDServiceWait gives the ID service 100ms to finish after dialing +// this is becasue it used to be concurrent. Now, Dial wait till the +// id service is done. 
+func TestIDServiceWait(t *testing.T) { + N := 3 + for i := 0; i < N; i++ { + subtestIDService(t, 100*time.Millisecond) + } +} + +func TestIDServiceNoWait(t *testing.T) { + N := 3 + for i := 0; i < N; i++ { + subtestIDService(t, 0) + } +} diff --git a/net/interface.go b/net/interface.go index a7469f178..74354e5cd 100644 --- a/net/interface.go +++ b/net/interface.go @@ -18,10 +18,11 @@ type ProtocolID string // These are the ProtocolIDs of the protocols running. It is useful // to keep them in one place. const ( - ProtocolTesting ProtocolID = "/ipfs/testing" - ProtocolBitswap ProtocolID = "/ipfs/bitswap" - ProtocolDHT ProtocolID = "/ipfs/dht" - ProtocolDiag ProtocolID = "/ipfs/diagnostics" + ProtocolTesting ProtocolID = "/ipfs/testing" + ProtocolBitswap ProtocolID = "/ipfs/bitswap" + ProtocolDHT ProtocolID = "/ipfs/dht" + ProtocolIdentify ProtocolID = "/ipfs/id" + ProtocolDiag ProtocolID = "/ipfs/diagnostics" ) // MessageSizeMax is a soft (recommended) maximum for network messages. @@ -56,8 +57,8 @@ type StreamHandlerMap map[ProtocolID]StreamHandler type Conn interface { conn.PeerConn - // NewStreamWithProtocol constructs a new Stream directly connected to p. - NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error) + // NewStreamWithProtocol constructs a new Stream over this conn. + NewStreamWithProtocol(pr ProtocolID) (Stream, error) } // Network is the interface IPFS uses for connecting to the world. @@ -72,17 +73,24 @@ type Network interface { // This operation is threadsafe. SetHandler(ProtocolID, StreamHandler) + // Protocols returns the list of protocols this network currently + // has registered handlers for. + Protocols() []ProtocolID + // NewStream returns a new stream to given peer p. // If there is no connection to p, attempts to create one. // If ProtocolID is "", writes no header. 
- NewStream(ProtocolID, peer.Peer) (Stream, error) + NewStream(ProtocolID, peer.ID) (Stream, error) // Peers returns the peers connected - Peers() []peer.Peer + Peers() []peer.ID // Conns returns the connections in this Netowrk Conns() []Conn + // ConnsToPeer returns the connections in this Netowrk for given peer. + ConnsToPeer(p peer.ID) []Conn + // BandwidthTotals returns the total number of bytes passed through // the network since it was instantiated BandwidthTotals() (uint64, uint64) @@ -97,23 +105,34 @@ type Network interface { // CtxGroup returns the network's contextGroup CtxGroup() ctxgroup.ContextGroup + + // IdentifyProtocol returns the instance of the object running the Identify + // Protocol. This is what runs the ifps handshake-- this should be removed + // if this abstracted out to its own package. + IdentifyProtocol() *IDService } // Dialer represents a service that can dial out to peers // (this is usually just a Network, but other services may not need the whole // stack, and thus it becomes easier to mock) type Dialer interface { + + // Peerstore returns the internal peerstore + // This is useful to tell the dialer about a new address for a peer. + // Or use one of the public keys found out over the network. + Peerstore() peer.Peerstore + // LocalPeer returns the local peer associated with this network - LocalPeer() peer.Peer + LocalPeer() peer.ID // DialPeer attempts to establish a connection to a given peer - DialPeer(context.Context, peer.Peer) error + DialPeer(context.Context, peer.ID) error // ClosePeer closes the connection to a given peer - ClosePeer(peer.Peer) error + ClosePeer(peer.ID) error // Connectedness returns a state signaling connection capabilities - Connectedness(peer.Peer) Connectedness + Connectedness(peer.ID) Connectedness } // Connectedness signals the capacity for a connection with a given node. 
diff --git a/net/mock/interface.go b/net/mock/interface.go index 0f9855e5e..5f40397d7 100644 --- a/net/mock/interface.go +++ b/net/mock/interface.go @@ -10,21 +10,28 @@ import ( "io" "time" + ic "github.com/jbenet/go-ipfs/crypto" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" + + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) type Mocknet interface { + + // GenPeer generates a peer and its inet.Network in the Mocknet GenPeer() (inet.Network, error) - AddPeer(peer.ID) (inet.Network, error) + + // AddPeer adds an existing peer. we need both a privkey and addr. + // ID is derived from PrivKey + AddPeer(ic.PrivKey, ma.Multiaddr) (inet.Network, error) // retrieve things - Peer(peer.ID) peer.Peer - Peers() []peer.Peer + Peers() []peer.ID Net(peer.ID) inet.Network Nets() []inet.Network Links() LinkMap - LinksBetweenPeers(a, b peer.Peer) []Link + LinksBetweenPeers(a, b peer.ID) []Link LinksBetweenNets(a, b inet.Network) []Link // Links are the **ability to connect**. @@ -32,10 +39,10 @@ type Mocknet interface { // For p1 and p2 to connect, a link must exist between them. // (this makes it possible to test dial failures, and // things like relaying traffic) - LinkPeers(peer.Peer, peer.Peer) (Link, error) + LinkPeers(peer.ID, peer.ID) (Link, error) LinkNets(inet.Network, inet.Network) (Link, error) Unlink(Link) error - UnlinkPeers(peer.Peer, peer.Peer) error + UnlinkPeers(peer.ID, peer.ID) error UnlinkNets(inet.Network, inet.Network) error // LinkDefaults are the default options that govern links @@ -45,9 +52,9 @@ type Mocknet interface { // Connections are the usual. Connecting means Dialing. 
// **to succeed, peers must be linked beforehand** - ConnectPeers(peer.Peer, peer.Peer) error + ConnectPeers(peer.ID, peer.ID) error ConnectNets(inet.Network, inet.Network) error - DisconnectPeers(peer.Peer, peer.Peer) error + DisconnectPeers(peer.ID, peer.ID) error DisconnectNets(inet.Network, inet.Network) error } @@ -66,7 +73,7 @@ type LinkOptions struct { // nodes cannot talk to each other directly. :) type Link interface { Networks() []inet.Network - Peers() []peer.Peer + Peers() []peer.ID SetOptions(LinkOptions) Options() LinkOptions diff --git a/net/mock/mock_conn.go b/net/mock/mock_conn.go index 0062325a0..e92ff6fa4 100644 --- a/net/mock/mock_conn.go +++ b/net/mock/mock_conn.go @@ -4,6 +4,7 @@ import ( "container/list" "sync" + ic "github.com/jbenet/go-ipfs/crypto" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" @@ -14,8 +15,15 @@ import ( // live connection between two peers. // it goes over a particular link. type conn struct { - local peer.Peer - remote peer.Peer + local peer.ID + remote peer.ID + + localAddr ma.Multiaddr + remoteAddr ma.Multiaddr + + localPrivKey ic.PrivKey + remotePubKey ic.PubKey + net *peernet link *link rconn *conn // counterpart @@ -74,8 +82,8 @@ func (c *conn) openStream() *stream { return sl } -func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID, p peer.Peer) (inet.Stream, error) { - log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, p) +func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID) (inet.Stream, error) { + log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, c.remote) s := c.openStream() if err := inet.WriteProtocolHeader(pr, s); err != nil { @@ -87,20 +95,30 @@ func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID, p peer.Peer) (inet.Stre // LocalMultiaddr is the Multiaddr on this side func (c *conn) LocalMultiaddr() ma.Multiaddr { - return nil + return c.localAddr } // LocalPeer is the Peer on our side of the connection -func (c *conn) LocalPeer() peer.Peer { +func 
(c *conn) LocalPeer() peer.ID { return c.local } +// LocalPrivateKey is the private key of the peer on our side. +func (c *conn) LocalPrivateKey() ic.PrivKey { + return c.localPrivKey +} + // RemoteMultiaddr is the Multiaddr on the remote side func (c *conn) RemoteMultiaddr() ma.Multiaddr { - return nil + return c.remoteAddr } // RemotePeer is the Peer on the remote side -func (c *conn) RemotePeer() peer.Peer { +func (c *conn) RemotePeer() peer.ID { return c.remote } + +// RemotePublicKey is the private key of the peer on our side. +func (c *conn) RemotePublicKey() ic.PubKey { + return c.remotePubKey +} diff --git a/net/mock/mock_link.go b/net/mock/mock_link.go index d058e910c..38d99886c 100644 --- a/net/mock/mock_link.go +++ b/net/mock/mock_link.go @@ -1,7 +1,6 @@ package mocknet import ( - "fmt" "io" "sync" @@ -16,6 +15,8 @@ type link struct { nets []*peernet opts LinkOptions + // this could have addresses on both sides. + sync.RWMutex } @@ -27,20 +28,22 @@ func (l *link) newConnPair(dialer *peernet) (*conn, *conn) { l.RLock() defer l.RUnlock() - mkconn := func(n *peernet, rid peer.ID) *conn { - c := &conn{net: n, link: l} - c.local = n.peer + mkconn := func(ln, rn *peernet) *conn { + c := &conn{net: ln, link: l} + c.local = ln.peer + c.remote = rn.peer + + c.localAddr = ln.ps.Addresses(ln.peer)[0] + c.remoteAddr = rn.ps.Addresses(rn.peer)[0] + + c.localPrivKey = ln.ps.PrivKey(ln.peer) + c.remotePubKey = rn.ps.PubKey(rn.peer) - r, err := n.ps.FindOrCreate(rid) - if err != nil { - panic(fmt.Errorf("error creating peer: %s", err)) - } - c.remote = r return c } - c1 := mkconn(l.nets[0], l.nets[1].peer.ID()) - c2 := mkconn(l.nets[1], l.nets[0].peer.ID()) + c1 := mkconn(l.nets[0], l.nets[1]) + c2 := mkconn(l.nets[1], l.nets[0]) c1.rconn = c2 c2.rconn = c1 @@ -70,11 +73,11 @@ func (l *link) Networks() []inet.Network { return cp } -func (l *link) Peers() []peer.Peer { +func (l *link) Peers() []peer.ID { l.RLock() defer l.RUnlock() - cp := make([]peer.Peer, len(l.nets)) 
+ cp := make([]peer.ID, len(l.nets)) for i, n := range l.nets { cp[i] = n.peer } diff --git a/net/mock/mock_net.go b/net/mock/mock_net.go index 20a4dc0a2..ffed78b33 100644 --- a/net/mock/mock_net.go +++ b/net/mock/mock_net.go @@ -4,27 +4,27 @@ import ( "fmt" "sync" + ic "github.com/jbenet/go-ipfs/crypto" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" testutil "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -type peerID string - // mocknet implements mocknet.Mocknet type mocknet struct { - // must map on peer.ID (instead of peer.Peer) because + // must map on peer.ID (instead of peer.ID) because // each inet.Network has different peerstore - nets map[peerID]*peernet + nets map[peer.ID]*peernet // links make it possible to connect two peers. // think of links as the physical medium. 
// usually only one, but there could be multiple // **links are shared between peers** - links map[peerID]map[peerID]map[*link]struct{} + links map[peer.ID]map[peer.ID]map[*link]struct{} linkDefaults LinkOptions @@ -34,62 +34,53 @@ type mocknet struct { func New(ctx context.Context) Mocknet { return &mocknet{ - nets: map[peerID]*peernet{}, - links: map[peerID]map[peerID]map[*link]struct{}{}, + nets: map[peer.ID]*peernet{}, + links: map[peer.ID]map[peer.ID]map[*link]struct{}{}, cg: ctxgroup.WithContext(ctx), } } func (mn *mocknet) GenPeer() (inet.Network, error) { - p, err := testutil.PeerWithNewKeys() + sk, _, err := testutil.RandKeyPair(512) if err != nil { return nil, err } - n, err := mn.AddPeer(p.ID()) - if err != nil { - return nil, err - } + a := testutil.RandLocalTCPAddress() - // copy over keys - if err := n.LocalPeer().Update(p); err != nil { + n, err := mn.AddPeer(sk, a) + if err != nil { return nil, err } return n, nil } -func (mn *mocknet) AddPeer(p peer.ID) (inet.Network, error) { - n, err := newPeernet(mn.cg.Context(), mn, p) +func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (inet.Network, error) { + n, err := newPeernet(mn.cg.Context(), mn, k, a) if err != nil { return nil, err } + // make sure to add listening address! + // this makes debugging things simpler as remembering to register + // an address may cause unexpected failure. 
+ n.Peerstore().AddAddress(n.LocalPeer(), a) + log.Debugf("mocknet added listen addr for peer: %s -- %s", n.LocalPeer(), a) + mn.cg.AddChildGroup(n.cg) mn.Lock() - mn.nets[pid(n.peer)] = n + mn.nets[n.peer] = n mn.Unlock() return n, nil } -func (mn *mocknet) Peer(pid peer.ID) peer.Peer { +func (mn *mocknet) Peers() []peer.ID { mn.RLock() defer mn.RUnlock() - for _, n := range mn.nets { - if n.peer.ID().Equal(pid) { - return n.peer - } - } - return nil -} - -func (mn *mocknet) Peers() []peer.Peer { - mn.RLock() - defer mn.RUnlock() - - cp := make([]peer.Peer, 0, len(mn.nets)) + cp := make([]peer.ID, 0, len(mn.nets)) for _, n := range mn.nets { cp = append(cp, n.peer) } @@ -101,7 +92,7 @@ func (mn *mocknet) Net(pid peer.ID) inet.Network { defer mn.RUnlock() for _, n := range mn.nets { - if n.peer.ID().Equal(pid) { + if n.peer == pid { return n } } @@ -152,10 +143,10 @@ func (mn *mocknet) LinkAll() error { return nil } -func (mn *mocknet) LinkPeers(p1, p2 peer.Peer) (Link, error) { +func (mn *mocknet) LinkPeers(p1, p2 peer.ID) (Link, error) { mn.RLock() - n1 := mn.nets[pid(p1)] - n2 := mn.nets[pid(p2)] + n1 := mn.nets[p1] + n2 := mn.nets[p2] mn.RUnlock() if n1 == nil { @@ -177,7 +168,7 @@ func (mn *mocknet) validate(n inet.Network) (*peernet, error) { return nil, fmt.Errorf("Network not supported (use mock package nets only)") } - if _, found := mn.nets[pid(nr.peer)]; !found { + if _, found := mn.nets[nr.peer]; !found { return nil, fmt.Errorf("Network not on mocknet. is it from another mocknet?") } @@ -215,7 +206,7 @@ func (mn *mocknet) Unlink(l2 Link) error { return nil } -func (mn *mocknet) UnlinkPeers(p1, p2 peer.Peer) error { +func (mn *mocknet) UnlinkPeers(p1, p2 peer.ID) error { ls := mn.LinksBetweenPeers(p1, p2) if ls == nil { return fmt.Errorf("no link between p1 and p2") @@ -234,19 +225,19 @@ func (mn *mocknet) UnlinkNets(n1, n2 inet.Network) error { } // get from the links map. and lazily contruct. 
-func (mn *mocknet) linksMapGet(p1, p2 peer.Peer) *map[*link]struct{} { +func (mn *mocknet) linksMapGet(p1, p2 peer.ID) *map[*link]struct{} { - l1, found := mn.links[pid(p1)] + l1, found := mn.links[p1] if !found { - mn.links[pid(p1)] = map[peerID]map[*link]struct{}{} - l1 = mn.links[pid(p1)] // so we make sure it's there. + mn.links[p1] = map[peer.ID]map[*link]struct{}{} + l1 = mn.links[p1] // so we make sure it's there. } - l2, found := l1[pid(p2)] + l2, found := l1[p2] if !found { m := map[*link]struct{}{} - l1[pid(p2)] = m - l2 = l1[pid(p2)] + l1[p2] = m + l2 = l1[p2] } return &l2 @@ -286,23 +277,23 @@ func (mn *mocknet) ConnectAll() error { return nil } -func (mn *mocknet) ConnectPeers(a, b peer.Peer) error { - return mn.Net(a.ID()).DialPeer(mn.cg.Context(), b) +func (mn *mocknet) ConnectPeers(a, b peer.ID) error { + return mn.Net(a).DialPeer(mn.cg.Context(), b) } func (mn *mocknet) ConnectNets(a, b inet.Network) error { return a.DialPeer(mn.cg.Context(), b.LocalPeer()) } -func (mn *mocknet) DisconnectPeers(p1, p2 peer.Peer) error { - return mn.Net(p1.ID()).ClosePeer(p2) +func (mn *mocknet) DisconnectPeers(p1, p2 peer.ID) error { + return mn.Net(p1).ClosePeer(p2) } func (mn *mocknet) DisconnectNets(n1, n2 inet.Network) error { return n1.ClosePeer(n2.LocalPeer()) } -func (mn *mocknet) LinksBetweenPeers(p1, p2 peer.Peer) []Link { +func (mn *mocknet) LinksBetweenPeers(p1, p2 peer.ID) []Link { mn.RLock() defer mn.RUnlock() diff --git a/net/mock/mock_peernet.go b/net/mock/mock_peernet.go index 5a56de513..51ffb1e72 100644 --- a/net/mock/mock_peernet.go +++ b/net/mock/mock_peernet.go @@ -5,6 +5,7 @@ import ( "math/rand" "sync" + ic "github.com/jbenet/go-ipfs/crypto" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" @@ -17,33 +18,38 @@ import ( type peernet struct { mocknet *mocknet // parent - peer peer.Peer + peer peer.ID ps peer.Peerstore // conns are actual live connections between peers. // many conns could run over each link. 
// **conns are NOT shared between peers** - connsByPeer map[peerID]map[*conn]struct{} + connsByPeer map[peer.ID]map[*conn]struct{} connsByLink map[*link]map[*conn]struct{} // needed to implement inet.Network mux inet.Mux + ids *inet.IDService cg ctxgroup.ContextGroup sync.RWMutex } // newPeernet constructs a new peernet -func newPeernet(ctx context.Context, m *mocknet, id peer.ID) (*peernet, error) { +func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey, + a ma.Multiaddr) (*peernet, error) { - // create our own entirely, so that peers dont get shuffled across - // network divides. dont share peers. - ps := peer.NewPeerstore() - p, err := ps.FindOrCreate(id) + p, err := peer.IDFromPublicKey(k.GetPublic()) if err != nil { return nil, err } + // create our own entirely, so that peers knowledge doesn't get shared + ps := peer.NewPeerstore() + ps.AddAddress(p, a) + ps.AddPrivKey(p, k) + ps.AddPubKey(p, k.GetPublic()) + n := &peernet{ mocknet: m, peer: p, @@ -51,11 +57,16 @@ func newPeernet(ctx context.Context, m *mocknet, id peer.ID) (*peernet, error) { mux: inet.Mux{Handlers: inet.StreamHandlerMap{}}, cg: ctxgroup.WithContext(ctx), - connsByPeer: map[peerID]map[*conn]struct{}{}, + connsByPeer: map[peer.ID]map[*conn]struct{}{}, connsByLink: map[*link]map[*conn]struct{}{}, } n.cg.SetTeardown(n.teardown) + + // setup a conn handler that immediately "asks the other side about them" + // this is ProtocolIdentify. + n.ids = inet.NewIDService(n) + return n, nil } @@ -86,6 +97,14 @@ func (pn *peernet) Close() error { return pn.cg.Close() } +func (pn *peernet) Protocols() []inet.ProtocolID { + return pn.mux.Protocols() +} + +func (pn *peernet) Peerstore() peer.Peerstore { + return pn.ps +} + func (pn *peernet) String() string { return fmt.Sprintf("", pn.peer, len(pn.allConns())) } @@ -97,28 +116,21 @@ func (pn *peernet) handleNewStream(s inet.Stream) { // DialPeer attempts to establish a connection to a given peer. // Respects the context. 
-func (pn *peernet) DialPeer(ctx context.Context, p peer.Peer) error { +func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) error { return pn.connect(p) } -func (pn *peernet) connect(p peer.Peer) error { - log.Debugf("%s dialing %s", pn.peer, p) - - // cannot trust the peer we get. typical for tests to give us - // a peer from some other peerstore... - p, err := pn.ps.Add(p) - if err != nil { - return err - } - +func (pn *peernet) connect(p peer.ID) error { // first, check if we already have live connections pn.RLock() - cs, found := pn.connsByPeer[pid(p)] + cs, found := pn.connsByPeer[p] pn.RUnlock() if found && len(cs) > 0 { return nil } + log.Debugf("%s (newly) dialing %s", pn.peer, p) + // ok, must create a new connection. we need a link links := pn.mocknet.LinksBetweenPeers(pn.peer, p) if len(links) < 1 { @@ -136,7 +148,7 @@ func (pn *peernet) connect(p peer.Peer) error { return nil } -func (pn *peernet) openConn(r peer.Peer, l *link) *conn { +func (pn *peernet) openConn(r peer.ID, l *link) *conn { lc, rc := l.newConnPair(pn) log.Debugf("%s opening connection to %s", pn.LocalPeer(), lc.RemotePeer()) pn.addConn(lc) @@ -152,13 +164,17 @@ func (pn *peernet) remoteOpenedConn(c *conn) { // addConn constructs and adds a connection // to given remote peer over given link func (pn *peernet) addConn(c *conn) { + + // run the Identify protocol/handshake. 
+ pn.ids.IdentifyConn(c) + pn.Lock() - cs, found := pn.connsByPeer[pid(c.RemotePeer())] + cs, found := pn.connsByPeer[c.RemotePeer()] if !found { cs = map[*conn]struct{}{} - pn.connsByPeer[pid(c.RemotePeer())] = cs + pn.connsByPeer[c.RemotePeer()] = cs } - pn.connsByPeer[pid(c.RemotePeer())][c] = struct{}{} + pn.connsByPeer[c.RemotePeer()][c] = struct{}{} cs, found = pn.connsByLink[c.link] if !found { @@ -180,7 +196,7 @@ func (pn *peernet) removeConn(c *conn) { } delete(cs, c) - cs, found = pn.connsByPeer[pid(c.remote)] + cs, found = pn.connsByPeer[c.remote] if !found { panic("attempting to remove a conn that doesnt exist") } @@ -193,16 +209,16 @@ func (pn *peernet) CtxGroup() ctxgroup.ContextGroup { } // LocalPeer the network's LocalPeer -func (pn *peernet) LocalPeer() peer.Peer { +func (pn *peernet) LocalPeer() peer.ID { return pn.peer } // Peers returns the connected peers -func (pn *peernet) Peers() []peer.Peer { +func (pn *peernet) Peers() []peer.ID { pn.RLock() defer pn.RUnlock() - peers := make([]peer.Peer, 0, len(pn.connsByPeer)) + peers := make([]peer.ID, 0, len(pn.connsByPeer)) for _, cs := range pn.connsByPeer { for c := range cs { peers = append(peers, c.remote) @@ -226,11 +242,11 @@ func (pn *peernet) Conns() []inet.Conn { return out } -func (pn *peernet) ConnsToPeer(p peer.Peer) []inet.Conn { +func (pn *peernet) ConnsToPeer(p peer.ID) []inet.Conn { pn.RLock() defer pn.RUnlock() - cs, found := pn.connsByPeer[pid(p)] + cs, found := pn.connsByPeer[p] if !found || len(cs) == 0 { return nil } @@ -243,9 +259,9 @@ func (pn *peernet) ConnsToPeer(p peer.Peer) []inet.Conn { } // ClosePeer connections to peer -func (pn *peernet) ClosePeer(p peer.Peer) error { +func (pn *peernet) ClosePeer(p peer.ID) error { pn.RLock() - cs, found := pn.connsByPeer[pid(p)] + cs, found := pn.connsByPeer[p] pn.RUnlock() if !found { return nil @@ -266,23 +282,23 @@ func (pn *peernet) BandwidthTotals() (in uint64, out uint64) { // ListenAddresses returns a list of addresses at which 
this network listens. func (pn *peernet) ListenAddresses() []ma.Multiaddr { - return []ma.Multiaddr{} + return pn.Peerstore().Addresses(pn.LocalPeer()) } // InterfaceListenAddresses returns a list of addresses at which this network // listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to // use the known local interfaces. func (pn *peernet) InterfaceListenAddresses() ([]ma.Multiaddr, error) { - return []ma.Multiaddr{}, nil + return pn.ListenAddresses(), nil } // Connectedness returns a state signaling connection capabilities // For now only returns Connecter || NotConnected. Expand into more later. -func (pn *peernet) Connectedness(p peer.Peer) inet.Connectedness { +func (pn *peernet) Connectedness(p peer.ID) inet.Connectedness { pn.Lock() defer pn.Unlock() - cs, found := pn.connsByPeer[pid(p)] + cs, found := pn.connsByPeer[p] if found && len(cs) > 0 { return inet.Connected } @@ -292,11 +308,11 @@ func (pn *peernet) Connectedness(p peer.Peer) inet.Connectedness { // NewStream returns a new stream to given peer p. // If there is no connection to p, attempts to create one. // If ProtocolID is "", writes no header. -func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.Peer) (inet.Stream, error) { +func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.ID) (inet.Stream, error) { pn.Lock() defer pn.Unlock() - cs, found := pn.connsByPeer[pid(p)] + cs, found := pn.connsByPeer[p] if !found || len(cs) < 1 { return nil, fmt.Errorf("no connection to peer") } @@ -313,7 +329,7 @@ func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.Peer) (inet.Stream, erro n-- } - return c.NewStreamWithProtocol(pr, p) + return c.NewStreamWithProtocol(pr) } // SetHandler sets the protocol handler on the Network's Muxer. 
@@ -322,6 +338,6 @@ func (pn *peernet) SetHandler(p inet.ProtocolID, h inet.StreamHandler) { pn.mux.SetHandler(p, h) } -func pid(p peer.Peer) peerID { - return peerID(p.ID()) +func (pn *peernet) IdentifyProtocol() *inet.IDService { + return pn.ids } diff --git a/net/mock/mock_test.go b/net/mock/mock_test.go index 4d3a39ac8..be10cabea 100644 --- a/net/mock/mock_test.go +++ b/net/mock/mock_test.go @@ -14,51 +14,64 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ) +func randPeer(t *testing.T) peer.ID { + p, err := testutil.RandPeerID() + if err != nil { + t.Fatal(err) + } + return p +} + func TestNetworkSetup(t *testing.T) { ctx := context.Background() - p1 := testutil.RandPeer() - p2 := testutil.RandPeer() - p3 := testutil.RandPeer() + sk1, _, err := testutil.RandKeyPair(512) + if err != nil { + t.Fatal(t) + } + sk2, _, err := testutil.RandKeyPair(512) + if err != nil { + t.Fatal(t) + } + sk3, _, err := testutil.RandKeyPair(512) + if err != nil { + t.Fatal(t) + } mn := New(ctx) - // peers := []peer.Peer{p1, p2, p3} + // peers := []peer.ID{p1, p2, p3} // add peers to mock net - n1, err := mn.AddPeer(p1.ID()) - if err != nil { - t.Fatal(err) - } + a1 := testutil.RandLocalTCPAddress() + a2 := testutil.RandLocalTCPAddress() + a3 := testutil.RandLocalTCPAddress() - n2, err := mn.AddPeer(p2.ID()) + n1, err := mn.AddPeer(sk1, a1) if err != nil { t.Fatal(err) } + p1 := n1.LocalPeer() - n3, err := mn.AddPeer(p3.ID()) + n2, err := mn.AddPeer(sk2, a2) if err != nil { t.Fatal(err) } + p2 := n2.LocalPeer() + + n3, err := mn.AddPeer(sk3, a3) + if err != nil { + t.Fatal(err) + } + p3 := n3.LocalPeer() // check peers and net - - if !mn.Peer(p1.ID()).ID().Equal(p1.ID()) { - t.Error("peer for p1.ID != p1.ID") - } - if !mn.Peer(p2.ID()).ID().Equal(p2.ID()) { - t.Error("peer for p2.ID != p2.ID") - } - if !mn.Peer(p3.ID()).ID().Equal(p3.ID()) { - t.Error("peer for p3.ID != p3.ID") - } - - if mn.Net(p1.ID()) != n1 { + if mn.Net(p1) != 
n1 { t.Error("net for p1.ID != n1") } - if mn.Net(p2.ID()) != n2 { + if mn.Net(p2) != n2 { t.Error("net for p2.ID != n1") } - if mn.Net(p3.ID()) != n3 { + if mn.Net(p3) != n3 { t.Error("net for p3.ID != n1") } @@ -373,24 +386,32 @@ func TestStreamsStress(t *testing.T) { }(i) } - wg.Done() + wg.Wait() } func TestAdding(t *testing.T) { mn := New(context.Background()) - p1 := testutil.RandPeer() - p2 := testutil.RandPeer() - p3 := testutil.RandPeer() - peers := []peer.Peer{p1, p2, p3} - - for _, p := range peers { - if _, err := mn.AddPeer(p.ID()); err != nil { - t.Error(err) + peers := []peer.ID{} + for i := 0; i < 3; i++ { + sk, _, err := testutil.RandKeyPair(512) + if err != nil { + t.Fatal(err) } + + a := testutil.RandLocalTCPAddress() + n, err := mn.AddPeer(sk, a) + if err != nil { + t.Fatal(err) + } + + peers = append(peers, n.LocalPeer()) } + p1 := peers[0] + p2 := peers[1] + // link them for _, p1 := range peers { for _, p2 := range peers { @@ -401,9 +422,9 @@ func TestAdding(t *testing.T) { } // set the new stream handler on p2 - n2 := mn.Net(p2.ID()) + n2 := mn.Net(p2) if n2 == nil { - t.Fatalf("no network for %s", p2.ID()) + t.Fatalf("no network for %s", p2) } n2.SetHandler(inet.ProtocolBitswap, func(s inet.Stream) { go func() { @@ -429,9 +450,9 @@ func TestAdding(t *testing.T) { } // talk to p2 - n1 := mn.Net(p1.ID()) + n1 := mn.Net(p1) if n1 == nil { - t.Fatalf("no network for %s", p1.ID()) + t.Fatalf("no network for %s", p1) } s, err := n1.NewStream(inet.ProtocolBitswap, p2) diff --git a/net/mux.go b/net/mux.go index f35ec01c0..a1513325a 100644 --- a/net/mux.go +++ b/net/mux.go @@ -11,7 +11,7 @@ import ( lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables" ) -var log = eventlog.Logger("mux2") +var log = eventlog.Logger("network") // Mux provides simple stream multixplexing. 
// It helps you precisely when: @@ -37,6 +37,17 @@ type Mux struct { sync.RWMutex } +// Protocols returns the list of protocols this muxer has handlers for +func (m *Mux) Protocols() []ProtocolID { + m.RLock() + l := make([]ProtocolID, 0, len(m.Handlers)) + for p := range m.Handlers { + l = append(l, p) + } + m.RUnlock() + return l +} + // ReadProtocolHeader reads the stream and returns the next Handler function // according to the muxer encoding. func (m *Mux) ReadProtocolHeader(s io.Reader) (string, StreamHandler, error) { diff --git a/net/net.go b/net/net.go index 2a4b0df29..0eae441c9 100644 --- a/net/net.go +++ b/net/net.go @@ -2,6 +2,9 @@ package net import ( + "fmt" + + ic "github.com/jbenet/go-ipfs/crypto" swarm "github.com/jbenet/go-ipfs/net/swarm" peer "github.com/jbenet/go-ipfs/peer" @@ -39,11 +42,15 @@ func (s *stream) Write(p []byte) (n int, err error) { type conn_ swarm.Conn +func (s *conn_) String() string { + return s.SwarmConn().String() +} + func (c *conn_) SwarmConn() *swarm.Conn { return (*swarm.Conn)(c) } -func (c *conn_) NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error) { +func (c *conn_) NewStreamWithProtocol(pr ProtocolID) (Stream, error) { s, err := (*swarm.Conn)(c).NewStream() if err != nil { return nil, err @@ -59,37 +66,43 @@ func (c *conn_) NewStreamWithProtocol(pr ProtocolID, p peer.Peer) (Stream, error return ss, nil } -// LocalMultiaddr is the Multiaddr on this side func (c *conn_) LocalMultiaddr() ma.Multiaddr { return c.SwarmConn().LocalMultiaddr() } -// LocalPeer is the Peer on our side of the connection -func (c *conn_) LocalPeer() peer.Peer { - return c.SwarmConn().LocalPeer() -} - -// RemoteMultiaddr is the Multiaddr on the remote side func (c *conn_) RemoteMultiaddr() ma.Multiaddr { return c.SwarmConn().RemoteMultiaddr() } -// RemotePeer is the Peer on the remote side -func (c *conn_) RemotePeer() peer.Peer { +func (c *conn_) LocalPeer() peer.ID { + return c.SwarmConn().LocalPeer() +} + +func (c *conn_) 
RemotePeer() peer.ID { return c.SwarmConn().RemotePeer() } +func (c *conn_) LocalPrivateKey() ic.PrivKey { + return c.SwarmConn().LocalPrivateKey() +} + +func (c *conn_) RemotePublicKey() ic.PubKey { + return c.SwarmConn().RemotePublicKey() +} + // network implements the Network interface, type network struct { - local peer.Peer // local peer + local peer.ID // local peer mux Mux // protocol multiplexing swarm *swarm.Swarm // peer connection multiplexing + ps peer.Peerstore + ids *IDService cg ctxgroup.ContextGroup // for Context closing } // NewNetwork constructs a new network and starts listening on given addresses. -func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.Peer, +func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.ID, peers peer.Peerstore) (Network, error) { s, err := swarm.NewSwarm(ctx, listen, local, peers) @@ -102,22 +115,46 @@ func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.Peer, swarm: s, mux: Mux{Handlers: StreamHandlerMap{}}, cg: ctxgroup.WithContext(ctx), + ps: peers, } + n.cg.SetTeardown(n.close) + n.cg.AddChildGroup(s.CtxGroup()) + s.SetStreamHandler(func(s *swarm.Stream) { n.mux.Handle((*stream)(s)) }) - n.cg.SetTeardown(n.close) - n.cg.AddChildGroup(s.CtxGroup()) + // setup a conn handler that immediately "asks the other side about them" + // this is ProtocolIdentify. + n.ids = NewIDService(n) + s.SetConnHandler(n.newConnHandler) + return n, nil } +func (n *network) newConnHandler(c *swarm.Conn) { + cc := (*conn_)(c) + n.ids.IdentifyConn(cc) +} + // DialPeer attempts to establish a connection to a given peer. // Respects the context. 
-func (n *network) DialPeer(ctx context.Context, p peer.Peer) error { - _, err := n.swarm.Dial(ctx, p) - return err +func (n *network) DialPeer(ctx context.Context, p peer.ID) error { + log.Debugf("[%s] network dialing peer [%s]", n.local, p) + sc, err := n.swarm.Dial(ctx, p) + if err != nil { + return err + } + + // identify the connection before returning. + n.ids.IdentifyConn((*conn_)(sc)) + log.Debugf("network for %s finished dialing %s", n.local, p) + return nil +} + +func (n *network) Protocols() []ProtocolID { + return n.mux.Protocols() } // CtxGroup returns the network's ContextGroup @@ -131,15 +168,20 @@ func (n *network) Swarm() *swarm.Swarm { } // LocalPeer the network's LocalPeer -func (n *network) LocalPeer() peer.Peer { +func (n *network) LocalPeer() peer.ID { return n.swarm.LocalPeer() } // Peers returns the connected peers -func (n *network) Peers() []peer.Peer { +func (n *network) Peers() []peer.ID { return n.swarm.Peers() } +// Peers returns the connected peers +func (n *network) Peerstore() peer.Peerstore { + return n.ps +} + // Conns returns the connected peers func (n *network) Conns() []Conn { conns1 := n.swarm.Connections() @@ -150,8 +192,18 @@ func (n *network) Conns() []Conn { return out } +// ConnsToPeer returns the connections in this Netowrk for given peer. +func (n *network) ConnsToPeer(p peer.ID) []Conn { + conns1 := n.swarm.ConnectionsToPeer(p) + out := make([]Conn, len(conns1)) + for i, c := range conns1 { + out[i] = (*conn_)(c) + } + return out +} + // ClosePeer connection to peer -func (n *network) ClosePeer(p peer.Peer) error { +func (n *network) ClosePeer(p peer.ID) error { return n.swarm.CloseConnection(p) } @@ -186,9 +238,9 @@ func (n *network) InterfaceListenAddresses() ([]ma.Multiaddr, error) { // Connectedness returns a state signaling connection capabilities // For now only returns Connected || NotConnected. Expand into more later. 
-func (n *network) Connectedness(p peer.Peer) Connectedness { +func (n *network) Connectedness(p peer.ID) Connectedness { c := n.swarm.ConnectionsToPeer(p) - if c != nil && len(c) < 1 { + if c != nil && len(c) > 0 { return Connected } return NotConnected @@ -197,8 +249,9 @@ func (n *network) Connectedness(p peer.Peer) Connectedness { // NewStream returns a new stream to given peer p. // If there is no connection to p, attempts to create one. // If ProtocolID is "", writes no header. -func (c *network) NewStream(pr ProtocolID, p peer.Peer) (Stream, error) { - s, err := c.swarm.NewStreamWithPeer(p) +func (n *network) NewStream(pr ProtocolID, p peer.ID) (Stream, error) { + log.Debugf("[%s] network opening stream to peer [%s]: %s", n.local, p, pr) + s, err := n.swarm.NewStreamWithPeer(p) if err != nil { return nil, err } @@ -219,6 +272,14 @@ func (n *network) SetHandler(p ProtocolID, h StreamHandler) { n.mux.SetHandler(p, h) } +func (n *network) String() string { + return fmt.Sprintf("", n.LocalPeer()) +} + +func (n *network) IdentifyProtocol() *IDService { + return n.ids +} + func WriteProtocolHeader(pr ProtocolID, s Stream) error { if pr != "" { // only write proper protocol headers if err := WriteLengthPrefix(s, string(pr)); err != nil { diff --git a/net/net_test.go b/net/net_test.go new file mode 100644 index 000000000..0704ec25c --- /dev/null +++ b/net/net_test.go @@ -0,0 +1,76 @@ +package net_test + +import ( + "fmt" + "testing" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + inet "github.com/jbenet/go-ipfs/net" +) + +// TestConnectednessCorrect starts a few networks, connects a few +// and tests Connectedness value is correct. 
+func TestConnectednessCorrect(t *testing.T) { + + ctx := context.Background() + + nets := make([]inet.Network, 4) + for i := 0; i < 4; i++ { + nets[i] = GenNetwork(t, ctx) + } + + // connect 0-1, 0-2, 0-3, 1-2, 2-3 + + dial := func(a, b inet.Network) { + DivulgeAddresses(b, a) + if err := a.DialPeer(ctx, b.LocalPeer()); err != nil { + t.Fatalf("Failed to dial: %s", err) + } + } + + dial(nets[0], nets[1]) + dial(nets[0], nets[3]) + dial(nets[1], nets[2]) + dial(nets[3], nets[2]) + + // there's something wrong with dial, i think. it's not finishing + // completely. there must be some async stuff. + <-time.After(100 * time.Millisecond) + + // test those connected show up correctly + + // test connected + expectConnectedness(t, nets[0], nets[1], inet.Connected) + expectConnectedness(t, nets[0], nets[3], inet.Connected) + expectConnectedness(t, nets[1], nets[2], inet.Connected) + expectConnectedness(t, nets[3], nets[2], inet.Connected) + + // test not connected + expectConnectedness(t, nets[0], nets[2], inet.NotConnected) + expectConnectedness(t, nets[1], nets[3], inet.NotConnected) + + for _, n := range nets { + n.Close() + } +} + +func expectConnectedness(t *testing.T, a, b inet.Network, expected inet.Connectedness) { + es := "%s is connected to %s, but Connectedness incorrect. 
%s %s" + if a.Connectedness(b.LocalPeer()) != expected { + t.Errorf(es, a, b, printConns(a), printConns(b)) + } + + // test symmetric case + if b.Connectedness(a.LocalPeer()) != expected { + t.Errorf(es, b, a, printConns(b), printConns(a)) + } +} + +func printConns(n inet.Network) string { + s := fmt.Sprintf("Connections in %s:\n", n) + for _, c := range n.Conns() { + s = s + fmt.Sprintf("- %s\n", c) + } + return s +} diff --git a/net/swarm/simul_test.go b/net/swarm/simul_test.go index a765f74d1..4e2c3feaa 100644 --- a/net/swarm/simul_test.go +++ b/net/swarm/simul_test.go @@ -1,36 +1,29 @@ package swarm import ( - "fmt" "sync" "testing" + "time" peer "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) func TestSimultOpen(t *testing.T) { // t.Skip("skipping for another test") - addrs := []string{ - "/ip4/127.0.0.1/tcp/1244", - "/ip4/127.0.0.1/tcp/1245", - } - ctx := context.Background() - swarms, _ := makeSwarms(ctx, t, addrs) + swarms, peers := makeSwarms(ctx, t, 2) // connect everyone { var wg sync.WaitGroup - connect := func(s *Swarm, dst peer.Peer) { + connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) { // copy for other peer - cp := testutil.NewPeerWithID(dst.ID()) - cp.AddAddress(dst.Addresses()[0]) - - if _, err := s.Dial(ctx, cp); err != nil { + s.peers.AddAddress(dst, addr) + if _, err := s.Dial(ctx, dst); err != nil { t.Fatal("error swarm dialing to peer", err) } wg.Done() @@ -38,8 +31,8 @@ func TestSimultOpen(t *testing.T) { log.Info("Connecting swarms simultaneously.") wg.Add(2) - go connect(swarms[0], swarms[1].local) - go connect(swarms[1], swarms[0].local) + go connect(swarms[0], swarms[1].local, peers[1].Addr) + go connect(swarms[1], swarms[0].local, peers[0].Addr) wg.Wait() } @@ -51,13 +44,7 @@ func TestSimultOpen(t *testing.T) { func 
TestSimultOpenMany(t *testing.T) { // t.Skip("very very slow") - many := 10 - addrs := []string{} - for i := 2200; i < (2200 + many); i++ { - s := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i) - addrs = append(addrs, s) - } - + addrs := 20 SubtestSwarm(t, addrs, 10) } @@ -67,14 +54,13 @@ func TestSimultOpenFewStress(t *testing.T) { } // t.Skip("skipping for another test") - num := 10 - // num := 100 - for i := 0; i < num; i++ { - addrs := []string{ - fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 1900+i), - fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2900+i), - } + msgs := 40 + swarms := 2 + rounds := 10 + // rounds := 100 - SubtestSwarm(t, addrs, 10) + for i := 0; i < rounds; i++ { + SubtestSwarm(t, swarms, msgs) + <-time.After(10 * time.Millisecond) } } diff --git a/net/swarm/swarm.go b/net/swarm/swarm.go index 2109790a9..a8a0fc7b2 100644 --- a/net/swarm/swarm.go +++ b/net/swarm/swarm.go @@ -22,21 +22,16 @@ var log = eventlog.Logger("swarm2") // Uses peerstream.Swarm type Swarm struct { swarm *ps.Swarm - local peer.Peer + local peer.ID peers peer.Peerstore + connh ConnHandler cg ctxgroup.ContextGroup } // NewSwarm constructs a Swarm, with a Chan. func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, - local peer.Peer, peers peer.Peerstore) (*Swarm, error) { - - // make sure our own peer is in our peerstore... - local, err := peers.Add(local) - if err != nil { - return nil, err - } + local peer.ID, peers peer.Peerstore) (*Swarm, error) { s := &Swarm{ swarm: ps.NewSwarm(), @@ -47,7 +42,7 @@ func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, // configure Swarm s.cg.SetTeardown(s.teardown) - s.swarm.SetConnHandler(s.connHandler) + s.SetConnHandler(nil) // make sure to setup our own conn handler. return s, s.listen(listenAddrs) } @@ -71,6 +66,27 @@ func (s *Swarm) StreamSwarm() *ps.Swarm { return s.swarm } +// SetConnHandler assigns the handler for new connections. +// See peerstream. You will rarely use this. 
See SetStreamHandler +func (s *Swarm) SetConnHandler(handler ConnHandler) { + + // handler is nil if user wants to clear the old handler. + if handler == nil { + s.swarm.SetConnHandler(func(psconn *ps.Conn) { + s.connHandler(psconn) + }) + return + } + + s.swarm.SetConnHandler(func(psconn *ps.Conn) { + // sc is nil if closed in our handler. + if sc := s.connHandler(psconn); sc != nil { + // call the user's handler. in a goroutine for sync safety. + go handler(sc) + } + }) +} + // SetStreamHandler assigns the handler for new streams. // See peerstream. func (s *Swarm) SetStreamHandler(handler StreamHandler) { @@ -80,13 +96,7 @@ func (s *Swarm) SetStreamHandler(handler StreamHandler) { } // NewStreamWithPeer creates a new stream on any available connection to p -func (s *Swarm) NewStreamWithPeer(p peer.Peer) (*Stream, error) { - // make sure we use OUR peers. (the tests mess with you...) - p, err := s.peers.Add(p) - if err != nil { - return nil, err - } - +func (s *Swarm) NewStreamWithPeer(p peer.ID) (*Stream, error) { // if we have no connections, try connecting. if len(s.ConnectionsToPeer(p)) == 0 { log.Debug("Swarm: NewStreamWithPeer no connections. Attempting to connect...") @@ -101,21 +111,12 @@ func (s *Swarm) NewStreamWithPeer(p peer.Peer) (*Stream, error) { } // StreamsWithPeer returns all the live Streams to p -func (s *Swarm) StreamsWithPeer(p peer.Peer) []*Stream { - // make sure we use OUR peers. (the tests mess with you...) - if p2, err := s.peers.Add(p); err == nil { - p = p2 - } - +func (s *Swarm) StreamsWithPeer(p peer.ID) []*Stream { return wrapStreams(ps.StreamsWithGroup(p, s.swarm.Streams())) } // ConnectionsToPeer returns all the live connections to p -func (s *Swarm) ConnectionsToPeer(p peer.Peer) []*Conn { - // make sure we use OUR peers. (the tests mess with you...) 
- if p2, err := s.peers.Add(p); err == nil { - p = p2 - } +func (s *Swarm) ConnectionsToPeer(p peer.ID) []*Conn { return wrapConns(ps.ConnsWithGroup(p, s.swarm.Conns())) } @@ -125,13 +126,7 @@ func (s *Swarm) Connections() []*Conn { } // CloseConnection removes a given peer from swarm + closes the connection -func (s *Swarm) CloseConnection(p peer.Peer) error { - // make sure we use OUR peers. (the tests mess with you...) - p, err := s.peers.Add(p) - if err != nil { - return err - } - +func (s *Swarm) CloseConnection(p peer.ID) error { conns := s.swarm.ConnsWithGroup(p) // boom. for _, c := range conns { c.Close() @@ -140,11 +135,11 @@ func (s *Swarm) CloseConnection(p peer.Peer) error { } // Peers returns a copy of the set of peers swarm is connected to. -func (s *Swarm) Peers() []peer.Peer { +func (s *Swarm) Peers() []peer.ID { conns := s.Connections() - seen := make(map[peer.Peer]struct{}) - peers := make([]peer.Peer, 0, len(conns)) + seen := make(map[peer.ID]struct{}) + peers := make([]peer.ID, 0, len(conns)) for _, c := range conns { p := c.RemotePeer() if _, found := seen[p]; found { @@ -157,6 +152,6 @@ func (s *Swarm) Peers() []peer.Peer { } // LocalPeer returns the local peer swarm is associated to. -func (s *Swarm) LocalPeer() peer.Peer { +func (s *Swarm) LocalPeer() peer.ID { return s.local } diff --git a/net/swarm/swarm_conn.go b/net/swarm/swarm_conn.go index 9fdd06b4a..36be0bc50 100644 --- a/net/swarm/swarm_conn.go +++ b/net/swarm/swarm_conn.go @@ -3,6 +3,7 @@ package swarm import ( "fmt" + ic "github.com/jbenet/go-ipfs/crypto" conn "github.com/jbenet/go-ipfs/net/conn" peer "github.com/jbenet/go-ipfs/peer" @@ -23,6 +24,10 @@ import ( // layers do build up pieces of functionality. and they're all just io.RW :) ) type Conn ps.Conn +// ConnHandler is called when new conns are opened from remote peers. 
+// See peerstream.ConnHandler +type ConnHandler func(*Conn) + func (c *Conn) StreamConn() *ps.Conn { return (*ps.Conn)(c) } @@ -35,13 +40,17 @@ func (c *Conn) RawConn() conn.Conn { return (*ps.Conn)(c).NetConn().(conn.Conn) } +func (c *Conn) String() string { + return fmt.Sprintf("", c.RawConn()) +} + // LocalMultiaddr is the Multiaddr on this side func (c *Conn) LocalMultiaddr() ma.Multiaddr { return c.RawConn().LocalMultiaddr() } // LocalPeer is the Peer on our side of the connection -func (c *Conn) LocalPeer() peer.Peer { +func (c *Conn) LocalPeer() peer.ID { return c.RawConn().LocalPeer() } @@ -51,10 +60,20 @@ func (c *Conn) RemoteMultiaddr() ma.Multiaddr { } // RemotePeer is the Peer on the remote side -func (c *Conn) RemotePeer() peer.Peer { +func (c *Conn) RemotePeer() peer.ID { return c.RawConn().RemotePeer() } +// LocalPrivateKey is the public key of the peer on this side +func (c *Conn) LocalPrivateKey() ic.PrivKey { + return c.RawConn().LocalPrivateKey() +} + +// RemotePublicKey is the public key of the peer on the remote side +func (c *Conn) RemotePublicKey() ic.PubKey { + return c.RawConn().RemotePublicKey() +} + // NewStream returns a new Stream from this connection func (c *Conn) NewStream() (*Stream, error) { s, err := c.StreamConn().NewStream() @@ -96,12 +115,12 @@ func (s *Swarm) newConnSetup(ctx context.Context, psConn *ps.Conn) (*Conn, error return nil, err } - // removing this for now, as it has to change. we can put this in a different - // sub-protocol anyway. - // // run Handshake3 - // if err := runHandshake3(ctx, s, sc); err != nil { - // return nil, err - // } + // if we have a public key, make sure we add it to our peerstore! + // This is an important detail. Otherwise we must fetch the public + // key from the DHT or some other system. + if pk := sc.RemotePublicKey(); pk != nil { + s.peers.AddPubKey(sc.RemotePeer(), pk) + } // ok great! we can use it. add it to our group. 
@@ -113,29 +132,3 @@ func (s *Swarm) newConnSetup(ctx context.Context, psConn *ps.Conn) (*Conn, error return sc, nil } - -// func runHandshake3(ctx context.Context, s *Swarm, c *Conn) error { -// log.Event(ctx, "newConnection", c.LocalPeer(), c.RemotePeer()) - -// stream, err := c.NewStream() -// if err != nil { -// return err -// } - -// // handshake3 (this whole thing is ugly. maybe lets get rid of it...) -// h3result, err := conn.Handshake3(ctx, stream, c.RawConn()) -// if err != nil { -// return fmt.Errorf("Handshake3 failed: %s", err) -// } - -// // check for nats. you know, just in case. -// if h3result.LocalObservedAddress != nil { -// checkNATWarning(s, h3result.LocalObservedAddress, c.LocalMultiaddr()) -// } else { -// log.Warningf("Received nil observed address from %s", c.RemotePeer()) -// } - -// stream.Close() -// log.Event(ctx, "handshake3Succeeded", c.LocalPeer(), c.RemotePeer()) -// return nil -// } diff --git a/net/swarm/swarm_dial.go b/net/swarm/swarm_dial.go index e71d63b57..03e596e9e 100644 --- a/net/swarm/swarm_dial.go +++ b/net/swarm/swarm_dial.go @@ -17,9 +17,9 @@ import ( // the connection will happen over. Swarm can use whichever it choses. // This allows us to use various transport protocols, do NAT traversal/relay, // etc. to achive connection. -func (s *Swarm) Dial(ctx context.Context, p peer.Peer) (*Conn, error) { +func (s *Swarm) Dial(ctx context.Context, p peer.ID) (*Conn, error) { - if p.ID().Equal(s.local.ID()) { + if p == s.local { return nil, errors.New("Attempted connection to self!") } @@ -31,28 +31,35 @@ func (s *Swarm) Dial(ctx context.Context, p peer.Peer) (*Conn, error) { } } - // check if we don't have the peer in Peerstore - p, err := s.peers.Add(p) - if err != nil { - return nil, err + sk := s.peers.PrivKey(s.local) + if sk == nil { + // may be fine for sk to be nil, just log a warning. 
+ log.Warning("Dial not given PrivateKey, so WILL NOT SECURE conn.") + } + + remoteAddrs := s.peers.Addresses(p) + if len(remoteAddrs) == 0 { + return nil, errors.New("peer has no addresses") + } + localAddrs := s.peers.Addresses(s.local) + if len(localAddrs) == 0 { + log.Debug("Dialing out with no local addresses.") } // open connection to peer d := &conn.Dialer{ - LocalPeer: s.local, - Peerstore: s.peers, - } - - if len(p.Addresses()) == 0 { - return nil, errors.New("peer has no addresses") + LocalPeer: s.local, + LocalAddrs: localAddrs, + PrivateKey: sk, } // try to connect to one of the peer's known addresses. // for simplicity, we do this sequentially. // A future commit will do this asynchronously. var connC conn.Conn - for _, addr := range p.Addresses() { - connC, err = d.DialAddr(ctx, addr, p) + var err error + for _, addr := range remoteAddrs { + connC, err = d.Dial(ctx, addr, p) if err == nil { break } diff --git a/net/swarm/swarm_listen.go b/net/swarm/swarm_listen.go index 94fd17aa8..bcc55cad6 100644 --- a/net/swarm/swarm_listen.go +++ b/net/swarm/swarm_listen.go @@ -6,8 +6,8 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - multierr "github.com/jbenet/go-ipfs/util/multierr" ps "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream" + multierr "github.com/jbenet/go-ipfs/util/multierr" ) // Open listeners for each network the swarm should listen on @@ -35,21 +35,26 @@ func (s *Swarm) listen(addrs []ma.Multiaddr) error { // Listen for new connections on the given multiaddr func (s *Swarm) setupListener(maddr ma.Multiaddr) error { - resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr}) + // TODO rethink how this has to work. 
(jbenet) + // + // resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr}) + // if err != nil { + // return err + // } + // for _, a := range resolved { + // s.peers.AddAddress(s.local, a) + // } + + sk := s.peers.PrivKey(s.local) + if sk == nil { + // may be fine for sk to be nil, just log a warning. + log.Warning("Listener not given PrivateKey, so WILL NOT SECURE conns.") + } + list, err := conn.Listen(s.cg.Context(), maddr, s.local, sk) if err != nil { return err } - list, err := conn.Listen(s.cg.Context(), maddr, s.local, s.peers) - if err != nil { - return err - } - - // add resolved local addresses to peer - for _, addr := range resolved { - s.local.AddAddress(addr) - } - // AddListener to the peerstream Listener. this will begin accepting connections // and streams! _, err = s.swarm.AddListener(list) @@ -60,21 +65,22 @@ func (s *Swarm) setupListener(maddr ma.Multiaddr) error { // here we configure it slightly. Note that this is sequential, so if anything // will take a while do it in a goroutine. // See https://godoc.org/github.com/jbenet/go-peerstream for more information -func (s *Swarm) connHandler(c *ps.Conn) { - go func() { - ctx := context.Background() - // this context is for running the handshake, which -- when receiveing connections - // -- we have no bound on beyond what the transport protocol bounds it at. - // note that setup + the handshake are bounded by underlying io. - // (i.e. if TCP or UDP disconnects (or the swarm closes), we're done. - // Q: why not have a shorter handshake? think about an HTTP server on really slow conns. - // as long as the conn is live (TCP says its online), it tries its best. we follow suit.) +func (s *Swarm) connHandler(c *ps.Conn) *Conn { + ctx := context.Background() + // this context is for running the handshake, which -- when receiveing connections + // -- we have no bound on beyond what the transport protocol bounds it at. + // note that setup + the handshake are bounded by underlying io. + // (i.e. 
if TCP or UDP disconnects (or the swarm closes), we're done. + // Q: why not have a shorter handshake? think about an HTTP server on really slow conns. + // as long as the conn is live (TCP says its online), it tries its best. we follow suit.) - if _, err := s.newConnSetup(ctx, c); err != nil { - log.Error(err) - log.Event(ctx, "newConnHandlerDisconnect", lgbl.NetConn(c.NetConn()), lgbl.Error(err)) - c.Close() // boom. close it. - return - } - }() + sc, err := s.newConnSetup(ctx, c) + if err != nil { + log.Error(err) + log.Event(ctx, "newConnHandlerDisconnect", lgbl.NetConn(c.NetConn()), lgbl.Error(err)) + c.Close() // boom. close it. + return nil + } + + return sc } diff --git a/net/swarm/swarm_test.go b/net/swarm/swarm_test.go index a8e3aa4c4..c0a1ab9fa 100644 --- a/net/swarm/swarm_test.go +++ b/net/swarm/swarm_test.go @@ -7,9 +7,7 @@ import ( "testing" "time" - ci "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" errors "github.com/jbenet/go-ipfs/util/debugerror" testutil "github.com/jbenet/go-ipfs/util/testutil" @@ -48,88 +46,68 @@ func EchoStreamHandler(stream *Stream) { }() } -func setupPeer(t *testing.T, addr string) peer.Peer { - tcp, err := ma.NewMultiaddr(addr) - if err != nil { - t.Fatal(err) - } +func makeSwarms(ctx context.Context, t *testing.T, num int) ([]*Swarm, []testutil.PeerNetParams) { + swarms := make([]*Swarm, 0, num) + peersnp := make([]testutil.PeerNetParams, 0, num) - sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) - if err != nil { - t.Fatal(err) - } + for i := 0; i < num; i++ { + localnp := testutil.RandPeerNetParams(t) + peersnp = append(peersnp, localnp) - p, err := testutil.NewPeerWithKeyPair(sk, pk) - if err != nil { - t.Fatal(err) - } - p.AddAddress(tcp) - return p -} - -func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []peer.Peer) { - swarms := []*Swarm{} - - for _, addr := range addrs { - local := setupPeer(t, addr) peerstore := 
peer.NewPeerstore() - swarm, err := NewSwarm(ctx, local.Addresses(), local, peerstore) + peerstore.AddAddress(localnp.ID, localnp.Addr) + peerstore.AddPubKey(localnp.ID, localnp.PubKey) + peerstore.AddPrivKey(localnp.ID, localnp.PrivKey) + + addrs := peerstore.Addresses(localnp.ID) + swarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore) if err != nil { t.Fatal(err) } + swarm.SetStreamHandler(EchoStreamHandler) swarms = append(swarms, swarm) } - peers := make([]peer.Peer, len(swarms)) - for i, s := range swarms { - peers[i] = s.local - } - - return swarms, peers + return swarms, peersnp } -func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { +func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm, peersnp []testutil.PeerNetParams) { + + var wg sync.WaitGroup + connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) { + // TODO: make a DialAddr func. + s.peers.AddAddress(dst, addr) + if _, err := s.Dial(ctx, dst); err != nil { + t.Fatal("error swarm dialing to peer", err) + } + wg.Done() + } + + log.Info("Connecting swarms simultaneously.") + for _, s := range swarms { + for _, p := range peersnp { + if p.ID != s.local { // don't connect to self. 
+ wg.Add(1) + connect(s, p.ID, p.Addr) + } + } + } + wg.Wait() + + for _, s := range swarms { + log.Infof("%s swarm routing table: %s", s.local, s.Peers()) + } +} + +func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) { // t.Skip("skipping for another test") ctx := context.Background() - swarms, peers := makeSwarms(ctx, t, addrs) + swarms, peersnp := makeSwarms(ctx, t, SwarmNum) // connect everyone - { - var wg sync.WaitGroup - connect := func(s *Swarm, dst peer.Peer) { - // copy for other peer - - cp, err := s.peers.FindOrCreate(dst.ID()) - if err != nil { - t.Fatal(err) - } - cp.AddAddress(dst.Addresses()[0]) - - log.Infof("SWARM TEST: %s dialing %s", s.local, dst) - if _, err := s.Dial(ctx, cp); err != nil { - t.Fatal("error swarm dialing to peer", err) - } - log.Infof("SWARM TEST: %s connected to %s", s.local, dst) - wg.Done() - } - - log.Info("Connecting swarms simultaneously.") - for _, s := range swarms { - for _, p := range peers { - if p != s.local { // don't connect to self. 
- wg.Add(1) - connect(s, p) - } - } - } - wg.Wait() - - for _, s := range swarms { - log.Infof("%s swarm routing table: %s", s.local, s.Peers()) - } - } + connectSwarms(t, ctx, swarms, peersnp) // ping/pong for _, s1 := range swarms { @@ -138,13 +116,8 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { log.Debugf("-------------------------------------------------------") _, cancel := context.WithCancel(ctx) - peers, err := s1.peers.All() - if err != nil { - t.Fatal(err) - } - - got := map[u.Key]int{} - errChan := make(chan error, MsgNum*len(*peers)) + got := map[peer.ID]int{} + errChan := make(chan error, MsgNum*len(peersnp)) streamChan := make(chan *Stream, MsgNum) // send out "ping" x MsgNum to every peer @@ -152,7 +125,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { defer close(streamChan) var wg sync.WaitGroup - send := func(p peer.Peer) { + send := func(p peer.ID) { defer wg.Done() // first, one stream per peer (nice) @@ -173,13 +146,13 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { streamChan <- stream } - for _, p := range *peers { - if p == s1.local { + for _, p := range peersnp { + if p.ID == s1.local { continue // dont send to self... 
} wg.Add(1) - go send(p) + go send(p.ID) } wg.Wait() }() @@ -188,7 +161,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { go func() { defer close(errChan) count := 0 - countShouldBe := MsgNum * (len(*peers) - 1) + countShouldBe := MsgNum * (len(peersnp) - 1) for stream := range streamChan { // one per peer defer stream.Close() @@ -215,7 +188,7 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { msgCount++ } - got[p.Key()] = msgCount + got[p] = msgCount count += msgCount } @@ -232,8 +205,8 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { } log.Debugf("%s got pongs", s1.local) - if (len(*peers) - 1) != len(got) { - t.Error("got less messages than sent") + if (len(peersnp) - 1) != len(got) { + t.Errorf("got (%d) less messages than sent (%d).", len(got), len(peersnp)) } for p, n := range got { @@ -254,15 +227,36 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) { func TestSwarm(t *testing.T) { // t.Skip("skipping for another test") - addrs := []string{ - "/ip4/127.0.0.1/tcp/10234", - "/ip4/127.0.0.1/tcp/10235", - "/ip4/127.0.0.1/tcp/10236", - "/ip4/127.0.0.1/tcp/10237", - "/ip4/127.0.0.1/tcp/10238", - } - // msgs := 1000 msgs := 100 - SubtestSwarm(t, addrs, msgs) + swarms := 5 + SubtestSwarm(t, swarms, msgs) +} + +func TestConnHandler(t *testing.T) { + // t.Skip("skipping for another test") + + ctx := context.Background() + swarms, peersnp := makeSwarms(ctx, t, 5) + + gotconn := make(chan struct{}, 10) + swarms[0].SetConnHandler(func(conn *Conn) { + gotconn <- struct{}{} + }) + + connectSwarms(t, ctx, swarms, peersnp) + + <-time.After(time.Millisecond) + // should've gotten 5 by now. + close(gotconn) + + expect := 4 + actual := 0 + for _ = range gotconn { + actual++ + } + + if actual != expect { + t.Fatal("should have connected to %d swarms. 
got: %d", actual, expect) + } } diff --git a/peer/metrics.go b/peer/metrics.go new file mode 100644 index 000000000..6cad11cdf --- /dev/null +++ b/peer/metrics.go @@ -0,0 +1,63 @@ +package peer + +import ( + "sync" + "time" +) + +// LatencyEWMASmooting governs the decay of the EWMA (the speed +// at which it changes). This must be a normalized (0-1) value. +// 1 is 100% change, 0 is no change. +var LatencyEWMASmoothing = 0.1 + +// Metrics is just an object that tracks metrics +// across a set of peers. +type Metrics interface { + + // RecordLatency records a new latency measurement + RecordLatency(ID, time.Duration) + + // LatencyEWMA returns an exponentially-weighted moving avg. + // of all measurements of a peer's latency. + LatencyEWMA(ID) time.Duration +} + +type metrics struct { + latmap map[ID]time.Duration + latmu sync.RWMutex +} + +func NewMetrics() Metrics { + return &metrics{ + latmap: make(map[ID]time.Duration), + } +} + +// RecordLatency records a new latency measurement +func (m *metrics) RecordLatency(p ID, next time.Duration) { + nextf := float64(next) + s := LatencyEWMASmoothing + if s > 1 || s < 0 { + s = 0.1 // ignore the knob. it's broken. look, it jiggles. + } + + m.latmu.Lock() + ewma, found := m.latmap[p] + ewmaf := float64(ewma) + if !found { + m.latmap[p] = next // when no data, just take it as the mean. + } else { + nextf = ((1.0 - s) * ewmaf) + (s * nextf) + m.latmap[p] = time.Duration(nextf) + } + m.latmu.Unlock() +} + +// LatencyEWMA returns an exponentially-weighted moving avg. +// of all measurements of a peer's latency. 
+func (m *metrics) LatencyEWMA(p ID) time.Duration { + m.latmu.RLock() + lat := m.latmap[p] + m.latmu.RUnlock() + return time.Duration(lat) +} diff --git a/peer/metrics_test.go b/peer/metrics_test.go new file mode 100644 index 000000000..0301792c3 --- /dev/null +++ b/peer/metrics_test.go @@ -0,0 +1,40 @@ +package peer_test + +import ( + "fmt" + "math/rand" + "testing" + "time" + + peer "github.com/jbenet/go-ipfs/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestLatencyEWMAFun(t *testing.T) { + t.Skip("run it for fun") + + m := peer.NewMetrics() + id, err := testutil.RandPeerID() + if err != nil { + t.Fatal(err) + } + + mu := 100.0 + sig := 10.0 + next := func() time.Duration { + mu = (rand.NormFloat64() * sig) + mu + return time.Duration(mu) + } + + print := func() { + fmt.Printf("%3.f %3.f --> %d\n", sig, mu, m.LatencyEWMA(id)) + } + + for { + select { + case <-time.After(200 * time.Millisecond): + m.RecordLatency(id, next()) + print() + } + } +} diff --git a/peer/peer.go b/peer/peer.go index b3d3a7db7..9d57f9811 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -2,11 +2,8 @@ package peer import ( - "bytes" - "errors" + "encoding/hex" "fmt" - "sync" - "time" b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" @@ -18,139 +15,18 @@ import ( var log = u.Logger("peer") -// ID is a byte slice representing the identity of a peer. -type ID mh.Multihash - -// String is utililty function for printing out peer ID strings. -func (id ID) String() string { - return id.Pretty() -} - -// Equal is utililty function for comparing two peer ID's -func (id ID) Equal(other ID) bool { - return bytes.Equal(id, other) -} +// ID represents the identity of a peer. 
+type ID string // Pretty returns a b58-encoded string of the ID func (id ID) Pretty() string { - return b58.Encode(id) + return IDB58Encode(id) } -// DecodePrettyID returns a b58-encoded string of the ID -func DecodePrettyID(s string) (ID, error) { - m, err := mh.FromB58String(s) - if err != nil { - return nil, err +func (id ID) Loggable() map[string]interface{} { + return map[string]interface{}{ + "peerID": id.Pretty(), } - return ID(m), err -} - -// IDFromPubKey retrieves a Public Key from the peer given by pk -func IDFromPubKey(pk ic.PubKey) (ID, error) { - b, err := pk.Bytes() - if err != nil { - return nil, err - } - hash := u.Hash(b) - return ID(hash), nil -} - -// Map maps Key (string) : *peer (slices are not comparable). -type Map map[u.Key]Peer - -// Peer represents the identity information of an IPFS Node, including -// ID, and relevant Addresses. -type Peer interface { - - // TODO reduce the peer interface to be read-only. Force mutations to occur - // on the peer store eg. peerstore.SetLatency(peerId, value). - - // ID returns the peer's ID - ID() ID - - // Key returns the ID as a Key (string) for maps. - Key() u.Key - - // Addresses returns the peer's multiaddrs - Addresses() []ma.Multiaddr - - // AddAddress adds the given Multiaddr address to Peer's addresses. - // returns whether this was a newly added address. - AddAddress(a ma.Multiaddr) bool - - // NetAddress returns the first Multiaddr found for a given network. - NetAddress(n string) ma.Multiaddr - - // Services returns the peer's services - // Services() []mux.ProtocolID - // SetServices([]mux.ProtocolID) - - // Priv/PubKey returns the peer's Private Key - PrivKey() ic.PrivKey - PubKey() ic.PubKey - - // LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair. - // Error if (a) unmarshalling fails, or (b) pubkey does not match id. 
- LoadAndVerifyKeyPair(marshalled []byte) error - VerifyAndSetPrivKey(sk ic.PrivKey) error - VerifyAndSetPubKey(pk ic.PubKey) error - - // Get/SetLatency manipulate the current latency measurement. - GetLatency() (out time.Duration) - SetLatency(laten time.Duration) - - // Get/SetType indicate whether this is a local or remote peer - GetType() Type - SetType(Type) - - //Get/Set Agent and Protocol Versions - GetVersions() (agent, protocol string) - SetVersions(agent, protocol string) - // Update with the data of another peer instance - Update(Peer) error - - Loggable() map[string]interface{} -} - -type Type uint8 - -const ( - // Unspecified indicates peer was created without specifying Type - Unspecified Type = iota - Local - Remote -) - -func (t Type) String() string { - switch t { - case Local: - return "localPeer" - case Remote: - return "remotePeer" - default: - } - return "unspecifiedPeer" -} - -type peer struct { - id ID - addresses []ma.Multiaddr - // services []mux.ProtocolID - - privKey ic.PrivKey - pubKey ic.PubKey - - // TODO move latency away from peer into the package that uses it. Instead, - // within that package, map from ID to latency value. - latency time.Duration - - protocolVersion string - agentVersion string - - // typ can be Local, Remote, or Unspecified (default) - typ Type - - sync.RWMutex } // String prints out the peer. @@ -159,242 +35,98 @@ type peer struct { // enforce this by only exposing functions that generate // IDs safely. Then any peer.ID type found in the // codebase is known to be correct. 
-func (p *peer) String() string { - pid := p.id.String() +func (id ID) String() string { + pid := id.Pretty() maxRunes := 6 if len(pid) < maxRunes { maxRunes = len(pid) } - return fmt.Sprintf("peer %s", pid[:maxRunes]) + return fmt.Sprintf("", pid[:maxRunes]) } -func (p *peer) Loggable() map[string]interface{} { - return map[string]interface{}{ - p.GetType().String(): map[string]interface{}{ - "id": p.ID().String(), - "latency": p.GetLatency(), - }, - } +// MatchesPrivateKey tests whether this ID was derived from sk +func (id ID) MatchesPrivateKey(sk ic.PrivKey) bool { + return id.MatchesPublicKey(sk.GetPublic()) } -// Key returns the ID as a Key (string) for maps. -func (p *peer) Key() u.Key { - return u.Key(p.id) -} - -// ID returns the peer's ID -func (p *peer) ID() ID { - return p.id -} - -// PrivKey returns the peer's Private Key -func (p *peer) PrivKey() ic.PrivKey { - return p.privKey -} - -// PubKey returns the peer's Private Key -func (p *peer) PubKey() ic.PubKey { - return p.pubKey -} - -// Addresses returns the peer's multiaddrs -func (p *peer) Addresses() []ma.Multiaddr { - cp := make([]ma.Multiaddr, len(p.addresses)) - p.RLock() - copy(cp, p.addresses) - defer p.RUnlock() - return cp -} - -// AddAddress adds the given Multiaddr address to Peer's addresses. -// Returns whether this address was a newly added address -func (p *peer) AddAddress(a ma.Multiaddr) bool { - if a == nil { - panic("adding a nil Multiaddr") - } - - p.Lock() - defer p.Unlock() - - for _, addr := range p.addresses { - if addr.Equal(a) { - return false - } - } - p.addresses = append(p.addresses, a) - return true -} - -// NetAddress returns the first Multiaddr found for a given network. 
-func (p *peer) NetAddress(n string) ma.Multiaddr { - p.RLock() - defer p.RUnlock() - - for _, a := range p.addresses { - for _, p := range a.Protocols() { - if p.Name == n { - return a - } - } - } - return nil -} - -// func (p *peer) Services() []mux.ProtocolID { -// p.RLock() -// defer p.RUnlock() -// return p.services -// } -// -// func (p *peer) SetServices(s []mux.ProtocolID) { -// p.Lock() -// defer p.Unlock() -// p.services = s -// } - -// GetLatency retrieves the current latency measurement. -func (p *peer) GetLatency() (out time.Duration) { - p.RLock() - out = p.latency - p.RUnlock() - return -} - -// SetLatency sets the latency measurement. -// TODO: Instead of just keeping a single number, -// keep a running average over the last hour or so -// Yep, should be EWMA or something. (-jbenet) -func (p *peer) SetLatency(laten time.Duration) { - p.Lock() - if p.latency == 0 { - p.latency = laten - } else { - p.latency = ((p.latency * 9) + laten) / 10 - } - p.Unlock() -} - -func (p *peer) SetType(t Type) { - p.Lock() - p.typ = t - defer p.Unlock() -} - -func (p *peer) GetType() Type { - p.Lock() - defer p.Unlock() - return p.typ -} - -// LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair. -// Error if (a) unmarshalling fails, or (b) pubkey does not match id. -func (p *peer) LoadAndVerifyKeyPair(marshalled []byte) error { - sk, err := ic.UnmarshalPrivateKey(marshalled) +// MatchesPublicKey tests whether this ID was derived from pk +func (id ID) MatchesPublicKey(pk ic.PubKey) bool { + oid, err := IDFromPublicKey(pk) if err != nil { - return fmt.Errorf("Failed to unmarshal private key: %v", err) + return false } - - return p.VerifyAndSetPrivKey(sk) + return oid == id } -// VerifyAndSetPrivKey sets private key, given its pubkey matches the peer.ID -func (p *peer) VerifyAndSetPrivKey(sk ic.PrivKey) error { - - // construct and assign pubkey. 
ensure it matches this peer - if err := p.VerifyAndSetPubKey(sk.GetPublic()); err != nil { - return err +// IDFromString cast a string to ID type, and validate +// the id to make sure it is a multihash. +func IDFromString(s string) (ID, error) { + if _, err := mh.Cast([]byte(s)); err != nil { + return ID(""), err } - - p.Lock() - defer p.Unlock() - - // if we didn't have the priavte key, assign it - if p.privKey == nil { - p.privKey = sk - return nil - } - - // if we already had the keys, check they're equal. - if p.privKey.Equals(sk) { - return nil // as expected. keep the old objects. - } - - // keys not equal. invariant violated. this warrants a panic. - // these keys should be _the same_ because peer.ID = H(pk) - // this mismatch should never happen. - log.Errorf("%s had PrivKey: %v -- got %v", p, p.privKey, sk) - panic("invariant violated: unexpected key mismatch") + return ID(s), nil } -// VerifyAndSetPubKey sets public key, given it matches the peer.ID -func (p *peer) VerifyAndSetPubKey(pk ic.PubKey) error { - pkid, err := IDFromPubKey(pk) +// IDFromBytes cast a string to ID type, and validate +// the id to make sure it is a multihash. +func IDFromBytes(b []byte) (ID, error) { + if _, err := mh.Cast(b); err != nil { + return ID(""), err + } + return ID(b), nil +} + +// IDB58Decode returns a b58-decoded Peer +func IDB58Decode(s string) (ID, error) { + m, err := mh.FromB58String(s) if err != nil { - return fmt.Errorf("Failed to hash public key: %v", err) + return "", err } - - p.Lock() - defer p.Unlock() - - if !p.id.Equal(pkid) { - return fmt.Errorf("Public key does not match peer.ID.") - } - - // if we didn't have the keys, assign them. - if p.pubKey == nil { - p.pubKey = pk - return nil - } - - // if we already had the pubkey, check they're equal. - if p.pubKey.Equals(pk) { - return nil // as expected. keep the old objects. - } - - // keys not equal. invariant violated. this warrants a panic. 
- // these keys should be _the same_ because peer.ID = H(pk) - // this mismatch should never happen. - log.Errorf("%s had PubKey: %v -- got %v", p, p.pubKey, pk) - panic("invariant violated: unexpected key mismatch") + return ID(m), err } -// Updates this peer with information from another peer instance -func (p *peer) Update(other Peer) error { - if !p.ID().Equal(other.ID()) { - return errors.New("peer ids do not match") - } - - for _, a := range other.Addresses() { - p.AddAddress(a) - } - - p.SetLatency(other.GetLatency()) - - p.SetType(other.GetType()) - - sk := other.PrivKey() - pk := other.PubKey() - p.Lock() - if p.privKey == nil { - p.privKey = sk - } - - if p.pubKey == nil { - p.pubKey = pk - } - defer p.Unlock() - return nil +// IDB58Encode returns b58-encoded string +func IDB58Encode(id ID) string { + return b58.Encode([]byte(id)) } -func (p *peer) GetVersions() (agent, protocol string) { - p.RLock() - defer p.RUnlock() - return p.agentVersion, p.protocolVersion +// IDHexDecode returns a b58-decoded Peer +func IDHexDecode(s string) (ID, error) { + m, err := mh.FromHexString(s) + if err != nil { + return "", err + } + return ID(m), err } -func (p *peer) SetVersions(agent, protocol string) { - p.Lock() - defer p.Unlock() - p.agentVersion = agent - p.protocolVersion = protocol +// IDHexEncode returns b58-encoded string +func IDHexEncode(id ID) string { + return hex.EncodeToString([]byte(id)) +} + +// IDFromPublicKey returns the Peer ID corresponding to pk +func IDFromPublicKey(pk ic.PubKey) (ID, error) { + b, err := pk.Bytes() + if err != nil { + return "", err + } + hash := u.Hash(b) + return ID(hash), nil +} + +// IDFromPrivateKey returns the Peer ID corresponding to sk +func IDFromPrivateKey(sk ic.PrivKey) (ID, error) { + return IDFromPublicKey(sk.GetPublic()) +} + +// Map maps a Peer ID to a struct. +type Set map[ID]struct{} + +// PeerInfo is a small struct used to pass around a peer with +// a set of addresses (and later, keys?). 
This is not meant to be +// a complete view of the system, but rather to model updates to +// the peerstore. It is used by things like the routing system. +type PeerInfo struct { + ID ID + Addrs []ma.Multiaddr } diff --git a/peer/peer_test.go b/peer/peer_test.go index 7224882fc..810df6218 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -1,65 +1,161 @@ package peer import ( + "encoding/base64" + "fmt" + "strings" "testing" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + ic "github.com/jbenet/go-ipfs/crypto" + u "github.com/jbenet/go-ipfs/util" + + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" ) -func TestNetAddress(t *testing.T) { +var gen1 keyset // generated +var gen2 keyset // generated +var man keyset // manual - tcp, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1234") - if err != nil { - t.Error(err) - return +func init() { + if err := gen1.generate(); err != nil { + panic(err) + } + if err := gen2.generate(); err != nil { + panic(err) } - udp, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/2345") - if err != nil { - t.Error(err) - return - } - - mh, err := mh.FromHexString("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") - if err != nil { - t.Error(err) - return - } - - p := NewPeerstore().WithID(ID(mh)) - p.AddAddress(tcp) - p.AddAddress(udp) - p.AddAddress(tcp) - - if len(p.Addresses()) == 3 { - t.Error("added same address twice") - } - - tcp2 := p.NetAddress("tcp") - if tcp2 != tcp { - t.Error("NetAddress lookup failed", tcp, tcp2) - } - - udp2 := p.NetAddress("udp") - if udp2 != udp { - t.Error("NetAddress lookup failed", udp, udp2) + skManBytes = strings.Replace(skManBytes, "\n", "", -1) + if err := man.load(hpkpMan, skManBytes); err != nil { + panic(err) } } -func TestStringMethodWithSmallId(t *testing.T) { - p := NewPeerstore().WithID([]byte(string(0))) - p1, ok := p.(*peer) - if !ok { - 
t.Fatal("WithID doesn't return a peer") - } - p1.String() +type keyset struct { + sk ic.PrivKey + pk ic.PubKey + hpk string + hpkp string } -func TestDefaultType(t *testing.T) { - t.Log("Ensure that peers are initialized to Unspecified by default") - p := peer{} - if p.GetType() != Unspecified { - t.Fatalf("Peer's default type is was not `Unspecified`") +func (ks *keyset) generate() error { + var err error + ks.sk, ks.pk, err = ic.GenerateKeyPair(ic.RSA, 1024) + if err != nil { + return err } + + bpk, err := ks.pk.Bytes() + if err != nil { + return err + } + + ks.hpk = string(u.Hash(bpk)) + ks.hpkp = b58.Encode([]byte(ks.hpk)) + return nil } + +func (ks *keyset) load(hpkp, skBytesStr string) error { + skBytes, err := base64.StdEncoding.DecodeString(skBytesStr) + if err != nil { + return err + } + + ks.sk, err = ic.UnmarshalPrivateKey(skBytes) + if err != nil { + return err + } + + ks.pk = ks.sk.GetPublic() + bpk, err := ks.pk.Bytes() + if err != nil { + return err + } + + ks.hpk = string(u.Hash(bpk)) + ks.hpkp = b58.Encode([]byte(ks.hpk)) + if ks.hpkp != hpkp { + return fmt.Errorf("hpkp doesn't match key. 
%s", hpkp) + } + return nil +} + +func TestIDMatchesPublicKey(t *testing.T) { + + test := func(ks keyset) { + p1, err := IDB58Decode(ks.hpkp) + if err != nil { + t.Fatal(err) + } + + if ks.hpk != string(p1) { + t.Error("p1 and hpk differ") + } + + if !p1.MatchesPublicKey(ks.pk) { + t.Fatal("p1 does not match pk") + } + + p2, err := IDFromPublicKey(ks.pk) + if err != nil { + t.Fatal(err) + } + + if p1 != p2 { + t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty()) + } + + if p2.Pretty() != ks.hpkp { + t.Error("hpkp and p2.Pretty differ", ks.hpkp, p2.Pretty()) + } + } + + test(gen1) + test(gen2) + test(man) +} + +func TestIDMatchesPrivateKey(t *testing.T) { + + test := func(ks keyset) { + p1, err := IDB58Decode(ks.hpkp) + if err != nil { + t.Fatal(err) + } + + if ks.hpk != string(p1) { + t.Error("p1 and hpk differ") + } + + if !p1.MatchesPrivateKey(ks.sk) { + t.Fatal("p1 does not match sk") + } + + p2, err := IDFromPrivateKey(ks.sk) + if err != nil { + t.Fatal(err) + } + + if p1 != p2 { + t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty()) + } + } + + test(gen1) + test(gen2) + test(man) +} + +var hpkpMan = `QmRK3JgmVEGiewxWbhpXLJyjWuGuLeSTMTndA1coMHEy5o` +var skManBytes = ` +CAAS4AQwggJcAgEAAoGBAL7w+Wc4VhZhCdM/+Hccg5Nrf4q9NXWwJylbSrXz/unFS24wyk6pEk0zi3W +7li+vSNVO+NtJQw9qGNAMtQKjVTP+3Vt/jfQRnQM3s6awojtjueEWuLYVt62z7mofOhCtj+VwIdZNBo +/EkLZ0ETfcvN5LVtLYa8JkXybnOPsLvK+PAgMBAAECgYBdk09HDM7zzL657uHfzfOVrdslrTCj6p5mo +DzvCxLkkjIzYGnlPuqfNyGjozkpSWgSUc+X+EGLLl3WqEOVdWJtbM61fewEHlRTM5JzScvwrJ39t7o6 +CCAjKA0cBWBd6UWgbN/t53RoWvh9HrA2AW5YrT0ZiAgKe9y7EMUaENVJ8QJBAPhpdmb4ZL4Fkm4OKia +NEcjzn6mGTlZtef7K/0oRC9+2JkQnCuf6HBpaRhJoCJYg7DW8ZY+AV6xClKrgjBOfERMCQQDExhnzu2 +dsQ9k8QChBlpHO0TRbZBiQfC70oU31kM1AeLseZRmrxv9Yxzdl8D693NNWS2JbKOXl0kMHHcuGQLMVA +kBZ7WvkmPV3aPL6jnwp2pXepntdVnaTiSxJ1dkXShZ/VSSDNZMYKY306EtHrIu3NZHtXhdyHKcggDXr +qkBrdgErAkAlpGPojUwemOggr4FD8sLX1ot2hDJyyV7OK2FXfajWEYJyMRL1Gm9Uk1+Un53RAkJneqp 
+JGAzKpyttXBTIDO51AkEA98KTiROMnnU8Y6Mgcvr68/SMIsvCYMt9/mtwSBGgl80VaTQ5Hpaktl6Xbh +VUt5Wv0tRxlXZiViCGCD1EtrrwTw== +` diff --git a/peer/peerstore.go b/peer/peerstore.go index db66c90df..46de50f0d 100644 --- a/peer/peerstore.go +++ b/peer/peerstore.go @@ -1,132 +1,252 @@ package peer import ( + "errors" "sync" ic "github.com/jbenet/go-ipfs/crypto" - u "github.com/jbenet/go-ipfs/util" - errors "github.com/jbenet/go-ipfs/util/debugerror" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -// Peerstore provides a threadsafe collection for peers. +// Peerstore provides a threadsafe store of Peer related +// information. type Peerstore interface { - FindOrCreate(ID) (Peer, error) - Add(Peer) (Peer, error) - Delete(ID) error - All() (*Map, error) + KeyBook + AddressBook + Metrics - WithKeyPair(sk ic.PrivKey, pk ic.PubKey) (Peer, error) - WithID(id ID) Peer - WithIDString(id string) Peer + // Peers returns a list of all peer.IDs in this Peerstore + Peers() []ID + + // PeerInfo returns a peer.PeerInfo struct for given peer.ID. + // This is a small slice of the information Peerstore has on + // that peer, useful to other services. + PeerInfo(ID) PeerInfo + + // Get/Put is a simple registry for other peer-related key/value pairs. + // if we find something we use often, it should become its own set of + // methods. this is a last resort. 
+ Get(id ID, key string) (interface{}, error) + Put(id ID, key string, val interface{}) error +} + +// AddressBook tracks the addresses of Peers +type AddressBook interface { + Addresses(ID) []ma.Multiaddr + AddAddress(ID, ma.Multiaddr) + AddAddresses(ID, []ma.Multiaddr) +} + +type addressMap map[string]ma.Multiaddr + +type addressbook struct { + addrs map[ID]addressMap + sync.RWMutex +} + +func newAddressbook() *addressbook { + return &addressbook{addrs: map[ID]addressMap{}} +} + +func (ab *addressbook) Peers() []ID { + ab.RLock() + ps := make([]ID, 0, len(ab.addrs)) + for p := range ab.addrs { + ps = append(ps, p) + } + ab.RUnlock() + return ps +} + +func (ab *addressbook) Addresses(p ID) []ma.Multiaddr { + ab.RLock() + defer ab.RUnlock() + + maddrs, found := ab.addrs[p] + if !found { + return nil + } + + maddrs2 := make([]ma.Multiaddr, 0, len(maddrs)) + for _, m := range maddrs { + maddrs2 = append(maddrs2, m) + } + return maddrs2 +} + +func (ab *addressbook) AddAddress(p ID, m ma.Multiaddr) { + ab.Lock() + defer ab.Unlock() + + _, found := ab.addrs[p] + if !found { + ab.addrs[p] = addressMap{} + } + ab.addrs[p][m.String()] = m +} + +func (ab *addressbook) AddAddresses(p ID, ms []ma.Multiaddr) { + ab.Lock() + defer ab.Unlock() + + for _, m := range ms { + _, found := ab.addrs[p] + if !found { + ab.addrs[p] = addressMap{} + } + ab.addrs[p][m.String()] = m + } +} + +// KeyBook tracks the Public keys of Peers. +type KeyBook interface { + PubKey(ID) ic.PubKey + AddPubKey(ID, ic.PubKey) error + + PrivKey(ID) ic.PrivKey + AddPrivKey(ID, ic.PrivKey) error +} + +type keybook struct { + pks map[ID]ic.PubKey + sks map[ID]ic.PrivKey + + sync.RWMutex // same lock. wont happen a ton. 
+} + +func newKeybook() *keybook { + return &keybook{ + pks: map[ID]ic.PubKey{}, + sks: map[ID]ic.PrivKey{}, + } +} + +func (kb *keybook) Peers() []ID { + kb.RLock() + ps := make([]ID, 0, len(kb.pks)+len(kb.sks)) + for p := range kb.pks { + ps = append(ps, p) + } + for p := range kb.sks { + if _, found := kb.pks[p]; !found { + ps = append(ps, p) + } + } + kb.RUnlock() + return ps +} + +func (kb *keybook) PubKey(p ID) ic.PubKey { + kb.RLock() + pk := kb.pks[p] + kb.RUnlock() + return pk +} + +func (kb *keybook) AddPubKey(p ID, pk ic.PubKey) error { + + // check it's correct first + if !p.MatchesPublicKey(pk) { + return errors.New("ID does not match PublicKey") + } + + kb.Lock() + kb.pks[p] = pk + kb.Unlock() + return nil +} + +func (kb *keybook) PrivKey(p ID) ic.PrivKey { + kb.RLock() + sk := kb.sks[p] + kb.RUnlock() + return sk +} + +func (kb *keybook) AddPrivKey(p ID, sk ic.PrivKey) error { + + if sk == nil { + return errors.New("sk is nil (PrivKey)") + } + + // check it's correct first + if !p.MatchesPrivateKey(sk) { + return errors.New("ID does not match PrivateKey") + } + + kb.Lock() + kb.sks[p] = sk + kb.Unlock() + return nil } type peerstore struct { - sync.RWMutex - data map[string]Peer // key is string(ID) + keybook + addressbook + metrics + + // store other data, like versions + ds ds.ThreadSafeDatastore } // NewPeerstore creates a threadsafe collection of peers. func NewPeerstore() Peerstore { return &peerstore{ - data: make(map[string]Peer), + keybook: *newKeybook(), + addressbook: *newAddressbook(), + metrics: *(NewMetrics()).(*metrics), + ds: dssync.MutexWrap(ds.NewMapDatastore()), } } -func (ps *peerstore) FindOrCreate(i ID) (Peer, error) { - ps.Lock() - defer ps.Unlock() - - if i == nil { - panic("wat") - } - - p, ok := ps.data[i.String()] - if !ok { // not found, construct it ourselves, add it to datastore, and return. - - // TODO(brian) kinda dangerous, no? 
If ID is invalid and doesn't - // correspond to an actual valid peer ID, this peerstore will return an - // instantiated peer value, allowing the error to propagate. It might - // be better to nip this at the bud by returning nil and making the - // client manually add a Peer. To keep the peerstore in control, this - // can even be a peerstore method that performs cursory validation. - // - // Potential bad case: Suppose values arrive from untrusted providers - // in the DHT. - p = &peer{id: i} - ps.data[i.String()] = p - } - - // no error, got it back fine - return p, nil +func (ps *peerstore) Put(p ID, key string, val interface{}) error { + dsk := ds.NewKey(string(p) + "/" + key) + return ps.ds.Put(dsk, val) } -func (p *peerstore) Add(peer Peer) (Peer, error) { - p.Lock() - defer p.Unlock() - - existing, ok := p.data[peer.Key().String()] - if !ok { // not found? just add and return. - p.data[peer.Key().String()] = peer - return peer, nil - } - // already here. - if peer == existing { - return peer, nil - } - existing.Update(peer) // must do some merging. - return existing, nil +func (ps *peerstore) Get(p ID, key string) (interface{}, error) { + dsk := ds.NewKey(string(p) + "/" + key) + return ps.ds.Get(dsk) } -func (p *peerstore) Delete(i ID) error { - p.Lock() - defer p.Unlock() - - delete(p.data, i.String()) - return nil -} - -func (p *peerstore) All() (*Map, error) { - p.Lock() - defer p.Unlock() - - ps := Map{} - for k, v := range p.data { - ps[u.Key(k)] = v +func (ps *peerstore) Peers() []ID { + set := map[ID]struct{}{} + for _, p := range ps.keybook.Peers() { + set[p] = struct{}{} } - return &ps, nil -} - -// WithKeyPair returns a Peer object with given keys. 
-func (ps *peerstore) WithKeyPair(sk ic.PrivKey, pk ic.PubKey) (Peer, error) { - if sk == nil && pk == nil { - return nil, errors.Errorf("PeerWithKeyPair nil keys") + for _, p := range ps.addressbook.Peers() { + set[p] = struct{}{} } - pk2 := sk.GetPublic() - if pk == nil { - pk = pk2 - } else if !pk.Equals(pk2) { - return nil, errors.Errorf("key mismatch. pubkey is not privkey's pubkey") + pps := make([]ID, 0, len(set)) + for p := range set { + pps = append(pps, p) } + return pps +} - pkid, err := IDFromPubKey(pk) - if err != nil { - return nil, errors.Errorf("Failed to hash public key: %v", err) +func (ps *peerstore) PeerInfo(p ID) PeerInfo { + return PeerInfo{ + ID: p, + Addrs: ps.addressbook.Addresses(p), } - - p := &peer{id: pkid, pubKey: pk, privKey: sk} - ps.Add(p) - return p, nil } -// WithID constructs a peer with given ID. -func (ps *peerstore) WithID(id ID) Peer { - p := &peer{id: id} - ps.Add(p) - return p +func PeerInfos(ps Peerstore, peers []ID) []PeerInfo { + pi := make([]PeerInfo, len(peers)) + for i, p := range peers { + pi[i] = ps.PeerInfo(p) + } + return pi } -// WithIDString constructs a peer with given ID (string). 
-func (ps *peerstore) WithIDString(id string) Peer { - return ps.WithID(ID(id)) +func PeerInfoIDs(pis []PeerInfo) []ID { + ps := make([]ID, len(pis)) + for i, pi := range pis { + ps[i] = pi.ID + } + return ps } diff --git a/peer/peerstore_test.go b/peer/peerstore_test.go index 808e48eb1..ae08e5f14 100644 --- a/peer/peerstore_test.go +++ b/peer/peerstore_test.go @@ -1,90 +1,77 @@ package peer import ( - "errors" "testing" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -func setupPeer(ps Peerstore, id string, addr string) (Peer, error) { - tcp, err := ma.NewMultiaddr(addr) +func IDS(t *testing.T, ids string) ID { + id, err := IDB58Decode(ids) if err != nil { - return nil, err + t.Fatal(err) } - - p := ps.WithIDString(id) - p.AddAddress(tcp) - return p, nil + return id } -func TestPeerstore(t *testing.T) { +func MA(t *testing.T, m string) ma.Multiaddr { + maddr, err := ma.NewMultiaddr(m) + if err != nil { + t.Fatal(err) + } + return maddr +} + +func TestAddresses(t *testing.T) { ps := NewPeerstore() - p11, _ := setupPeer(ps, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31", "/ip4/127.0.0.1/tcp/1234") - p21, _ := setupPeer(ps, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32", "/ip4/127.0.0.1/tcp/2345") - // p31, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", "/ip4/127.0.0.1/tcp/3456") - // p41, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34", "/ip4/127.0.0.1/tcp/4567") + id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN") + id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ") + id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn") - p13, err := ps.Add(p11) - if err != nil { - t.Error(err) + ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111") + ma21 := MA(t, "/ip4/1.2.3.2/tcp/1111") + ma22 := MA(t, "/ip4/1.2.3.2/tcp/2222") + ma31 := MA(t, "/ip4/1.2.3.3/tcp/1111") + ma32 := MA(t, "/ip4/1.2.3.3/tcp/2222") + ma33 := MA(t, "/ip4/1.2.3.3/tcp/3333") + + ps.AddAddress(id1, ma11) + 
ps.AddAddress(id2, ma21) + ps.AddAddress(id2, ma22) + ps.AddAddress(id3, ma31) + ps.AddAddress(id3, ma32) + ps.AddAddress(id3, ma33) + + test := func(exp, act []ma.Multiaddr) { + if len(exp) != len(act) { + t.Fatal("lengths not the same") + } + + for _, a := range exp { + found := false + + for _, b := range act { + if a.Equal(b) { + found = true + break + } + } + + if !found { + t.Fatal("expected address %s not found", a) + } + } } - if p13 != p11 { - t.Error("these should be the same") - } - - p12, err := ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) - if err != nil { - t.Error(err) - } - - if p11 != p12 { - t.Error(errors.New("peers should be the same")) - } - - p23, err := ps.Add(p21) - if err != nil { - t.Error(err) - } - if p23 != p21 { - t.Error("These should be the same") - } - - p22, err := ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")) - if err != nil { - t.Error(err) - } - - if p21 != p22 { - t.Error(errors.New("peers should be the same")) - } - - _, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")) - if err != nil { - t.Error(errors.New("should not have an error here")) - } - - err = ps.Delete(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) - if err != nil { - t.Error(err) - } - - // reconstruct! - _, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) - if err != nil { - t.Error(errors.New("should not have an error anyway. 
reconstruct!")) - } - - p22, err = ps.FindOrCreate(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")) - if err != nil { - t.Error(err) - } - - if p21 != p22 { - t.Error(errors.New("peers should be the same")) - } + // test the Addresses return value + test([]ma.Multiaddr{ma11}, ps.Addresses(id1)) + test([]ma.Multiaddr{ma21, ma22}, ps.Addresses(id2)) + test([]ma.Multiaddr{ma31, ma32, ma33}, ps.Addresses(id3)) + // test also the PeerInfo return + test([]ma.Multiaddr{ma11}, ps.PeerInfo(id1).Addrs) + test([]ma.Multiaddr{ma21, ma22}, ps.PeerInfo(id2).Addrs) + test([]ma.Multiaddr{ma31, ma32, ma33}, ps.PeerInfo(id3).Addrs) } diff --git a/peer/queue/distance.go b/peer/queue/distance.go index 8eff20288..ebe876bb1 100644 --- a/peer/queue/distance.go +++ b/peer/queue/distance.go @@ -13,7 +13,7 @@ import ( // peerMetric tracks a peer and its distance to something else. type peerMetric struct { // the peer - peer peer.Peer + peer peer.ID // big.Int for XOR metric metric *big.Int @@ -64,11 +64,11 @@ func (pq *distancePQ) Len() int { return len(pq.heap) } -func (pq *distancePQ) Enqueue(p peer.Peer) { +func (pq *distancePQ) Enqueue(p peer.ID) { pq.Lock() defer pq.Unlock() - distance := ks.XORKeySpace.Key(p.ID()).Distance(pq.from) + distance := ks.XORKeySpace.Key([]byte(p)).Distance(pq.from) heap.Push(&pq.heap, &peerMetric{ peer: p, @@ -76,7 +76,7 @@ func (pq *distancePQ) Enqueue(p peer.Peer) { }) } -func (pq *distancePQ) Dequeue() peer.Peer { +func (pq *distancePQ) Dequeue() peer.ID { pq.Lock() defer pq.Unlock() diff --git a/peer/queue/interface.go b/peer/queue/interface.go index ba17f0aa2..c2106164e 100644 --- a/peer/queue/interface.go +++ b/peer/queue/interface.go @@ -11,8 +11,8 @@ type PeerQueue interface { Len() int // Enqueue adds this node to the queue. 
- Enqueue(peer.Peer) + Enqueue(peer.ID) // Dequeue retrieves the highest (smallest int) priority node - Dequeue() peer.Peer + Dequeue() peer.ID } diff --git a/peer/queue/queue_test.go b/peer/queue/queue_test.go index d91892465..d3e661400 100644 --- a/peer/queue/queue_test.go +++ b/peer/queue/queue_test.go @@ -8,22 +8,18 @@ import ( peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ) -func newPeer(id string) peer.Peer { - return testutil.NewPeerWithIDString(id) -} - func TestQueue(t *testing.T) { - p1 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") - p2 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32") - p3 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") - p4 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34") - p5 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") + p1 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") // these aren't valid, because need to hex-decode. + p2 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32") // these aren't valid, because need to hex-decode. + p3 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") // these aren't valid, because need to hex-decode. + p4 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34") // these aren't valid, because need to hex-decode. + p5 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31") // these aren't valid, because need to hex-decode. + // but they work. 
// these are the peer.IDs' XORKeySpace Key values: // [228 47 151 130 156 102 222 232 218 31 132 94 170 208 80 253 120 103 55 35 91 237 48 157 81 245 57 247 66 150 9 40] @@ -67,10 +63,10 @@ func TestQueue(t *testing.T) { } -func newPeerTime(t time.Time) peer.Peer { +func newPeerTime(t time.Time) peer.ID { s := fmt.Sprintf("hmmm time: %v", t) h := u.Hash([]byte(s)) - return testutil.NewPeerWithID(peer.ID(h)) + return peer.ID(h) } func TestSyncQueue(t *testing.T) { diff --git a/peer/queue/sync.go b/peer/queue/sync.go index c219e2671..7a00eeb43 100644 --- a/peer/queue/sync.go +++ b/peer/queue/sync.go @@ -9,8 +9,8 @@ import ( // ChanQueue makes any PeerQueue synchronizable through channels. type ChanQueue struct { Queue PeerQueue - EnqChan chan<- peer.Peer - DeqChan <-chan peer.Peer + EnqChan chan<- peer.ID + DeqChan <-chan peer.ID } // NewChanQueue creates a ChanQueue by wrapping pq. @@ -23,8 +23,8 @@ func NewChanQueue(ctx context.Context, pq PeerQueue) *ChanQueue { func (cq *ChanQueue) process(ctx context.Context) { // construct the channels here to be able to use them bidirectionally - enqChan := make(chan peer.Peer, 10) - deqChan := make(chan peer.Peer, 10) + enqChan := make(chan peer.ID) + deqChan := make(chan peer.ID) cq.EnqChan = enqChan cq.DeqChan = deqChan @@ -32,8 +32,8 @@ func (cq *ChanQueue) process(ctx context.Context) { go func() { defer close(deqChan) - var next peer.Peer - var item peer.Peer + var next peer.ID + var item peer.ID var more bool for { @@ -60,10 +60,10 @@ func (cq *ChanQueue) process(ctx context.Context) { cq.Queue.Enqueue(item) cq.Queue.Enqueue(next) - next = nil + next = "" case deqChan <- next: - next = nil + next = "" case <-ctx.Done(): return diff --git a/routing/dht/dht.go b/routing/dht/dht.go index 6e49c84cf..a893e62b8 100644 --- a/routing/dht/dht.go +++ b/routing/dht/dht.go @@ -34,11 +34,10 @@ const doPinging = false // It is used to implement the base IpfsRouting module. 
type IpfsDHT struct { network inet.Network // the network services we need - self peer.Peer // Local peer (yourself) - peerstore peer.Peerstore // Other peers + self peer.ID // Local peer (yourself) + peerstore peer.Peerstore // Peer Registry - datastore ds.Datastore // Local data - dslock sync.Mutex + datastore ds.ThreadSafeDatastore // Local data routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes providers *ProviderManager @@ -53,19 +52,19 @@ type IpfsDHT struct { } // NewDHT creates a new DHT object with the given peer as the 'local' host -func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, n inet.Network, dstore ds.Datastore) *IpfsDHT { +func NewDHT(ctx context.Context, p peer.ID, n inet.Network, dstore ds.ThreadSafeDatastore) *IpfsDHT { dht := new(IpfsDHT) dht.datastore = dstore dht.self = p - dht.peerstore = ps + dht.peerstore = n.Peerstore() dht.ContextGroup = ctxgroup.WithContext(ctx) dht.network = n n.SetHandler(inet.ProtocolDHT, dht.handleNewStream) - dht.providers = NewProviderManager(dht.Context(), p.ID()) + dht.providers = NewProviderManager(dht.Context(), p) dht.AddChildGroup(dht.providers) - dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Minute) + dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(p), time.Minute, dht.peerstore) dht.birth = time.Now() dht.Validators = make(map[string]ValidatorFunc) @@ -78,8 +77,13 @@ func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, n inet.Network, return dht } +// LocalPeer returns the peer.Peer of the dht. 
+func (dht *IpfsDHT) LocalPeer() peer.ID { + return dht.self +} + // Connect to a new peer at the given address, ping and add to the routing table -func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) error { +func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.ID) error { if err := dht.network.DialPeer(ctx, npeer); err != nil { return err } @@ -95,7 +99,8 @@ func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) error { } // putValueToNetwork stores the given key/value pair at the peer 'p' -func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer, +// meaning: it sends a PUT_VALUE message to p +func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.ID, key string, rec *pb.Record) error { pmes := pb.NewMessage(pb.Message_PUT_VALUE, string(key), 0) @@ -113,25 +118,30 @@ func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer, // putProvider sends a message to peer 'p' saying that the local node // can provide the value of 'key' -func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) error { +func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, key string) error { pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(key), 0) // add self as the provider - pmes.ProviderPeers = pb.PeersToPBPeers(dht.network, []peer.Peer{dht.self}) + pi := dht.peerstore.PeerInfo(dht.self) + pmes.ProviderPeers = pb.PeerInfosToPBPeers(dht.network, []peer.PeerInfo{pi}) err := dht.sendMessage(ctx, p, pmes) if err != nil { return err } - log.Debugf("%s putProvider: %s for %s", dht.self, p, u.Key(key)) + log.Debugf("%s putProvider: %s for %s (%s)", dht.self, p, u.Key(key), pi.Addrs) return nil } -func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer, - key u.Key) ([]byte, []peer.Peer, error) { +// getValueOrPeers queries a particular peer p for the value for +// key. It returns either the value or a list of closer peers. 
+// NOTE: it will update the dht's peerstore with any new addresses +// it finds for the given peer. +func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID, + key u.Key) ([]byte, []peer.PeerInfo, error) { pmes, err := dht.getValueSingle(ctx, p, key) if err != nil { @@ -142,8 +152,8 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer, // Success! We were given the value log.Debug("getValueOrPeers: got value") - // make sure record is still valid - err = dht.verifyRecord(record) + // make sure record is valid. + err = dht.verifyRecordOnline(ctx, record) if err != nil { log.Error("Received invalid record!") return nil, nil, err @@ -151,24 +161,8 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer, return record.GetValue(), nil, nil } - // TODO decide on providers. This probably shouldn't be happening. - if prv := pmes.GetProviderPeers(); prv != nil && len(prv) > 0 { - val, err := dht.getFromPeerList(ctx, key, prv) - if err != nil { - return nil, nil, err - } - log.Debug("getValueOrPeers: get from providers") - return val, nil, nil - } - // Perhaps we were given closer peers - peers, errs := pb.PBPeersToPeers(dht.peerstore, pmes.GetCloserPeers()) - for _, err := range errs { - if err != nil { - log.Error(err) - } - } - + peers := pb.PBPeersToPeerInfos(pmes.GetCloserPeers()) if len(peers) > 0 { log.Debug("getValueOrPeers: peers") return nil, peers, nil @@ -179,51 +173,16 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer, } // getValueSingle simply performs the get value RPC with the given parameters -func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer, +func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.ID, key u.Key) (*pb.Message, error) { pmes := pb.NewMessage(pb.Message_GET_VALUE, string(key), 0) return dht.sendRequest(ctx, p, pmes) } -// TODO: Im not certain on this implementation, we get a list of peers/providers -// from someone what do we do with it? 
Connect to each of them? randomly pick -// one to get the value from? Or just connect to one at a time until we get a -// successful connection and request the value from it? -func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key, - peerlist []*pb.Message_Peer) ([]byte, error) { - - for _, pinfo := range peerlist { - p, err := dht.ensureConnectedToPeer(ctx, pinfo) - if err != nil { - log.Errorf("getFromPeers error: %s", err) - continue - } - - pmes, err := dht.getValueSingle(ctx, p, key) - if err != nil { - log.Errorf("getFromPeers error: %s\n", err) - continue - } - - if record := pmes.GetRecord(); record != nil { - // Success! We were given the value - - err := dht.verifyRecord(record) - if err != nil { - return nil, err - } - dht.providers.AddProvider(key, p) - return record.GetValue(), nil - } - } - return nil, routing.ErrNotFound -} - // getLocal attempts to retrieve the value from the datastore func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) { - dht.dslock.Lock() - defer dht.dslock.Unlock() + log.Debug("getLocal %s", key) v, err := dht.datastore.Get(key.DsKey()) if err != nil { @@ -243,7 +202,7 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) { // TODO: 'if paranoid' if u.Debug { - err = dht.verifyRecord(rec) + err = dht.verifyRecordLocally(rec) if err != nil { log.Errorf("local record verify failed: %s", err) return nil, err @@ -269,41 +228,40 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error { // Update signals the routingTable to Update its last-seen status // on the given peer. -func (dht *IpfsDHT) Update(ctx context.Context, p peer.Peer) { +func (dht *IpfsDHT) Update(ctx context.Context, p peer.ID) { log.Event(ctx, "updatePeer", p) dht.routingTable.Update(p) } // FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in. 
-func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.Peer, *kb.RoutingTable) { +func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.PeerInfo, *kb.RoutingTable) { p := dht.routingTable.Find(id) - if p != nil { - return p, dht.routingTable + if p != "" { + return dht.peerstore.PeerInfo(p), dht.routingTable } - return nil, nil + return peer.PeerInfo{}, nil } // findPeerSingle asks peer 'p' if they know where the peer with id 'id' is -func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID) (*pb.Message, error) { +func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.ID, id peer.ID) (*pb.Message, error) { pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), 0) return dht.sendRequest(ctx, p, pmes) } -func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key) (*pb.Message, error) { +func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key u.Key) (*pb.Message, error) { pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, string(key), 0) return dht.sendRequest(ctx, p, pmes) } -func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.Peer { - peers, errs := pb.PBPeersToPeers(dht.peerstore, pbps) - for _, err := range errs { - log.Errorf("error converting peer: %v", err) - } +func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.ID { + peers := pb.PBPeersToPeerInfos(pbps) + + var provArr []peer.ID + for _, pi := range peers { + p := pi.ID - var provArr []peer.Peer - for _, p := range peers { // Dont add outselves to the list - if p.ID().Equal(dht.self.ID()) { + if p == dht.self { continue } @@ -316,14 +274,14 @@ func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.Peer } // nearestPeersToQuery returns the routing tables closest peers. 
-func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer { +func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID { key := u.Key(pmes.GetKey()) closer := dht.routingTable.NearestPeers(kb.ConvertKey(key), count) return closer } // betterPeerToQuery returns nearestPeersToQuery, but iff closer than self. -func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer { +func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.ID { closer := dht.nearestPeersToQuery(pmes, count) // no node? nil @@ -333,17 +291,17 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer // == to self? thats bad for _, p := range closer { - if p.ID().Equal(dht.self.ID()) { + if p == dht.self { log.Error("Attempted to return self! this shouldnt happen...") return nil } } - var filtered []peer.Peer + var filtered []peer.ID for _, p := range closer { // must all be closer than self key := u.Key(pmes.GetKey()) - if !kb.Closer(dht.self.ID(), p.ID(), key) { + if !kb.Closer(dht.self, p, key) { filtered = append(filtered, p) } } @@ -352,30 +310,13 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer return filtered } -// getPeer searches the peerstore for a peer with the given peer ID -func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) { - p, err := dht.peerstore.FindOrCreate(id) - if err != nil { - err = fmt.Errorf("Failed to get peer from peerstore: %s", err) - log.Error(err) - return nil, err - } - return p, nil -} - -func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, pbp *pb.Message_Peer) (peer.Peer, error) { - p, err := pb.PBPeerToPeer(dht.peerstore, pbp) - if err != nil { - return nil, err - } - - if dht.self.ID().Equal(p.ID()) { - return nil, errors.New("attempting to ensure connection to self") +func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, p peer.ID) error { + if p == dht.self { + return errors.New("attempting to 
ensure connection to self") } // dial connection - err = dht.network.DialPeer(ctx, p) - return p, err + return dht.network.DialPeer(ctx, p) } //TODO: this should be smarter about which keys it selects. @@ -421,14 +362,24 @@ func (dht *IpfsDHT) PingRoutine(t time.Duration) { // Bootstrap builds up list of peers by requesting random peer IDs func (dht *IpfsDHT) Bootstrap(ctx context.Context) { - id := make([]byte, 16) - rand.Read(id) - p, err := dht.FindPeer(ctx, peer.ID(id)) - if err != nil { - log.Errorf("Bootstrap peer error: %s", err) - } - err = dht.network.DialPeer(ctx, p) - if err != nil { - log.Errorf("Bootstrap peer error: %s", err) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + id := make([]byte, 16) + rand.Read(id) + pi, err := dht.FindPeer(ctx, peer.ID(id)) + if err != nil { + // NOTE: this is not an error. this is expected! + log.Errorf("Bootstrap peer error: %s", err) + } + + // woah, we got a peer under a random id? it _cannot_ be valid. + log.Errorf("dht seemingly found a peer at a random bootstrap id (%s)...", pi) + }() } + wg.Wait() } diff --git a/routing/dht/dht_net.go b/routing/dht/dht_net.go index 6e46b4de6..a91e0f53c 100644 --- a/routing/dht/dht_net.go +++ b/routing/dht/dht_net.go @@ -67,7 +67,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) { // sendRequest sends out a request, but also makes sure to // measure the RTT for latency measurements. 
-func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { log.Debugf("%s dht starting stream", dht.self) s, err := dht.network.NewStream(inet.ProtocolDHT, p) @@ -98,13 +98,13 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Messa return nil, errors.New("no response to request") } - p.SetLatency(time.Since(start)) + dht.peerstore.RecordLatency(p, time.Since(start)) log.Event(ctx, "dhtReceivedMessage", dht.self, p, rpmes) return rpmes, nil } // sendMessage sends out a message -func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.Peer, pmes *pb.Message) error { +func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { log.Debugf("%s dht starting stream", dht.self) s, err := dht.network.NewStream(inet.ProtocolDHT, p) diff --git a/routing/dht/dht_test.go b/routing/dht/dht_test.go index 50ec76792..b378675c6 100644 --- a/routing/dht/dht_test.go +++ b/routing/dht/dht_test.go @@ -2,44 +2,47 @@ package dht import ( "bytes" - "math/rand" "sort" "testing" + "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - ci "github.com/jbenet/go-ipfs/crypto" + // ci "github.com/jbenet/go-ipfs/crypto" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" - - "fmt" - "time" ) -func randMultiaddr(t *testing.T) ma.Multiaddr { +func setupDHT(ctx context.Context, t *testing.T, addr ma.Multiaddr) *IpfsDHT { - s := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 
10000+rand.Intn(40000)) - a, err := ma.NewMultiaddr(s) + sk, pk, err := testutil.RandKeyPair(512) + if err != nil { + t.Fatal(err) + } + + p, err := peer.IDFromPublicKey(pk) if err != nil { t.Fatal(err) } - return a -} -func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT { peerstore := peer.NewPeerstore() + peerstore.AddPrivKey(p, sk) + peerstore.AddPubKey(p, pk) + peerstore.AddAddress(p, addr) - n, err := inet.NewNetwork(ctx, p.Addresses(), p, peerstore) + n, err := inet.NewNetwork(ctx, []ma.Multiaddr{addr}, p, peerstore) if err != nil { t.Fatal(err) } - d := NewDHT(ctx, p, peerstore, n, ds.NewMapDatastore()) + dss := dssync.MutexWrap(ds.NewMapDatastore()) + d := NewDHT(ctx, p, n, dss) d.Validators["v"] = func(u.Key, []byte) error { return nil @@ -47,77 +50,53 @@ func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT { return d } -func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.Peer, []*IpfsDHT) { - var addrs []ma.Multiaddr - for i := 0; i < n; i++ { - r := rand.Intn(40000) - a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000+r)) - if err != nil { - t.Fatal(err) - } - addrs = append(addrs, a) - } - - var peers []peer.Peer - for i := 0; i < n; i++ { - p := makePeer(addrs[i]) - peers = append(peers, p) - } - +func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.ID, []*IpfsDHT) { + addrs := make([]ma.Multiaddr, n) dhts := make([]*IpfsDHT, n) + peers := make([]peer.ID, n) + for i := 0; i < n; i++ { - dhts[i] = setupDHT(ctx, t, peers[i]) + addrs[i] = testutil.RandLocalTCPAddress() + dhts[i] = setupDHT(ctx, t, addrs[i]) + peers[i] = dhts[i].self } return addrs, peers, dhts } -func makePeerString(t *testing.T, addr string) peer.Peer { - maddr, err := ma.NewMultiaddr(addr) - if err != nil { +func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) { + + idB := b.self + addrB := b.peerstore.Addresses(idB) + if len(addrB) == 0 { + t.Fatal("peers setup 
incorrectly: no local address") + } + + a.peerstore.AddAddresses(idB, addrB) + if err := a.Connect(ctx, idB); err != nil { t.Fatal(err) } - return makePeer(maddr) -} - -func makePeer(addr ma.Multiaddr) peer.Peer { - sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) - if err != nil { - panic(err) - } - p, err := testutil.NewPeerWithKeyPair(sk, pk) - if err != nil { - panic(err) - } - p.AddAddress(addr) - return p } func TestPing(t *testing.T) { // t.Skip("skipping test to debug another") ctx := context.Background() - addrA := randMultiaddr(t) - addrB := randMultiaddr(t) + addrA := testutil.RandLocalTCPAddress() + addrB := testutil.RandLocalTCPAddress() - peerA := makePeer(addrA) - peerB := makePeer(addrB) + dhtA := setupDHT(ctx, t, addrA) + dhtB := setupDHT(ctx, t, addrB) - dhtA := setupDHT(ctx, t, peerA) - dhtB := setupDHT(ctx, t, peerB) + peerA := dhtA.self + peerB := dhtB.self defer dhtA.Close() defer dhtB.Close() defer dhtA.network.Close() defer dhtB.network.Close() - if err := dhtA.Connect(ctx, peerB); err != nil { - t.Fatal(err) - } - - // if err := dhtB.Connect(ctx, peerA); err != nil { - // t.Fatal(err) - // } + connect(t, ctx, dhtA, dhtB) //Test that we can ping the node ctxT, _ := context.WithTimeout(ctx, 100*time.Millisecond) @@ -136,14 +115,16 @@ func TestValueGetSet(t *testing.T) { ctx := context.Background() - addrA := randMultiaddr(t) - addrB := randMultiaddr(t) + addrA := testutil.RandLocalTCPAddress() + addrB := testutil.RandLocalTCPAddress() - peerA := makePeer(addrA) - peerB := makePeer(addrB) + dhtA := setupDHT(ctx, t, addrA) + dhtB := setupDHT(ctx, t, addrB) - dhtA := setupDHT(ctx, t, peerA) - dhtB := setupDHT(ctx, t, peerB) + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.network.Close() + defer dhtB.network.Close() vf := func(u.Key, []byte) error { return nil @@ -151,15 +132,7 @@ func TestValueGetSet(t *testing.T) { dhtA.Validators["v"] = vf dhtB.Validators["v"] = vf - defer dhtA.Close() - defer dhtB.Close() - defer dhtA.network.Close() - 
defer dhtB.network.Close() - - err := dhtA.Connect(ctx, peerB) - if err != nil { - t.Fatal(err) - } + connect(t, ctx, dhtA, dhtB) ctxT, _ := context.WithTimeout(ctx, time.Second) dhtA.PutValue(ctxT, "/v/hello", []byte("world")) @@ -189,7 +162,7 @@ func TestProvides(t *testing.T) { // t.Skip("skipping test to debug another") ctx := context.Background() - _, peers, dhts := setupDHTS(ctx, 4, t) + _, _, dhts := setupDHTS(ctx, 4, t) defer func() { for i := 0; i < 4; i++ { dhts[i].Close() @@ -197,22 +170,11 @@ func TestProvides(t *testing.T) { } }() - err := dhts[0].Connect(ctx, peers[1]) - if err != nil { - t.Fatal(err) - } + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) - err = dhts[1].Connect(ctx, peers[2]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } - - err = dhts[3].putLocal(u.Key("hello"), []byte("world")) + err := dhts[3].putLocal(u.Key("hello"), []byte("world")) if err != nil { t.Fatal(err) } @@ -227,18 +189,21 @@ func TestProvides(t *testing.T) { t.Fatal(err) } - time.Sleep(time.Millisecond * 60) + // what is this timeout for? was 60ms before. 
+ time.Sleep(time.Millisecond * 6) ctxT, _ := context.WithTimeout(ctx, time.Second) provchan := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 1) - after := time.After(time.Second) select { case prov := <-provchan: - if prov == nil { + if prov.ID == "" { t.Fatal("Got back nil provider") } - case <-after: + if prov.ID != dhts[3].self { + t.Fatal("Got back wrong provider") + } + case <-ctxT.Done(): t.Fatal("Did not get a provider back.") } } @@ -250,7 +215,7 @@ func TestProvidesAsync(t *testing.T) { ctx := context.Background() - _, peers, dhts := setupDHTS(ctx, 4, t) + _, _, dhts := setupDHTS(ctx, 4, t) defer func() { for i := 0; i < 4; i++ { dhts[i].Close() @@ -258,22 +223,11 @@ } }() - err := dhts[0].Connect(ctx, peers[1]) - if err != nil { - t.Fatal(err) - } + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) - err = dhts[1].Connect(ctx, peers[2]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } - - err = dhts[3].putLocal(u.Key("hello"), []byte("world")) + err := dhts[3].putLocal(u.Key("hello"), []byte("world")) if err != nil { t.Fatal(err) } @@ -297,10 +251,10 @@ if !ok { t.Fatal("Provider channel was closed...") } - if p == nil { + if p.ID == "" { t.Fatal("Got back nil provider!") } - if !p.ID().Equal(dhts[3].self.ID()) { + if p.ID != dhts[3].self { t.Fatalf("got a provider, but not the right one. 
%s", p) } case <-ctxT.Done(): @@ -315,7 +269,7 @@ func TestLayeredGet(t *testing.T) { ctx := context.Background() - _, peers, dhts := setupDHTS(ctx, 4, t) + _, _, dhts := setupDHTS(ctx, 4, t) defer func() { for i := 0; i < 4; i++ { dhts[i].Close() @@ -323,22 +277,11 @@ func TestLayeredGet(t *testing.T) { } }() - err := dhts[0].Connect(ctx, peers[1]) - if err != nil { - t.Fatalf("Failed to connect: %s", err) - } + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) - err = dhts[1].Connect(ctx, peers[2]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } - - err = dhts[3].putLocal(u.Key("/v/hello"), []byte("world")) + err := dhts[3].putLocal(u.Key("/v/hello"), []byte("world")) if err != nil { t.Fatal(err) } @@ -377,32 +320,21 @@ func TestFindPeer(t *testing.T) { } }() - err := dhts[0].Connect(ctx, peers[1]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[2]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) ctxT, _ := context.WithTimeout(ctx, time.Second) - p, err := dhts[0].FindPeer(ctxT, peers[2].ID()) + p, err := dhts[0].FindPeer(ctxT, peers[2]) if err != nil { t.Fatal(err) } - if p == nil { + if p.ID == "" { t.Fatal("Failed to find peer.") } - if !p.ID().Equal(peers[2].ID()) { + if p.ID != peers[2] { t.Fatal("Didnt find expected peer.") } } @@ -426,25 +358,10 @@ func TestFindPeersConnectedToPeer(t *testing.T) { // topology: // 0-1, 1-2, 1-3, 2-3 - err := dhts[0].Connect(ctx, peers[1]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[2]) - if err != nil { - t.Fatal(err) - } - - err = dhts[1].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } - - err = dhts[2].Connect(ctx, peers[3]) - if err != nil { - t.Fatal(err) - } + 
connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + connect(t, ctx, dhts[2], dhts[3]) // fmt.Println("0 is", peers[0]) // fmt.Println("1 is", peers[1]) @@ -452,13 +369,13 @@ func TestFindPeersConnectedToPeer(t *testing.T) { // fmt.Println("3 is", peers[3]) ctxT, _ := context.WithTimeout(ctx, time.Second) - pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2].ID()) + pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2]) if err != nil { t.Fatal(err) } - // shouldFind := []peer.Peer{peers[1], peers[3]} - found := []peer.Peer{} + // shouldFind := []peer.ID{peers[1], peers[3]} + found := []peer.PeerInfo{} for nextp := range pchan { found = append(found, nextp) } @@ -475,7 +392,7 @@ func TestFindPeersConnectedToPeer(t *testing.T) { } } -func testPeerListsMatch(t *testing.T, p1, p2 []peer.Peer) { +func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) { if len(p1) != len(p2) { t.Fatal("did not find as many peers as should have", p1, p2) @@ -485,11 +402,11 @@ func testPeerListsMatch(t *testing.T, p1, p2 []peer.Peer) { ids2 := make([]string, len(p2)) for i, p := range p1 { - ids1[i] = p.ID().Pretty() + ids1[i] = string(p) } for i, p := range p2 { - ids2[i] = p.ID().Pretty() + ids2[i] = string(p) } sort.Sort(sort.StringSlice(ids1)) @@ -514,39 +431,41 @@ func TestConnectCollision(t *testing.T) { ctx := context.Background() - addrA := randMultiaddr(t) - addrB := randMultiaddr(t) + addrA := testutil.RandLocalTCPAddress() + addrB := testutil.RandLocalTCPAddress() - peerA := makePeer(addrA) - peerB := makePeer(addrB) + dhtA := setupDHT(ctx, t, addrA) + dhtB := setupDHT(ctx, t, addrB) - dhtA := setupDHT(ctx, t, peerA) - dhtB := setupDHT(ctx, t, peerB) + peerA := dhtA.self + peerB := dhtB.self - done := make(chan struct{}) + errs := make(chan error) go func() { + dhtA.peerstore.AddAddress(peerB, addrB) err := dhtA.Connect(ctx, peerB) - if err != nil { - t.Fatal(err) - } - done <- struct{}{} + errs <- err }() 
go func() { + dhtB.peerstore.AddAddress(peerA, addrA) err := dhtB.Connect(ctx, peerA) - if err != nil { - t.Fatal(err) - } - done <- struct{}{} + errs <- err }() timeout := time.After(time.Second) select { - case <-done: + case e := <-errs: + if e != nil { + t.Fatal(e) + } case <-timeout: t.Fatal("Timeout received!") } select { - case <-done: + case e := <-errs: + if e != nil { + t.Fatal(e) + } case <-timeout: t.Fatal("Timeout received!") } @@ -555,7 +474,5 @@ func TestConnectCollision(t *testing.T) { dhtB.Close() dhtA.network.Close() dhtB.network.Close() - - <-time.After(200 * time.Millisecond) } } diff --git a/routing/dht/diag.go b/routing/dht/diag.go index 82316e2e3..96d2b1a01 100644 --- a/routing/dht/diag.go +++ b/routing/dht/diag.go @@ -32,12 +32,12 @@ func (di *diagInfo) Marshal() []byte { func (dht *IpfsDHT) getDiagInfo() *diagInfo { di := new(diagInfo) di.CodeVersion = "github.com/jbenet/go-ipfs" - di.ID = dht.self.ID() + di.ID = dht.self di.LifeSpan = time.Since(dht.birth) di.Keys = nil // Currently no way to query datastore for _, p := range dht.routingTable.ListPeers() { - d := connDiagInfo{p.GetLatency(), p.ID()} + d := connDiagInfo{dht.peerstore.LatencyEWMA(p), p} di.Connections = append(di.Connections, d) } return di diff --git a/routing/dht/ext_test.go b/routing/dht/ext_test.go index c7315d538..04f5111a9 100644 --- a/routing/dht/ext_test.go +++ b/routing/dht/ext_test.go @@ -4,19 +4,17 @@ import ( "math/rand" "testing" - crand "crypto/rand" - inet "github.com/jbenet/go-ipfs/net" mocknet "github.com/jbenet/go-ipfs/net/mock" peer "github.com/jbenet/go-ipfs/peer" routing "github.com/jbenet/go-ipfs/routing" pb "github.com/jbenet/go-ipfs/routing/dht/pb" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" ds 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" "time" ) @@ -34,8 +32,8 @@ func TestGetFailures(t *testing.T) { nets := mn.Nets() peers := mn.Peers() - ps := peer.NewPeerstore() - d := NewDHT(ctx, peers[0], ps, nets[0], ds.NewMapDatastore()) + tsds := dssync.MutexWrap(ds.NewMapDatastore()) + d := NewDHT(ctx, peers[0], nets[0], tsds) d.Update(ctx, peers[1]) // This one should time out @@ -126,14 +124,6 @@ func TestGetFailures(t *testing.T) { } } -// TODO: Maybe put these in some sort of "ipfs_testutil" package -func _randPeer() peer.Peer { - id := make(peer.ID, 16) - crand.Read(id) - p := testutil.NewPeerWithID(id) - return p -} - func TestNotFound(t *testing.T) { if testing.Short() { t.SkipNow() @@ -146,9 +136,8 @@ func TestNotFound(t *testing.T) { } nets := mn.Nets() peers := mn.Peers() - peerstore := peer.NewPeerstore() - - d := NewDHT(ctx, peers[0], peerstore, nets[0], ds.NewMapDatastore()) + tsds := dssync.MutexWrap(ds.NewMapDatastore()) + d := NewDHT(ctx, peers[0], nets[0], tsds) for _, p := range peers { d.Update(ctx, p) @@ -156,6 +145,7 @@ func TestNotFound(t *testing.T) { // Reply with random peers to every message for _, neti := range nets { + neti := neti // shadow loop var neti.SetHandler(inet.ProtocolDHT, func(s inet.Stream) { defer s.Close() @@ -171,12 +161,14 @@ func TestNotFound(t *testing.T) { case pb.Message_GET_VALUE: resp := &pb.Message{Type: pmes.Type} - ps := []peer.Peer{} + ps := []peer.PeerInfo{} for i := 0; i < 7; i++ { - ps = append(ps, peers[rand.Intn(len(peers))]) + p := peers[rand.Intn(len(peers))] + pi := neti.Peerstore().PeerInfo(p) + ps = append(ps, pi) } - resp.CloserPeers = pb.PeersToPBPeers(d.network, peers) + resp.CloserPeers = pb.PeerInfosToPBPeers(d.network, ps) if err := pbw.WriteMsg(resp); err != nil { panic(err) } @@ -216,9 +208,9 @@ func TestLessThanKResponses(t *testing.T) { } nets := mn.Nets() 
peers := mn.Peers() - peerstore := peer.NewPeerstore() - d := NewDHT(ctx, peers[0], peerstore, nets[0], ds.NewMapDatastore()) + tsds := dssync.MutexWrap(ds.NewMapDatastore()) + d := NewDHT(ctx, peers[0], nets[0], tsds) for i := 1; i < 5; i++ { d.Update(ctx, peers[i]) @@ -226,6 +218,7 @@ func TestLessThanKResponses(t *testing.T) { // Reply with random peers to every message for _, neti := range nets { + neti := neti // shadow loop var neti.SetHandler(inet.ProtocolDHT, func(s inet.Stream) { defer s.Close() @@ -239,9 +232,10 @@ func TestLessThanKResponses(t *testing.T) { switch pmes.GetType() { case pb.Message_GET_VALUE: + pi := neti.Peerstore().PeerInfo(peers[1]) resp := &pb.Message{ Type: pmes.Type, - CloserPeers: pb.PeersToPBPeers(d.network, []peer.Peer{peers[1]}), + CloserPeers: pb.PeerInfosToPBPeers(d.network, []peer.PeerInfo{pi}), } if err := pbw.WriteMsg(resp); err != nil { diff --git a/routing/dht/handlers.go b/routing/dht/handlers.go index 4319ef019..070f320a9 100644 --- a/routing/dht/handlers.go +++ b/routing/dht/handlers.go @@ -17,7 +17,7 @@ import ( var CloserPeerCount = 4 // dhthandler specifies the signature of functions that handle DHT messages. -type dhtHandler func(context.Context, peer.Peer, *pb.Message) (*pb.Message, error) +type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error) func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler { switch t { @@ -38,16 +38,17 @@ func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler { } } -func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey()) // setup response resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) - // first, is the key even a key? + // first, is there even a key? 
key := pmes.GetKey() if key == "" { return nil, errors.New("handleGetValue but no key was provided") + // TODO: send back an error response? could be bad, but the other node's hanging. } // let's first check if we have the value locally. @@ -85,36 +86,38 @@ func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.Peer, pmes *pb.Me // if we know any providers for the requested value, return those. provs := dht.providers.GetProviders(ctx, u.Key(pmes.GetKey())) + provinfos := peer.PeerInfos(dht.peerstore, provs) if len(provs) > 0 { log.Debugf("handleGetValue returning %d provider[s]", len(provs)) - resp.ProviderPeers = pb.PeersToPBPeers(dht.network, provs) + resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.network, provinfos) } // Find closest peer on given cluster to desired key and reply with that info closer := dht.betterPeersToQuery(pmes, CloserPeerCount) + closerinfos := peer.PeerInfos(dht.peerstore, closer) if closer != nil { - for _, p := range closer { - log.Debugf("handleGetValue returning closer peer: '%s'", p) - if len(p.Addresses()) < 1 { - log.Critical("no addresses on peer being sent!") + for _, pi := range closerinfos { + log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID) + if len(pi.Addrs) < 1 { + log.Criticalf(`no addresses on peer being sent! 
+ [local:%s] + [sending:%s] + [remote:%s]`, dht.self, pi.ID, p) } } - resp.CloserPeers = pb.PeersToPBPeers(dht.network, closer) + + resp.CloserPeers = pb.PeerInfosToPBPeers(dht.network, closerinfos) } return resp, nil } // Store a value in this peer local storage -func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { - dht.dslock.Lock() - defer dht.dslock.Unlock() +func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { dskey := u.Key(pmes.GetKey()).DsKey() - err := dht.verifyRecord(pmes.GetRecord()) - if err != nil { - fmt.Println(u.Key(pmes.GetRecord().GetAuthor())) - log.Error("Bad dht record in put request") + if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil { + log.Errorf("Bad dht record in PUT from: %s. %s", u.Key(pmes.GetRecord().GetAuthor()), err) return nil, err } @@ -128,18 +131,18 @@ func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.Peer, pmes *pb.Me return pmes, err } -func (dht *IpfsDHT) handlePing(_ context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { log.Debugf("%s Responding to ping from %s!\n", dht.self, p) return pmes, nil } -func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel()) - var closest []peer.Peer + var closest []peer.ID // if looking for self... special case where we send it on CloserPeers. 
- if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) { - closest = []peer.Peer{dht.self} + if peer.ID(pmes.GetKey()) == dht.self { + closest = []peer.ID{dht.self} } else { closest = dht.betterPeersToQuery(pmes, CloserPeerCount) } @@ -149,22 +152,20 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.Peer, pmes *pb.Me return resp, nil } - var withAddresses []peer.Peer - for _, p := range closest { - if len(p.Addresses()) > 0 { - withAddresses = append(withAddresses, p) + var withAddresses []peer.PeerInfo + closestinfos := peer.PeerInfos(dht.peerstore, closest) + for _, pi := range closestinfos { + if len(pi.Addrs) > 0 { + withAddresses = append(withAddresses, pi) + log.Debugf("handleFindPeer: sending back '%s'", pi.ID) } } - for _, p := range withAddresses { - log.Debugf("handleFindPeer: sending back '%s'", p) - } - - resp.CloserPeers = pb.PeersToPBPeers(dht.network, withAddresses) + resp.CloserPeers = pb.PeerInfosToPBPeers(dht.network, withAddresses) return resp, nil } -func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) // check if we have this value, to add ourselves as provider. @@ -183,13 +184,15 @@ func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.Peer, pmes *p } if providers != nil && len(providers) > 0 { - resp.ProviderPeers = pb.PeersToPBPeers(dht.network, providers) + infos := peer.PeerInfos(dht.peerstore, providers) + resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.network, infos) } // Also send closer peers. 
closer := dht.betterPeersToQuery(pmes, CloserPeerCount) if closer != nil { - resp.CloserPeers = pb.PeersToPBPeers(dht.network, closer) + infos := peer.PeerInfos(dht.peerstore, closer) + resp.CloserPeers = pb.PeerInfosToPBPeers(dht.network, infos) } return resp, nil @@ -197,34 +200,35 @@ type providerInfo struct { Creation time.Time - Value peer.Peer + Value peer.ID } -func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) { +func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { key := u.Key(pmes.GetKey()) log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key)) // add provider should use the address given in the message - for _, pb := range pmes.GetProviderPeers() { - pid := peer.ID(pb.GetId()) - if pid.Equal(p.ID()) { - - maddrs, err := pb.Addresses() - if err != nil { - log.Errorf("provider %s error with addresses %s", p, pb.Addrs) - continue - } - - log.Infof("received provider %s %s for %s", p, maddrs, key) - for _, maddr := range maddrs { - p.AddAddress(maddr) - } - dht.providers.AddProvider(key, p) - - } else { - log.Errorf("handleAddProvider received provider %s from %s", pid, p) + pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers()) + for _, pi := range pinfos { + if pi.ID != p { + // we should ignore this provider record! not from originator. + // (we could sign them and check signature later...) + log.Errorf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p) + continue } + + if len(pi.Addrs) < 1 { + log.Errorf("%s got no valid addresses for provider %s. Ignore.", dht.self, p) + continue + } + + log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs) + for _, maddr := range pi.Addrs { + // add the received addresses to our peerstore. 
+ dht.peerstore.AddAddress(p, maddr) + } + dht.providers.AddProvider(key, p) } return pmes, nil // send back same msg as confirmation. diff --git a/routing/dht/pb/dht.pb.go b/routing/dht/pb/dht.pb.go index e102ef7d3..09db3d5f9 100644 --- a/routing/dht/pb/dht.pb.go +++ b/routing/dht/pb/dht.pb.go @@ -182,7 +182,7 @@ type Message_Peer struct { // ID of a given peer. Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` // multiaddrs for a given peer - Addrs []string `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` // used to signal the sender's connection capabilities to the peer Connection *Message_ConnectionType `protobuf:"varint,3,opt,name=connection,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -199,7 +199,7 @@ func (m *Message_Peer) GetId() string { return "" } -func (m *Message_Peer) GetAddrs() []string { +func (m *Message_Peer) GetAddrs() [][]byte { if m != nil { return m.Addrs } diff --git a/routing/dht/pb/dht.proto b/routing/dht/pb/dht.proto index 6f31dd5e3..91c8d8e04 100644 --- a/routing/dht/pb/dht.proto +++ b/routing/dht/pb/dht.proto @@ -32,7 +32,7 @@ message Message { optional string id = 1; // multiaddrs for a given peer - repeated string addrs = 2; + repeated bytes addrs = 2; // used to signal the sender's connection capabilities to the peer optional ConnectionType connection = 3; diff --git a/routing/dht/pb/message.go b/routing/dht/pb/message.go index c5c4afea7..570c7cf18 100644 --- a/routing/dht/pb/message.go +++ b/routing/dht/pb/message.go @@ -1,15 +1,15 @@ package dht_pb import ( - "errors" - "fmt" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" + eventlog "github.com/jbenet/go-ipfs/util/eventlog" ) +var log = eventlog.Logger("dht.pb") + // NewMessage constructs a new dht message with 
given type, key, and level func NewMessage(typ Message_MessageType, key string, level int) *Message { m := &Message{ @@ -20,43 +20,32 @@ func NewMessage(typ Message_MessageType, key string, level int) *Message { return m } -func peerToPBPeer(p peer.Peer) *Message_Peer { +func peerInfoToPBPeer(p peer.PeerInfo) *Message_Peer { pbp := new(Message_Peer) - maddrs := p.Addresses() - pbp.Addrs = make([]string, len(maddrs)) - for i, maddr := range maddrs { - pbp.Addrs[i] = maddr.String() + pbp.Addrs = make([][]byte, len(p.Addrs)) + for i, maddr := range p.Addrs { + pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. } - pid := string(p.ID()) - pbp.Id = &pid + s := string(p.ID) + pbp.Id = &s return pbp } -// PBPeerToPeer turns a *Message_Peer into its peer.Peer counterpart -func PBPeerToPeer(ps peer.Peerstore, pbp *Message_Peer) (peer.Peer, error) { - p, err := ps.FindOrCreate(peer.ID(pbp.GetId())) - if err != nil { - return nil, fmt.Errorf("Failed to get peer from peerstore: %s", err) +// PBPeerToPeer turns a *Message_Peer into its peer.PeerInfo counterpart +func PBPeerToPeerInfo(pbp *Message_Peer) peer.PeerInfo { + return peer.PeerInfo{ + ID: peer.ID(pbp.GetId()), + Addrs: pbp.Addresses(), } - - // add addresses - maddrs, err := pbp.Addresses() - if err != nil { - return nil, fmt.Errorf("Received peer with bad or missing addresses: %s", pbp.Addrs) - } - for _, maddr := range maddrs { - p.AddAddress(maddr) - } - return p, nil } -// RawPeersToPBPeers converts a slice of Peers into a slice of *Message_Peers, +// RawPeerInfosToPBPeers converts a slice of Peers into a slice of *Message_Peers, // ready to go out on the wire. 
-func RawPeersToPBPeers(peers []peer.Peer) []*Message_Peer { +func RawPeerInfosToPBPeers(peers []peer.PeerInfo) []*Message_Peer { pbpeers := make([]*Message_Peer, len(peers)) for i, p := range peers { - pbpeers[i] = peerToPBPeer(p) + pbpeers[i] = peerInfoToPBPeer(p) } return pbpeers } @@ -64,49 +53,42 @@ func RawPeersToPBPeers(peers []peer.Peer) []*Message_Peer { // PeersToPBPeers converts given []peer.Peer into a set of []*Message_Peer, // which can be written to a message and sent out. the key thing this function // does (in addition to PeersToPBPeers) is set the ConnectionType with -// information from the given inet.Dialer. -func PeersToPBPeers(d inet.Network, peers []peer.Peer) []*Message_Peer { - pbps := RawPeersToPBPeers(peers) +// information from the given inet.Network. +func PeerInfosToPBPeers(n inet.Network, peers []peer.PeerInfo) []*Message_Peer { + pbps := RawPeerInfosToPBPeers(peers) for i, pbp := range pbps { - c := ConnectionType(d.Connectedness(peers[i])) + c := ConnectionType(n.Connectedness(peers[i].ID)) pbp.Connection = &c } return pbps } -// PBPeersToPeers converts given []*Message_Peer into a set of []peer.Peer -// Returns two slices, one of peers, and one of errors. The slice of peers -// will ONLY contain successfully converted peers. The slice of errors contains -// whether each input Message_Peer was successfully converted. -func PBPeersToPeers(ps peer.Peerstore, pbps []*Message_Peer) ([]peer.Peer, []error) { - errs := make([]error, len(pbps)) - peers := make([]peer.Peer, 0, len(pbps)) - for i, pbp := range pbps { - p, err := PBPeerToPeer(ps, pbp) - if err != nil { - errs[i] = err - } else { - peers = append(peers, p) - } +// PBPeersToPeerInfos converts given []*Message_Peer into []peer.PeerInfo +// Invalid addresses will be silently omitted. 
+func PBPeersToPeerInfos(pbps []*Message_Peer) []peer.PeerInfo { + peers := make([]peer.PeerInfo, 0, len(pbps)) + for _, pbp := range pbps { + peers = append(peers, PBPeerToPeerInfo(pbp)) } - return peers, errs + return peers } // Addresses returns a multiaddr associated with the Message_Peer entry -func (m *Message_Peer) Addresses() ([]ma.Multiaddr, error) { +func (m *Message_Peer) Addresses() []ma.Multiaddr { if m == nil { - return nil, errors.New("MessagePeer is nil") + return nil } var err error maddrs := make([]ma.Multiaddr, len(m.Addrs)) for i, addr := range m.Addrs { - maddrs[i], err = ma.NewMultiaddr(addr) + maddrs[i], err = ma.NewMultiaddrBytes(addr) if err != nil { - return nil, err + log.Errorf("error decoding Multiaddr for peer: %s", m.GetId()) + continue } } - return maddrs, nil + return maddrs } // GetClusterLevel gets and adjusts the cluster level on the message. diff --git a/routing/dht/providers.go b/routing/dht/providers.go index 928b3fa32..861c25f0c 100644 --- a/routing/dht/providers.go +++ b/routing/dht/providers.go @@ -23,12 +23,12 @@ type ProviderManager struct { type addProv struct { k u.Key - val peer.Peer + val peer.ID } type getProv struct { k u.Key - resp chan []peer.Peer + resp chan []peer.ID } func NewProviderManager(ctx context.Context, local peer.ID) *ProviderManager { @@ -53,7 +53,7 @@ func (pm *ProviderManager) run() { for { select { case np := <-pm.newprovs: - if np.val.ID().Equal(pm.lpeer) { + if np.val == pm.lpeer { pm.local[np.k] = struct{}{} } pi := new(providerInfo) @@ -63,7 +63,7 @@ pm.providers[np.k] = append(arr, pi) case gp := <-pm.getprovs: - var parr []peer.Peer + var parr []peer.ID provs := pm.providers[gp.k] for _, p := range provs { parr = append(parr, p.Value) @@ -94,17 +94,17 @@ } } -func (pm *ProviderManager) AddProvider(k u.Key, val peer.Peer) { +func (pm *ProviderManager) AddProvider(k u.Key, val peer.ID) { pm.newprovs <- &addProv{ k: k, val: 
val, } } -func (pm *ProviderManager) GetProviders(ctx context.Context, k u.Key) []peer.Peer { +func (pm *ProviderManager) GetProviders(ctx context.Context, k u.Key) []peer.ID { gp := &getProv{ k: k, - resp: make(chan []peer.Peer, 1), // buffered to prevent sender from blocking + resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking } select { case <-ctx.Done(): diff --git a/routing/dht/providers_test.go b/routing/dht/providers_test.go index 7d8aaa304..35ff92dfe 100644 --- a/routing/dht/providers_test.go +++ b/routing/dht/providers_test.go @@ -3,9 +3,8 @@ package dht import ( "testing" - "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ) @@ -15,7 +14,7 @@ func TestProviderManager(t *testing.T) { mid := peer.ID("testing") p := NewProviderManager(ctx, mid) a := u.Key("test") - p.AddProvider(a, testutil.NewPeerWithIDString("testingprovider")) + p.AddProvider(a, peer.ID("testingprovider")) resp := p.GetProviders(ctx, a) if len(resp) != 1 { t.Fatal("Could not retrieve provider.") diff --git a/routing/dht/query.go b/routing/dht/query.go index f4e43132d..c45fa239f 100644 --- a/routing/dht/query.go +++ b/routing/dht/query.go @@ -31,10 +31,10 @@ type dhtQuery struct { } type dhtQueryResult struct { - value []byte // GetValue - peer peer.Peer // FindPeer - providerPeers []peer.Peer // GetProviders - closerPeers []peer.Peer // * + value []byte // GetValue + peer peer.PeerInfo // FindPeer + providerPeers []peer.PeerInfo // GetProviders + closerPeers []peer.PeerInfo // * success bool } @@ -53,10 +53,10 @@ func newQuery(k u.Key, d inet.Dialer, f queryFunc) *dhtQuery { // - the value // - a list of peers potentially better able to serve the query // - an error -type queryFunc func(context.Context, peer.Peer) (*dhtQueryResult, error) +type queryFunc 
func(context.Context, peer.ID) (*dhtQueryResult, error) // Run runs the query at hand. pass in a list of peers to use first. -func (q *dhtQuery) Run(ctx context.Context, peers []peer.Peer) (*dhtQueryResult, error) { +func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) { runner := newQueryRunner(ctx, q) return runner.Run(peers) } @@ -70,7 +70,7 @@ type dhtQueryRunner struct { peersToQuery *queue.ChanQueue // peersSeen are all the peers queried. used to prevent querying same peer 2x - peersSeen peer.Map + peersSeen peer.Set // rateLimit is a channel used to rate limit our processing (semaphore) rateLimit chan struct{} @@ -101,12 +101,12 @@ func newQueryRunner(ctx context.Context, q *dhtQuery) *dhtQueryRunner { query: q, peersToQuery: queue.NewChanQueue(ctx, queue.NewXORDistancePQ(q.key)), peersRemaining: todoctr.NewSyncCounter(), - peersSeen: peer.Map{}, + peersSeen: peer.Set{}, rateLimit: make(chan struct{}, q.concurrency), } } -func (r *dhtQueryRunner) Run(peers []peer.Peer) (*dhtQueryResult, error) { +func (r *dhtQueryRunner) Run(peers []peer.ID) (*dhtQueryResult, error) { log.Debugf("Run query with %d peers.", len(peers)) if len(peers) == 0 { log.Warning("Running query with no peers!") @@ -120,7 +120,7 @@ func (r *dhtQueryRunner) Run(peers []peer.Peer) (*dhtQueryResult, error) { // add all the peers we got first. for _, p := range peers { - r.addPeerToQuery(p, nil) // don't have access to self here... + r.addPeerToQuery(p, "") // don't have access to self here... } // go do this thing. @@ -154,31 +154,30 @@ func (r *dhtQueryRunner) Run(peers []peer.Peer) (*dhtQueryResult, error) { return nil, err } -func (r *dhtQueryRunner) addPeerToQuery(next peer.Peer, benchmark peer.Peer) { - if next == nil { - // wtf why are peers nil?!? - log.Error("Query getting nil peers!!!\n") - return - } - +func (r *dhtQueryRunner) addPeerToQuery(next peer.ID, benchmark peer.ID) { // if new peer is ourselves... 
- if next.ID().Equal(r.query.dialer.LocalPeer().ID()) { + if next == r.query.dialer.LocalPeer() { return } // if new peer further away than whom we got it from, don't bother (loops) - if benchmark != nil && kb.Closer(benchmark.ID(), next.ID(), r.query.key) { + // TODO----------- this benchmark should be replaced by a heap: + // we should be doing the s/kademlia "continue to search" + // (i.e. put all of them in a heap sorted by dht distance and then just + // pull from the the top until a) you exhaust all peers you get, + // b) you succeed, c) your context expires. + if benchmark != "" && kb.Closer(benchmark, next, r.query.key) { return } // if already seen, no need. r.Lock() - _, found := r.peersSeen[next.Key()] + _, found := r.peersSeen[next] if found { r.Unlock() return } - r.peersSeen[next.Key()] = next + r.peersSeen[next] = struct{}{} r.Unlock() log.Debugf("adding peer to query: %v\n", next) @@ -211,7 +210,7 @@ func (r *dhtQueryRunner) spawnWorkers() { } } -func (r *dhtQueryRunner) queryPeer(p peer.Peer) { +func (r *dhtQueryRunner) queryPeer(p peer.ID) { log.Debugf("spawned worker for: %v", p) // make sure we rate limit concurrency. @@ -234,7 +233,6 @@ func (r *dhtQueryRunner) queryPeer(p peer.Peer) { }() // make sure we're connected to the peer. - // (Incidentally, this will add it to the peerstore too) err := r.query.dialer.DialPeer(r.ctx, p) if err != nil { log.Debugf("ERROR worker for: %v -- err connecting: %v", p, err) @@ -260,10 +258,15 @@ func (r *dhtQueryRunner) queryPeer(p peer.Peer) { r.Unlock() r.cancel() // signal to everyone that we're done. 
- } else if res.closerPeers != nil { - log.Debugf("PEERS CLOSER -- worker for: %v", p) + } else if len(res.closerPeers) > 0 { + log.Debugf("PEERS CLOSER -- worker for: %v (%d closer peers)", p, len(res.closerPeers)) for _, next := range res.closerPeers { - r.addPeerToQuery(next, p) + // add their addresses to the dialer's peerstore + r.query.dialer.Peerstore().AddAddresses(next.ID, next.Addrs) + r.addPeerToQuery(next.ID, p) + log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs) } + } else { + log.Debugf("QUERY worker for: %v - not found, and no closer peers.", p) } } diff --git a/routing/dht/records.go b/routing/dht/records.go index 1f284ed99..cf383916b 100644 --- a/routing/dht/records.go +++ b/routing/dht/records.go @@ -3,15 +3,17 @@ package dht import ( "bytes" "errors" + "fmt" "strings" - "time" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + ci "github.com/jbenet/go-ipfs/crypto" "github.com/jbenet/go-ipfs/peer" pb "github.com/jbenet/go-ipfs/routing/dht/pb" u "github.com/jbenet/go-ipfs/util" + ctxutil "github.com/jbenet/go-ipfs/util/ctx" ) // ValidatorFunc is a function that is called to validate a given @@ -26,64 +28,163 @@ var ErrBadRecord = errors.New("bad dht record") // is not found in the Validator map of the DHT. var ErrInvalidRecordType = errors.New("invalid record keytype") +// KeyForPublicKey returns the key used to retrieve public keys +// from the dht. 
+func KeyForPublicKey(id peer.ID) u.Key { + return u.Key("/pk/" + string(id)) +} + +// RecordBlobForSig returns the blob protected by the record signature +func RecordBlobForSig(r *pb.Record) []byte { + k := []byte(r.GetKey()) + v := []byte(r.GetValue()) + a := []byte(r.GetAuthor()) + return bytes.Join([][]byte{k, v, a}, []byte{}) +} + // creates and signs a dht record for the given key/value pair func (dht *IpfsDHT) makePutRecord(key u.Key, value []byte) (*pb.Record, error) { record := new(pb.Record) record.Key = proto.String(string(key)) record.Value = value - record.Author = proto.String(string(dht.self.ID())) - blob := bytes.Join([][]byte{[]byte(key), value, []byte(dht.self.ID())}, []byte{}) - sig, err := dht.self.PrivKey().Sign(blob) + record.Author = proto.String(string(dht.self)) + blob := RecordBlobForSig(record) + + sk := dht.peerstore.PrivKey(dht.self) + if sk == nil { + log.Errorf("%s dht cannot get own private key!", dht.self) + return nil, fmt.Errorf("cannot get private key to sign record!") + } + + sig, err := sk.Sign(blob) if err != nil { return nil, err } + record.Signature = sig return record, nil } -func (dht *IpfsDHT) getPublicKey(pid peer.ID) (ci.PubKey, error) { - log.Debug("getPublicKey for: %s", pid) - p, err := dht.peerstore.FindOrCreate(pid) - if err == nil { - return p.PubKey(), nil +func (dht *IpfsDHT) getPublicKeyOnline(ctx context.Context, p peer.ID) (ci.PubKey, error) { + log.Debugf("getPublicKey for: %s", p) + + // check locally. + pk := dht.peerstore.PubKey(p) + if pk != nil { + return pk, nil } - log.Debug("not in peerstore, searching dht.") - ctxT, _ := context.WithTimeout(dht.ContextGroup.Context(), time.Second*5) - val, err := dht.GetValue(ctxT, u.Key("/pk/"+string(pid))) + // ok, try the node itself. if they're overwhelmed or slow we can move on. + ctxT, _ := ctxutil.WithDeadlineFraction(ctx, 0.3) + if pk, err := dht.getPublicKeyFromNode(ctx, p); err == nil { + return pk, nil + } + + // last ditch effort: let's try the dht. 
+ log.Debugf("pk for %s not in peerstore, and peer failed. trying dht.", p) + pkkey := KeyForPublicKey(p) + + // ok, try the node itself. if they're overwhelmed or slow we can move on. + val, err := dht.GetValue(ctxT, pkkey) if err != nil { log.Warning("Failed to find requested public key.") return nil, err } - pubkey, err := ci.UnmarshalPublicKey(val) + pk, err = ci.UnmarshalPublicKey(val) if err != nil { log.Errorf("Failed to unmarshal public key: %s", err) return nil, err } - return pubkey, nil + return pk, nil } -func (dht *IpfsDHT) verifyRecord(r *pb.Record) error { +func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.PubKey, error) { + + // check locally, just in case... + pk := dht.peerstore.PubKey(p) + if pk != nil { + return pk, nil + } + + pkkey := KeyForPublicKey(p) + pmes, err := dht.getValueSingle(ctx, p, pkkey) + if err != nil { + return nil, err + } + + // node doesn't have key :( + record := pmes.GetRecord() + if record == nil { + return nil, fmt.Errorf("node not responding with its public key: %s", p) + } + + // Success! We were given the value. we don't need to check + // validity because a) we can't. b) we know the hash of the + // key we're looking for. + val := record.GetValue() + log.Debug("dht got a value from other peer.") + + pk, err = ci.UnmarshalPublicKey(val) + if err != nil { + return nil, err + } + + id, err := peer.IDFromPublicKey(pk) + if err != nil { + return nil, err + } + if id != p { + return nil, fmt.Errorf("public key does not match id: %s", p) + } + + // ok! it's valid. we got it! + log.Debugf("dht got public key from node itself.") + return pk, nil +} + +// verifyRecordLocally attempts to verify a record. if we do not have the public +// key, we fail. we do not search the dht. 
+func (dht *IpfsDHT) verifyRecordLocally(r *pb.Record) error { + // First, validate the signature - p, err := dht.peerstore.FindOrCreate(peer.ID(r.GetAuthor())) + p := peer.ID(r.GetAuthor()) + pk := dht.peerstore.PubKey(p) + if pk == nil { + return fmt.Errorf("do not have public key for %s", p) + } + + return dht.verifyRecord(r, pk) +} + +// verifyRecordOnline verifies a record, searching the DHT for the public key +// if necessary. The reason there is a distinction in the functions is that +// retrieving arbitrary public keys from the DHT as a result of passively +// receiving records (e.g. through a PUT_VALUE or ADD_PROVIDER) can cause a +// massive amplification attack on the dht. Use with care. +func (dht *IpfsDHT) verifyRecordOnline(ctx context.Context, r *pb.Record) error { + + // get the public key, search for it if necessary. + p := peer.ID(r.GetAuthor()) + pk, err := dht.getPublicKeyOnline(ctx, p) if err != nil { return err } - k := u.Key(r.GetKey()) - blob := bytes.Join([][]byte{[]byte(k), - r.GetValue(), - []byte(r.GetAuthor())}, []byte{}) + return dht.verifyRecord(r, pk) +} - ok, err := p.PubKey().Verify(blob, r.GetSignature()) +func (dht *IpfsDHT) verifyRecord(r *pb.Record, pk ci.PubKey) error { + // First, validate the signature + blob := RecordBlobForSig(r) + ok, err := pk.Verify(blob, r.GetSignature()) if err != nil { log.Error("Signature verify failed.") return err } - if !ok { + log.Error("dht found a forged record! 
(ignored)") return ErrBadRecord } diff --git a/routing/dht/routing.go b/routing/dht/routing.go index 51f15ff21..c515324c5 100644 --- a/routing/dht/routing.go +++ b/routing/dht/routing.go @@ -41,7 +41,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue) - query := newQuery(key, dht.network, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) { + query := newQuery(key, dht.network, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) { log.Debugf("%s PutValue qry part %v", dht.self, p) err := dht.putValueToNetwork(ctx, p, string(key), rec) if err != nil { @@ -61,7 +61,6 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) { log.Debugf("Get Value [%s]", key) // If we have it local, dont bother doing an RPC! - // NOTE: this might not be what we want to do... val, err := dht.getLocal(key) if err == nil { log.Debug("Got value locally!") @@ -76,7 +75,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) { } // setup the Query - query := newQuery(key, dht.network, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) { + query := newQuery(key, dht.network, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) { val, peers, err := dht.getValueOrPeers(ctx, p, key) if err != nil { @@ -131,15 +130,16 @@ func (dht *IpfsDHT) Provide(ctx context.Context, key u.Key) error { // FindProvidersAsync is the same thing as FindProviders, but returns a channel. // Peers will be returned on the channel as soon as they are found, even before // the search query completes. 
-func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int) <-chan peer.Peer { +func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int) <-chan peer.PeerInfo { log.Event(ctx, "findProviders", &key) - peerOut := make(chan peer.Peer, count) + peerOut := make(chan peer.PeerInfo, count) go dht.findProvidersAsyncRoutine(ctx, key, count, peerOut) return peerOut } -func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, count int, peerOut chan peer.Peer) { +func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, count int, peerOut chan peer.PeerInfo) { defer close(peerOut) + log.Debugf("%s FindProviders %s", dht.self, key) ps := pset.NewLimited(count) provs := dht.providers.GetProviders(ctx, key) @@ -147,7 +147,7 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co // NOTE: assuming that this list of peers is unique if ps.TryAdd(p) { select { - case peerOut <- p: + case peerOut <- dht.peerstore.PeerInfo(p): case <-ctx.Done(): return } @@ -160,23 +160,18 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co } // setup the Query - query := newQuery(key, dht.network, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) { + query := newQuery(key, dht.network, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) { pmes, err := dht.findProvidersSingle(ctx, p, key) if err != nil { return nil, err } - provs, errs := pb.PBPeersToPeers(dht.peerstore, pmes.GetProviderPeers()) - for _, err := range errs { - if err != nil { - log.Warning(err) - } - } + provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers()) // Add unique providers from request, up to 'count' for _, prov := range provs { - if ps.TryAdd(prov) { + if ps.TryAdd(prov.ID) { select { case peerOut <- prov: case <-ctx.Done(): @@ -191,13 +186,7 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co // Give closer peers back to 
the query to be queried closer := pmes.GetCloserPeers() - clpeers, errs := pb.PBPeersToPeers(dht.peerstore, closer) - for _, err := range errs { - if err != nil { - log.Warning(err) - } - } - + clpeers := pb.PBPeersToPeerInfos(closer) return &dhtQueryResult{closerPeers: clpeers}, nil }) @@ -208,62 +197,58 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co } } -func (dht *IpfsDHT) addPeerListAsync(ctx context.Context, k u.Key, peers []*pb.Message_Peer, ps *pset.PeerSet, count int, out chan peer.Peer) { +func (dht *IpfsDHT) addPeerListAsync(ctx context.Context, k u.Key, peers []*pb.Message_Peer, ps *pset.PeerSet, count int, out chan peer.PeerInfo) { var wg sync.WaitGroup - for _, pbp := range peers { + peerInfos := pb.PBPeersToPeerInfos(peers) + for _, pi := range peerInfos { wg.Add(1) - go func(mp *pb.Message_Peer) { + go func(pi peer.PeerInfo) { defer wg.Done() - // construct new peer - p, err := dht.ensureConnectedToPeer(ctx, mp) - if err != nil { + + p := pi.ID + if err := dht.ensureConnectedToPeer(ctx, p); err != nil { log.Errorf("%s", err) return } - if p == nil { - log.Error("Got nil peer from ensureConnectedToPeer") - return - } dht.providers.AddProvider(k, p) if ps.TryAdd(p) { select { - case out <- p: + case out <- pi: case <-ctx.Done(): return } } else if ps.Size() >= count { return } - }(pbp) + }(pi) } wg.Wait() } // FindPeer searches for a peer with given ID. -func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error) { +func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, error) { // Check if were already connected to them - p, _ := dht.FindLocal(id) - if p != nil { - return p, nil + if pi, _ := dht.FindLocal(id); pi.ID != "" { + return pi, nil } closest := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), AlphaValue) if closest == nil || len(closest) == 0 { - return nil, kb.ErrLookupFailure + return peer.PeerInfo{}, kb.ErrLookupFailure } // Sanity... 
for _, p := range closest { - if p.ID().Equal(id) { + if p == id { log.Error("Found target peer in list of closest peers...") - return p, nil + return dht.peerstore.PeerInfo(p), nil } } // setup the Query - query := newQuery(u.Key(id), dht.network, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) { + query := newQuery(u.Key(id), dht.network, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) { pmes, err := dht.findPeerSingle(ctx, p, id) if err != nil { @@ -271,45 +256,40 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error) } closer := pmes.GetCloserPeers() - clpeers, errs := pb.PBPeersToPeers(dht.peerstore, closer) - for _, err := range errs { - if err != nil { - log.Warning(err) - } - } + clpeerInfos := pb.PBPeersToPeerInfos(closer) // see it we got the peer here - for _, np := range clpeers { - if string(np.ID()) == string(id) { + for _, npi := range clpeerInfos { + if npi.ID == id { return &dhtQueryResult{ - peer: np, + peer: npi, success: true, }, nil } } - return &dhtQueryResult{closerPeers: clpeers}, nil + return &dhtQueryResult{closerPeers: clpeerInfos}, nil }) // run it! result, err := query.Run(ctx, closest) if err != nil { - return nil, err + return peer.PeerInfo{}, err } log.Debugf("FindPeer %v %v", id, result.success) - if result.peer == nil { - return nil, routing.ErrNotFound + if result.peer.ID == "" { + return peer.PeerInfo{}, routing.ErrNotFound } return result.peer, nil } // FindPeersConnectedToPeer searches for peers directly connected to a given peer. 
-func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.Peer, error) { +func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.PeerInfo, error) { - peerchan := make(chan peer.Peer, asyncQueryBuffer) - peersSeen := map[string]peer.Peer{} + peerchan := make(chan peer.PeerInfo, asyncQueryBuffer) + peersSeen := peer.Set{} closest := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), AlphaValue) if closest == nil || len(closest) == 0 { @@ -317,42 +297,37 @@ func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (< } // setup the Query - query := newQuery(u.Key(id), dht.network, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) { + query := newQuery(u.Key(id), dht.network, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) { pmes, err := dht.findPeerSingle(ctx, p, id) if err != nil { return nil, err } - var clpeers []peer.Peer + var clpeers []peer.PeerInfo closer := pmes.GetCloserPeers() for _, pbp := range closer { - // skip peers already seen - if _, found := peersSeen[string(pbp.GetId())]; found { - continue - } + pi := pb.PBPeerToPeerInfo(pbp) - // skip peers that fail to unmarshal - p, err := pb.PBPeerToPeer(dht.peerstore, pbp) - if err != nil { - log.Warning(err) + // skip peers already seen + if _, found := peersSeen[pi.ID]; found { continue } + peersSeen[pi.ID] = struct{}{} // if peer is connected, send it to our client. if pb.Connectedness(*pbp.Connection) == inet.Connected { select { case <-ctx.Done(): return nil, ctx.Err() - case peerchan <- p: + case peerchan <- pi: } } - peersSeen[string(p.ID())] = p - // if peer is the peer we're looking for, don't bother querying it. + // TODO maybe query it? 
if pb.Connectedness(*pbp.Connection) != inet.Connected { - clpeers = append(clpeers, p) + clpeers = append(clpeers, pi) } } @@ -374,7 +349,7 @@ func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (< } // Ping a peer, log the time it took -func (dht *IpfsDHT) Ping(ctx context.Context, p peer.Peer) error { +func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error { // Thoughts: maybe this should accept an ID and do a peer lookup? log.Debugf("ping %s start", p) diff --git a/routing/kbucket/bucket.go b/routing/kbucket/bucket.go index 51f524971..2fa5586db 100644 --- a/routing/kbucket/bucket.go +++ b/routing/kbucket/bucket.go @@ -23,7 +23,7 @@ func (b *Bucket) find(id peer.ID) *list.Element { b.lk.RLock() defer b.lk.RUnlock() for e := b.list.Front(); e != nil; e = e.Next() { - if e.Value.(peer.Peer).ID().Equal(id) { + if e.Value.(peer.ID) == id { return e } } @@ -36,18 +36,18 @@ func (b *Bucket) moveToFront(e *list.Element) { b.lk.Unlock() } -func (b *Bucket) pushFront(p peer.Peer) { +func (b *Bucket) pushFront(p peer.ID) { b.lk.Lock() b.list.PushFront(p) b.lk.Unlock() } -func (b *Bucket) popBack() peer.Peer { +func (b *Bucket) popBack() peer.ID { b.lk.Lock() defer b.lk.Unlock() last := b.list.Back() b.list.Remove(last) - return last.Value.(peer.Peer) + return last.Value.(peer.ID) } func (b *Bucket) len() int { @@ -68,7 +68,7 @@ func (b *Bucket) Split(cpl int, target ID) *Bucket { newbuck.list = out e := b.list.Front() for e != nil { - peerID := ConvertPeerID(e.Value.(peer.Peer).ID()) + peerID := ConvertPeerID(e.Value.(peer.ID)) peerCPL := commonPrefixLen(peerID, target) if peerCPL > cpl { cur := e diff --git a/routing/kbucket/table.go b/routing/kbucket/table.go index c144c191e..da4c6e720 100644 --- a/routing/kbucket/table.go +++ b/routing/kbucket/table.go @@ -23,6 +23,9 @@ type RoutingTable struct { // Blanket lock, refine later for better performance tabLock sync.RWMutex + // latency metrics + metrics peer.Metrics + // Maximum acceptable 
latency for peers in this cluster maxLatency time.Duration @@ -32,21 +35,22 @@ type RoutingTable struct { } // NewRoutingTable creates a new routing table with a given bucketsize, local ID, and latency tolerance. -func NewRoutingTable(bucketsize int, localID ID, latency time.Duration) *RoutingTable { +func NewRoutingTable(bucketsize int, localID ID, latency time.Duration, m peer.Metrics) *RoutingTable { rt := new(RoutingTable) rt.Buckets = []*Bucket{newBucket()} rt.bucketsize = bucketsize rt.local = localID rt.maxLatency = latency + rt.metrics = m return rt } // Update adds or moves the given peer to the front of its respective bucket // If a peer gets removed from a bucket, it is returned -func (rt *RoutingTable) Update(p peer.Peer) peer.Peer { +func (rt *RoutingTable) Update(p peer.ID) peer.ID { rt.tabLock.Lock() defer rt.tabLock.Unlock() - peerID := ConvertPeerID(p.ID()) + peerID := ConvertPeerID(p) cpl := commonPrefixLen(peerID, rt.local) bucketID := cpl @@ -55,12 +59,12 @@ func (rt *RoutingTable) Update(p peer.Peer) peer.Peer { } bucket := rt.Buckets[bucketID] - e := bucket.find(p.ID()) + e := bucket.find(p) if e == nil { // New peer, add to bucket - if p.GetLatency() > rt.maxLatency { + if rt.metrics.LatencyEWMA(p) > rt.maxLatency { // Connection doesnt meet requirements, skip! - return nil + return "" } bucket.pushFront(p) @@ -75,16 +79,16 @@ func (rt *RoutingTable) Update(p peer.Peer) peer.Peer { return bucket.popBack() } } - return nil + return "" } // If the peer is already in the table, move it to the front. 
// This signifies that it it "more active" and the less active nodes // Will as a result tend towards the back of the list bucket.moveToFront(e) - return nil + return "" } -func (rt *RoutingTable) nextBucket() peer.Peer { +func (rt *RoutingTable) nextBucket() peer.ID { bucket := rt.Buckets[len(rt.Buckets)-1] newBucket := bucket.Split(len(rt.Buckets)-1, rt.local) rt.Buckets = append(rt.Buckets, newBucket) @@ -96,12 +100,12 @@ func (rt *RoutingTable) nextBucket() peer.Peer { if bucket.len() > rt.bucketsize { return bucket.popBack() } - return nil + return "" } // A helper struct to sort peers by their distance to the local node type peerDistance struct { - p peer.Peer + p peer.ID distance ID } @@ -118,8 +122,8 @@ func (p peerSorterArr) Less(a, b int) bool { func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) peerSorterArr { for e := peerList.Front(); e != nil; e = e.Next() { - p := e.Value.(peer.Peer) - pID := ConvertPeerID(p.ID()) + p := e.Value.(peer.ID) + pID := ConvertPeerID(p) pd := peerDistance{ p: p, distance: xor(target, pID), @@ -134,27 +138,27 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) pe } // Find a specific peer by ID or return nil -func (rt *RoutingTable) Find(id peer.ID) peer.Peer { +func (rt *RoutingTable) Find(id peer.ID) peer.ID { srch := rt.NearestPeers(ConvertPeerID(id), 1) - if len(srch) == 0 || !srch[0].ID().Equal(id) { - return nil + if len(srch) == 0 || srch[0] != id { + return "" } return srch[0] } // NearestPeer returns a single peer that is nearest to the given ID -func (rt *RoutingTable) NearestPeer(id ID) peer.Peer { +func (rt *RoutingTable) NearestPeer(id ID) peer.ID { peers := rt.NearestPeers(id, 1) if len(peers) > 0 { return peers[0] } log.Errorf("NearestPeer: Returning nil, table size = %d", rt.Size()) - return nil + return "" } // NearestPeers returns a list of the 'count' closest peers to the given ID -func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.Peer { 
+func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID { rt.tabLock.RLock() defer rt.tabLock.RUnlock() cpl := commonPrefixLen(id, rt.local) @@ -186,7 +190,7 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.Peer { // Sort by distance to local peer sort.Sort(peerArr) - var out []peer.Peer + var out []peer.ID for i := 0; i < count && i < peerArr.Len(); i++ { out = append(out, peerArr[i].p) } @@ -205,11 +209,11 @@ func (rt *RoutingTable) Size() int { // ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table. // NOTE: This is potentially unsafe... use at your own risk -func (rt *RoutingTable) ListPeers() []peer.Peer { - var peers []peer.Peer +func (rt *RoutingTable) ListPeers() []peer.ID { + var peers []peer.ID for _, buck := range rt.Buckets { for e := buck.getIter(); e != nil; e = e.Next() { - peers = append(peers, e.Value.(peer.Peer)) + peers = append(peers, e.Value.(peer.ID)) } } return peers @@ -221,6 +225,6 @@ func (rt *RoutingTable) Print() { rt.tabLock.RLock() peers := rt.ListPeers() for i, p := range peers { - fmt.Printf("%d) %s %s\n", i, p.ID().Pretty(), p.GetLatency().String()) + fmt.Printf("%d) %s %s\n", i, p.Pretty(), rt.metrics.LatencyEWMA(p).String()) } } diff --git a/routing/kbucket/table_test.go b/routing/kbucket/table_test.go index 85fc387e2..db93ddf86 100644 --- a/routing/kbucket/table_test.go +++ b/routing/kbucket/table_test.go @@ -1,8 +1,6 @@ package kbucket import ( - crand "crypto/rand" - "crypto/sha256" "math/rand" "testing" "time" @@ -12,37 +10,29 @@ import ( peer "github.com/jbenet/go-ipfs/peer" ) -func RandID() ID { - buf := make([]byte, 16) - crand.Read(buf) - - hash := sha256.Sum256(buf) - return ID(hash[:]) -} - // Test basic features of the bucket struct func TestBucket(t *testing.T) { b := newBucket() - peers := make([]peer.Peer, 100) + peers := make([]peer.ID, 100) for i := 0; i < 100; i++ { - peers[i] = tu.RandPeer() + peers[i] = tu.RandPeerIDFatal(t) b.pushFront(peers[i]) 
} - local := tu.RandPeer() - localID := ConvertPeerID(local.ID()) + local := tu.RandPeerIDFatal(t) + localID := ConvertPeerID(local) i := rand.Intn(len(peers)) - e := b.find(peers[i].ID()) + e := b.find(peers[i]) if e == nil { t.Errorf("Failed to find peer: %v", peers[i]) } - spl := b.Split(0, ConvertPeerID(local.ID())) + spl := b.Split(0, ConvertPeerID(local)) llist := b.list for e := llist.Front(); e != nil; e = e.Next() { - p := ConvertPeerID(e.Value.(peer.Peer).ID()) + p := ConvertPeerID(e.Value.(peer.ID)) cpl := commonPrefixLen(p, localID) if cpl > 0 { t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket") @@ -51,7 +41,7 @@ func TestBucket(t *testing.T) { rlist := spl.list for e := rlist.Front(); e != nil; e = e.Next() { - p := ConvertPeerID(e.Value.(peer.Peer).ID()) + p := ConvertPeerID(e.Value.(peer.ID)) cpl := commonPrefixLen(p, localID) if cpl == 0 { t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket") @@ -61,24 +51,25 @@ func TestBucket(t *testing.T) { // Right now, this just makes sure that it doesnt hang or crash func TestTableUpdate(t *testing.T) { - local := tu.RandPeer() - rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour) + local := tu.RandPeerIDFatal(t) + m := peer.NewMetrics() + rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m) - peers := make([]peer.Peer, 100) + peers := make([]peer.ID, 100) for i := 0; i < 100; i++ { - peers[i] = tu.RandPeer() + peers[i] = tu.RandPeerIDFatal(t) } // Testing Update for i := 0; i < 10000; i++ { p := rt.Update(peers[rand.Intn(len(peers))]) - if p != nil { + if p != "" { //t.Log("evicted peer.") } } for i := 0; i < 100; i++ { - id := RandID() + id := ConvertPeerID(tu.RandPeerIDFatal(t)) ret := rt.NearestPeers(id, 5) if len(ret) == 0 { t.Fatal("Failed to find node near ID.") @@ -87,34 +78,36 @@ func TestTableUpdate(t *testing.T) { } func TestTableFind(t *testing.T) { - local := tu.RandPeer() - rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour) + local := 
tu.RandPeerIDFatal(t) + m := peer.NewMetrics() + rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m) - peers := make([]peer.Peer, 100) + peers := make([]peer.ID, 100) for i := 0; i < 5; i++ { - peers[i] = tu.RandPeer() + peers[i] = tu.RandPeerIDFatal(t) rt.Update(peers[i]) } t.Logf("Searching for peer: '%s'", peers[2]) - found := rt.NearestPeer(ConvertPeerID(peers[2].ID())) - if !found.ID().Equal(peers[2].ID()) { + found := rt.NearestPeer(ConvertPeerID(peers[2])) + if !(found == peers[2]) { t.Fatalf("Failed to lookup known node...") } } func TestTableFindMultiple(t *testing.T) { - local := tu.RandPeer() - rt := NewRoutingTable(20, ConvertPeerID(local.ID()), time.Hour) + local := tu.RandPeerIDFatal(t) + m := peer.NewMetrics() + rt := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m) - peers := make([]peer.Peer, 100) + peers := make([]peer.ID, 100) for i := 0; i < 18; i++ { - peers[i] = tu.RandPeer() + peers[i] = tu.RandPeerIDFatal(t) rt.Update(peers[i]) } t.Logf("Searching for peer: '%s'", peers[2]) - found := rt.NearestPeers(ConvertPeerID(peers[2].ID()), 15) + found := rt.NearestPeers(ConvertPeerID(peers[2]), 15) if len(found) != 15 { t.Fatalf("Got back different number of peers than we expected.") } @@ -125,10 +118,11 @@ func TestTableFindMultiple(t *testing.T) { // and set GOMAXPROCS above 1 func TestTableMultithreaded(t *testing.T) { local := peer.ID("localPeer") - tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour) - var peers []peer.Peer + m := peer.NewMetrics() + tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m) + var peers []peer.ID for i := 0; i < 500; i++ { - peers = append(peers, tu.RandPeer()) + peers = append(peers, tu.RandPeerIDFatal(t)) } done := make(chan struct{}) @@ -151,7 +145,7 @@ func TestTableMultithreaded(t *testing.T) { go func() { for i := 0; i < 1000; i++ { n := rand.Intn(len(peers)) - tab.Find(peers[n].ID()) + tab.Find(peers[n]) } done <- struct{}{} }() @@ -163,11 +157,12 @@ func 
TestTableMultithreaded(t *testing.T) { func BenchmarkUpdates(b *testing.B) { b.StopTimer() local := ConvertKey("localKey") - tab := NewRoutingTable(20, local, time.Hour) + m := peer.NewMetrics() + tab := NewRoutingTable(20, local, time.Hour, m) - var peers []peer.Peer + var peers []peer.ID for i := 0; i < b.N; i++ { - peers = append(peers, tu.RandPeer()) + peers = append(peers, tu.RandPeerIDFatal(b)) } b.StartTimer() @@ -179,16 +174,17 @@ func BenchmarkUpdates(b *testing.B) { func BenchmarkFinds(b *testing.B) { b.StopTimer() local := ConvertKey("localKey") - tab := NewRoutingTable(20, local, time.Hour) + m := peer.NewMetrics() + tab := NewRoutingTable(20, local, time.Hour, m) - var peers []peer.Peer + var peers []peer.ID for i := 0; i < b.N; i++ { - peers = append(peers, tu.RandPeer()) + peers = append(peers, tu.RandPeerIDFatal(b)) tab.Update(peers[i]) } b.StartTimer() for i := 0; i < b.N; i++ { - tab.Find(peers[i].ID()) + tab.Find(peers[i]) } } diff --git a/routing/kbucket/util.go b/routing/kbucket/util.go index 4adac0405..2d06b5f08 100644 --- a/routing/kbucket/util.go +++ b/routing/kbucket/util.go @@ -40,7 +40,7 @@ func commonPrefixLen(a, b ID) int { // ConvertPeerID creates a DHT ID by hashing a Peer ID (Multihash) func ConvertPeerID(id peer.ID) ID { - hash := sha256.Sum256(id) + hash := sha256.Sum256([]byte(id)) return hash[:] } diff --git a/routing/mock/client.go b/routing/mock/client.go index 444a4b960..9be43b653 100644 --- a/routing/mock/client.go +++ b/routing/mock/client.go @@ -15,7 +15,7 @@ var log = u.Logger("mockrouter") type client struct { datastore ds.Datastore server server - peer peer.Peer + peer peer.PeerInfo } // FIXME(brian): is this method meant to simulate putting a value into the network? 
@@ -40,17 +40,17 @@ func (c *client) GetValue(ctx context.Context, key u.Key) ([]byte, error) { return data, nil } -func (c *client) FindProviders(ctx context.Context, key u.Key) ([]peer.Peer, error) { +func (c *client) FindProviders(ctx context.Context, key u.Key) ([]peer.PeerInfo, error) { return c.server.Providers(key), nil } -func (c *client) FindPeer(ctx context.Context, pid peer.ID) (peer.Peer, error) { +func (c *client) FindPeer(ctx context.Context, pid peer.ID) (peer.PeerInfo, error) { log.Debugf("FindPeer: %s", pid) - return nil, nil + return peer.PeerInfo{}, nil } -func (c *client) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan peer.Peer { - out := make(chan peer.Peer) +func (c *client) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan peer.PeerInfo { + out := make(chan peer.PeerInfo) go func() { defer close(out) for i, p := range c.server.Providers(k) { diff --git a/routing/mock/interface.go b/routing/mock/interface.go index 639736292..abb869eb4 100644 --- a/routing/mock/interface.go +++ b/routing/mock/interface.go @@ -15,13 +15,13 @@ import ( // Server provides mockrouting Clients type Server interface { - Client(p peer.Peer) Client - ClientWithDatastore(peer.Peer, ds.Datastore) Client + Client(p peer.PeerInfo) Client + ClientWithDatastore(peer.PeerInfo, ds.Datastore) Client } // Client implements IpfsRouting type Client interface { - FindProviders(context.Context, u.Key) ([]peer.Peer, error) + FindProviders(context.Context, u.Key) ([]peer.PeerInfo, error) routing.IpfsRouting } @@ -37,7 +37,7 @@ func NewServer() Server { // NewServerWithDelay returns a mockrouting Server with a delay! 
func NewServerWithDelay(conf DelayConfig) Server { return &s{ - providers: make(map[u.Key]map[u.Key]providerRecord), + providers: make(map[u.Key]map[peer.ID]providerRecord), delayConf: conf, } } diff --git a/routing/mock/mockrouting_test.go b/routing/mock/mockrouting_test.go index 44b1b52bd..739edbc63 100644 --- a/routing/mock/mockrouting_test.go +++ b/routing/mock/mockrouting_test.go @@ -1,7 +1,6 @@ package mockrouting import ( - "bytes" "testing" "time" @@ -9,17 +8,16 @@ import ( peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestKeyNotFound(t *testing.T) { - var peer = testutil.NewPeerWithID(peer.ID([]byte("the peer id"))) + var pi = peer.PeerInfo{ID: peer.ID("the peer id")} var key = u.Key("mock key") var ctx = context.Background() rs := NewServer() - providers := rs.Client(peer).FindProvidersAsync(ctx, key, 10) + providers := rs.Client(pi).FindProvidersAsync(ctx, key, 10) _, ok := <-providers if ok { t.Fatal("should be closed") @@ -27,9 +25,9 @@ func TestKeyNotFound(t *testing.T) { } func TestClientFindProviders(t *testing.T) { - peer := testutil.NewPeerWithIDString("42") + pi := peer.PeerInfo{ID: peer.ID("42")} rs := NewServer() - client := rs.Client(peer) + client := rs.Client(pi) k := u.Key("hello") err := client.Provide(context.Background(), k) @@ -41,14 +39,14 @@ func TestClientFindProviders(t *testing.T) { time.Sleep(time.Millisecond * 300) max := 100 - providersFromHashTable, err := rs.Client(peer).FindProviders(context.Background(), k) + providersFromHashTable, err := rs.Client(pi).FindProviders(context.Background(), k) if err != nil { t.Fatal(err) } isInHT := false - for _, p := range providersFromHashTable { - if bytes.Equal(p.ID(), peer.ID()) { + for _, p := range providersFromHashTable { + if p.ID == pi.ID { isInHT = true } } @@ -57,8 +55,8 @@ func TestClientFindProviders(t *testing.T) { } providersFromClient := 
client.FindProvidersAsync(context.Background(), u.Key("hello"), max) isInClient := false - for p := range providersFromClient { - if bytes.Equal(p.ID(), peer.ID()) { + for p := range providersFromClient { + if p.ID == pi.ID { isInClient = true } } @@ -72,16 +70,16 @@ func TestClientOverMax(t *testing.T) { k := u.Key("hello") numProvidersForHelloKey := 100 for i := 0; i < numProvidersForHelloKey; i++ { - peer := testutil.NewPeerWithIDString(string(i)) - err := rs.Client(peer).Provide(context.Background(), k) + pi := peer.PeerInfo{ID: peer.ID(i)} + err := rs.Client(pi).Provide(context.Background(), k) if err != nil { t.Fatal(err) } } max := 10 - peer := testutil.NewPeerWithIDString("TODO") - client := rs.Client(peer) + pi := peer.PeerInfo{ID: peer.ID("TODO")} + client := rs.Client(pi) providersFromClient := client.FindProvidersAsync(context.Background(), k, max) i := 0 @@ -98,20 +96,33 @@ func TestCanceledContext(t *testing.T) { rs := NewServer() k := u.Key("hello") + // avoid leaking goroutine, without using the context to signal + // (we want the goroutine to keep trying to publish on a + // cancelled context until we've tested it doesn't do anything.) 
+ done := make(chan struct{}) + defer func() { done <- struct{}{} }() + t.Log("async'ly announce infinite stream of providers for key") i := 0 go func() { // infinite stream for { - peer := testutil.NewPeerWithIDString(string(i)) - err := rs.Client(peer).Provide(context.Background(), k) + select { + case <-done: + t.Log("exiting async worker") + return + default: + } + + pi := peer.PeerInfo{ID: peer.ID(i)} + err := rs.Client(pi).Provide(context.Background(), k) if err != nil { - t.Fatal(err) + t.Error(err) } i++ } }() - local := testutil.NewPeerWithIDString("peer id doesn't matter") + local := peer.PeerInfo{ID: peer.ID("peer id doesn't matter")} client := rs.Client(local) t.Log("warning: max is finite so this test is non-deterministic") @@ -137,7 +148,7 @@ func TestCanceledContext(t *testing.T) { func TestValidAfter(t *testing.T) { - var p = testutil.NewPeerWithID(peer.ID([]byte("the peer id"))) + var pi = peer.PeerInfo{ID: peer.ID("the peer id")} var key = u.Key("mock key") var ctx = context.Background() conf := DelayConfig{ @@ -147,10 +158,10 @@ func TestValidAfter(t *testing.T) { rs := NewServerWithDelay(conf) - rs.Client(p).Provide(ctx, key) + rs.Client(pi).Provide(ctx, key) - var providers []peer.Peer - providers, err := rs.Client(p).FindProviders(ctx, key) + var providers []peer.PeerInfo + providers, err := rs.Client(pi).FindProviders(ctx, key) if err != nil { t.Fatal(err) } @@ -159,7 +170,7 @@ func TestValidAfter(t *testing.T) { } conf.ValueVisibility.Set(0) - providers, err = rs.Client(p).FindProviders(ctx, key) + providers, err = rs.Client(pi).FindProviders(ctx, key) if err != nil { t.Fatal(err) } diff --git a/routing/mock/server.go b/routing/mock/server.go index e176c7aeb..31ae4b730 100644 --- a/routing/mock/server.go +++ b/routing/mock/server.go @@ -12,8 +12,8 @@ import ( // server is the mockrouting.Client's private interface to the routing server type server interface { - Announce(peer.Peer, u.Key) error - Providers(u.Key) []peer.Peer + 
Announce(peer.PeerInfo, u.Key) error + Providers(u.Key) []peer.PeerInfo Server } @@ -23,36 +23,36 @@ type s struct { delayConf DelayConfig lock sync.RWMutex - providers map[u.Key]map[u.Key]providerRecord + providers map[u.Key]map[peer.ID]providerRecord } type providerRecord struct { - Peer peer.Peer + Peer peer.PeerInfo Created time.Time } -func (rs *s) Announce(p peer.Peer, k u.Key) error { +func (rs *s) Announce(p peer.PeerInfo, k u.Key) error { rs.lock.Lock() defer rs.lock.Unlock() _, ok := rs.providers[k] if !ok { - rs.providers[k] = make(map[u.Key]providerRecord) + rs.providers[k] = make(map[peer.ID]providerRecord) } - rs.providers[k][p.Key()] = providerRecord{ + rs.providers[k][p.ID] = providerRecord{ Created: time.Now(), Peer: p, } return nil } -func (rs *s) Providers(k u.Key) []peer.Peer { +func (rs *s) Providers(k u.Key) []peer.PeerInfo { rs.delayConf.Query.Wait() // before locking rs.lock.RLock() defer rs.lock.RUnlock() - var ret []peer.Peer + var ret []peer.PeerInfo records, ok := rs.providers[k] if !ok { return ret @@ -71,11 +71,11 @@ func (rs *s) Providers(k u.Key) []peer.Peer { return ret } -func (rs *s) Client(p peer.Peer) Client { +func (rs *s) Client(p peer.PeerInfo) Client { return rs.ClientWithDatastore(p, ds.NewMapDatastore()) } -func (rs *s) ClientWithDatastore(p peer.Peer, datastore ds.Datastore) Client { +func (rs *s) ClientWithDatastore(p peer.PeerInfo, datastore ds.Datastore) Client { return &client{ peer: p, datastore: ds.NewMapDatastore(), diff --git a/routing/routing.go b/routing/routing.go index 09773f20b..ae9acad44 100644 --- a/routing/routing.go +++ b/routing/routing.go @@ -16,7 +16,7 @@ var ErrNotFound = errors.New("routing: not found") // IpfsRouting is the routing module interface // It is implemented by things like DHTs, etc. 
type IpfsRouting interface { - FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer + FindProvidersAsync(context.Context, u.Key, int) <-chan peer.PeerInfo // Basic Put/Get @@ -33,6 +33,7 @@ type IpfsRouting interface { Provide(context.Context, u.Key) error // Find specific Peer - // FindPeer searches for a peer with given ID. - FindPeer(context.Context, peer.ID) (peer.Peer, error) + // FindPeer searches for a peer with given ID, returns a peer.PeerInfo + // with relevant addresses. + FindPeer(context.Context, peer.ID) (peer.PeerInfo, error) } diff --git a/util/ctx/fracctx.go b/util/ctx/fracctx.go new file mode 100644 index 000000000..74d50b692 --- /dev/null +++ b/util/ctx/fracctx.go @@ -0,0 +1,22 @@ +package ctxutil + +import ( + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) { + d, found := ctx.Deadline() + if !found { // no deadline + return context.WithCancel(ctx) + } + + left := d.Sub(time.Now()) + if left < 0 { // already passed... + return context.WithCancel(ctx) + } + + left = time.Duration(float64(left) * fraction) + return context.WithTimeout(ctx, left) +} diff --git a/util/ctx/fracctx_test.go b/util/ctx/fracctx_test.go new file mode 100644 index 000000000..dc6aa9a88 --- /dev/null +++ b/util/ctx/fracctx_test.go @@ -0,0 +1,137 @@ +package ctxutil + +import ( + "testing" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +// this test is on the context tool itself, not our stuff. it's for sanity on ours. 
+func TestDeadline(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Millisecond) + + select { + case <-ctx.Done(): + t.Fatal("ended too early") + default: + } + + <-time.After(6 * time.Millisecond) + + select { + case <-ctx.Done(): + default: + t.Fatal("ended too late") + } +} + +func TestDeadlineFractionForever(t *testing.T) { + + ctx, _ := WithDeadlineFraction(context.Background(), 0.5) + + _, found := ctx.Deadline() + if found { + t.Fatal("should last forever") + } +} + +func TestDeadlineFractionHalf(t *testing.T) { + + ctx1, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx2, _ := WithDeadlineFraction(ctx1, 0.5) + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 ended too early") + case <-ctx2.Done(): + t.Fatal("ctx2 ended too early") + default: + } + + <-time.After(2 * time.Millisecond) + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 ended too early") + case <-ctx2.Done(): + t.Fatal("ctx2 ended too early") + default: + } + + <-time.After(4 * time.Millisecond) + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 ended too early") + case <-ctx2.Done(): + default: + t.Fatal("ctx2 ended too late") + } + + <-time.After(6 * time.Millisecond) + + select { + case <-ctx1.Done(): + default: + t.Fatal("ctx1 ended too late") + } + +} + +func TestDeadlineFractionCancel(t *testing.T) { + + ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5) + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 ended too early") + case <-ctx2.Done(): + t.Fatal("ctx2 ended too early") + default: + } + + cancel2() + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 should NOT be cancelled") + case <-ctx2.Done(): + default: + t.Fatal("ctx2 should be cancelled") + } + + cancel1() + + select { + case <-ctx1.Done(): + case <-ctx2.Done(): + default: + t.Fatal("ctx1 should be cancelled") + } + +} + +func TestDeadlineFractionObeysParent(t *testing.T) { + + ctx1, cancel1 := 
context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx2, _ := WithDeadlineFraction(ctx1, 0.5) + + select { + case <-ctx1.Done(): + t.Fatal("ctx1 ended too early") + case <-ctx2.Done(): + t.Fatal("ctx2 ended too early") + default: + } + + cancel1() + + select { + case <-ctx2.Done(): + default: + t.Fatal("ctx2 should be cancelled") + } + +} diff --git a/util/peerset/peerset.go b/util/peerset/peerset.go index 5dcf26682..35a80a12d 100644 --- a/util/peerset/peerset.go +++ b/util/peerset/peerset.go @@ -7,34 +7,34 @@ import ( // PeerSet is a threadsafe set of peers type PeerSet struct { - ps map[string]bool // FIXME can be map[string]struct{} + ps map[peer.ID]struct{} lk sync.RWMutex size int } func New() *PeerSet { ps := new(PeerSet) - ps.ps = make(map[string]bool) + ps.ps = make(map[peer.ID]struct{}) ps.size = -1 return ps } func NewLimited(size int) *PeerSet { ps := new(PeerSet) - ps.ps = make(map[string]bool) + ps.ps = make(map[peer.ID]struct{}) ps.size = size return ps } -func (ps *PeerSet) Add(p peer.Peer) { +func (ps *PeerSet) Add(p peer.ID) { ps.lk.Lock() - ps.ps[string(p.ID())] = true + ps.ps[p] = struct{}{} ps.lk.Unlock() } -func (ps *PeerSet) Contains(p peer.Peer) bool { +func (ps *PeerSet) Contains(p peer.ID) bool { ps.lk.RLock() - _, ok := ps.ps[string(p.ID())] + _, ok := ps.ps[p] ps.lk.RUnlock() return ok } @@ -49,12 +49,12 @@ func (ps *PeerSet) Size() int { // This operation can fail for one of two reasons: // 1) The given peer is already in the set // 2) The number of peers in the set is equal to size -func (ps *PeerSet) TryAdd(p peer.Peer) bool { +func (ps *PeerSet) TryAdd(p peer.ID) bool { var success bool ps.lk.Lock() - if _, ok := ps.ps[string(p.ID())]; !ok && (len(ps.ps) < ps.size || ps.size == -1) { + if _, ok := ps.ps[p]; !ok && (len(ps.ps) < ps.size || ps.size == -1) { success = true - ps.ps[string(p.ID())] = true + ps.ps[p] = struct{}{} } ps.lk.Unlock() return success diff --git a/util/testutil/gen.go b/util/testutil/gen.go index 
c59826961..16f39ef45 100644 --- a/util/testutil/gen.go +++ b/util/testutil/gen.go @@ -1,9 +1,12 @@ package testutil import ( + "bytes" crand "crypto/rand" + "errors" "fmt" - "math/rand" + "io" + "testing" ci "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" @@ -12,44 +15,30 @@ import ( ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -func RandPeer() peer.Peer { - id := make([]byte, 16) - crand.Read(id) - mhid := u.Hash(id) - return NewPeerWithID(peer.ID(mhid)) +func RandKeyPair(bits int) (ci.PrivKey, ci.PubKey, error) { + return ci.GenerateKeyPair(ci.RSA, bits) } -func PeerWithNewKeys() (peer.Peer, error) { - sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) - if err != nil { - return nil, err +// RandPeerID generates random "valid" peer IDs. it does not NEED to generate +// keys because it is as if we lost the key right away. fine to read randomness +// and hash it. to generate proper keys and corresponding PeerID, use: +// sk, pk, _ := testutil.RandKeyPair() +// id, _ := peer.IDFromPublicKey(pk) +func RandPeerID() (peer.ID, error) { + buf := make([]byte, 16) + if _, err := io.ReadFull(crand.Reader, buf); err != nil { + return "", err } - - return NewPeerWithKeyPair(sk, pk) + h := u.Hash(buf) + return peer.ID(h), nil } -func PeerWithKeysAndStringAddress(addr string) (peer.Peer, error) { - maddr, err := ma.NewMultiaddr(addr) +func RandPeerIDFatal(t testing.TB) peer.ID { + p, err := RandPeerID() if err != nil { - return nil, err + t.Fatal(err) } - - return PeerWithKeysAndAddress(maddr) -} - -func PeerWithKeysAndAddress(maddr ma.Multiaddr) (peer.Peer, error) { - sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) - if err != nil { - return nil, err - } - - p, err := NewPeerWithKeyPair(sk, pk) - if err != nil { - return nil, err - } - - p.AddAddress(maddr) - return p, nil + return p } // RandLocalTCPAddress returns a random multiaddr. 
it suppresses errors @@ -60,7 +49,71 @@ func RandLocalTCPAddress() ma.Multiaddr { // most ports above 10000 aren't in use by long running processes, so yay. // (maybe there should be a range of "loopback" ports that are guaranteed // to be open for the process, but naturally can only talk to self.) - addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000+rand.Intn(50000)) + if lastPort == 0 { + lastPort = 10000 + SeededRand.Intn(50000) + } + lastPort++ + + addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", lastPort) maddr, _ := ma.NewMultiaddr(addr) return maddr } + +var lastPort = 0 + +// PeerNetParams is a struct to bundle together the four things +// you need to run a connection with a peer: id, 2keys, and addr. +type PeerNetParams struct { + ID peer.ID + PrivKey ci.PrivKey + PubKey ci.PubKey + Addr ma.Multiaddr +} + +func (p *PeerNetParams) checkKeys() error { + if !p.ID.MatchesPrivateKey(p.PrivKey) { + return errors.New("p.ID does not match p.PrivKey") + } + + if !p.ID.MatchesPublicKey(p.PubKey) { + return errors.New("p.ID does not match p.PubKey") + } + + var buf bytes.Buffer + buf.Write([]byte("hello world. this is me, I swear.")) + b := buf.Bytes() + + sig, err := p.PrivKey.Sign(b) + if err != nil { + return fmt.Errorf("sig signing failed: %s", err) + } + + sigok, err := p.PubKey.Verify(b, sig) + if err != nil { + return fmt.Errorf("sig verify failed: %s", err) + } + if !sigok { + return fmt.Errorf("sig verify failed: sig invalid!") + } + + return nil // ok. move along. 
+} + +func RandPeerNetParams(t *testing.T) (p PeerNetParams) { + var err error + p.Addr = RandLocalTCPAddress() + p.PrivKey, p.PubKey, err = ci.GenerateKeyPair(ci.RSA, 512) + if err != nil { + t.Fatal(err) + } + + p.ID, err = peer.IDFromPublicKey(p.PubKey) + if err != nil { + t.Fatal(err) + } + + if err := p.checkKeys(); err != nil { + t.Fatal(err) + } + return p +} diff --git a/util/testutil/mock.go b/util/testutil/mock.go deleted file mode 100644 index 1c5d4ded2..000000000 --- a/util/testutil/mock.go +++ /dev/null @@ -1,18 +0,0 @@ -package testutil - -import ( - ic "github.com/jbenet/go-ipfs/crypto" - peer "github.com/jbenet/go-ipfs/peer" -) - -func NewPeerWithKeyPair(sk ic.PrivKey, pk ic.PubKey) (peer.Peer, error) { - return peer.NewPeerstore().WithKeyPair(sk, pk) -} - -func NewPeerWithID(id peer.ID) peer.Peer { - return peer.NewPeerstore().WithID(id) -} - -func NewPeerWithIDString(id string) peer.Peer { - return peer.NewPeerstore().WithIDString(id) -} diff --git a/util/testutil/rand.go b/util/testutil/rand.go new file mode 100644 index 000000000..9630bc144 --- /dev/null +++ b/util/testutil/rand.go @@ -0,0 +1,36 @@ +package testutil + +import ( + "math/rand" + "sync" + "time" +) + +var SeededRand *rand.Rand + +func init() { + SeededRand = NewSeededRand(time.Now().UTC().UnixNano()) +} + +func NewSeededRand(seed int64) *rand.Rand { + src := rand.NewSource(seed) + return rand.New(&LockedRandSource{src: src}) +} + +type LockedRandSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *LockedRandSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *LockedRandSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +}