benchmark secure channel

This commit is contained in:
Jeromy 2014-10-26 08:01:33 +00:00
parent c7ef55a450
commit 317ca2f865
6 changed files with 196 additions and 26 deletions

View File

@ -37,6 +37,7 @@ func NewBlockService(d ds.Datastore, rem exchange.Interface) (*BlockService, err
// AddBlock adds a particular block to the service, Putting it into the datastore.
func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
k := b.Key()
log.Debugf("blockservice: storing [%s] in datastore", k)
// TODO(brian): define a block datastore with a Put method which accepts a
// block parameter

View File

@ -275,6 +275,7 @@ func (s *SecurePipe) handleSecureOut(hashType string, mIV, mCKey, mMKey []byte)
myMac, macSize := makeMac(hashType, mMKey)
basebuf := make([]byte, 1<<22)
for {
var data []byte
ok := true
@ -294,19 +295,23 @@ func (s *SecurePipe) handleSecureOut(hashType string, mIV, mCKey, mMKey []byte)
continue
}
buff := make([]byte, len(data)+macSize)
buff := basebuf[:len(data)+macSize]
myCipher.XORKeyStream(buff, data)
myMac.Write(buff[0:len(data)])
copy(buff[len(data):], myMac.Sum(nil))
myMac.Reset()
encData(data, buff, myCipher, myMac)
// log.Debug("[peer %s] secure out [to = %s] %d", s.local, s.remote, len(buff))
s.insecure.Out <- buff
}
}
func encData(data, buff []byte, ciph cipher.Stream, mac hash.Hash) {
ciph.XORKeyStream(buff, data)
mac.Write(buff[0:len(data)])
copy(buff[len(data):], mac.Sum(nil))
mac.Reset()
}
// Determines which algorithm to use. Note: f(a, b) = f(b, a)
func selectBest(myPrefs, theirPrefs string) (string, error) {
// Person with greatest hash gets first choice.

161
crypto/spipe/spipe_test.go Normal file
View File

@ -0,0 +1,161 @@
package spipe
import (
"testing"
"code.google.com/p/go.net/context"
ci "github.com/jbenet/go-ipfs/crypto"
"github.com/jbenet/go-ipfs/peer"
"github.com/jbenet/go-ipfs/util"
)
// getPeer generates a fresh RSA-1024 key pair and returns the peer
// identity derived from it. Any failure aborts the test or benchmark.
func getPeer(tb testing.TB) peer.Peer {
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, 1024)
	if err != nil {
		tb.Fatal(err)
	}

	p, err := peer.WithKeyPair(sk, pk)
	if err != nil {
		tb.Fatal(err)
	}
	return p
}
// bindDuplexNoCopy cross-wires two Duplexes: messages emitted on a.Out
// are delivered to b.In and vice versa. Buffers are forwarded as-is
// (shared, not copied). The a→b pump runs on the calling goroutine and
// returns when a.Out is closed; the b→a pump runs in its own goroutine.
func bindDuplexNoCopy(a, b Duplex) {
	go func() {
		for msg := range b.Out {
			a.In <- msg
		}
	}()

	for msg := range a.Out {
		b.In <- msg
	}
}
// bindDuplexWithCopy cross-wires two Duplexes like bindDuplexNoCopy, but
// each forwarded message is duplicated first, so sender and receiver
// never share a backing array. The a→b pump runs on the calling
// goroutine until a.Out is closed; b→a runs in its own goroutine.
func bindDuplexWithCopy(a, b Duplex) {
	clone := func(src []byte) []byte {
		dst := make([]byte, len(src))
		copy(dst, src)
		return dst
	}

	go func() {
		for msg := range b.Out {
			a.In <- clone(msg)
		}
	}()

	for msg := range a.Out {
		b.In <- clone(msg)
	}
}
// BenchmarkDataEncryptDefault measures secure-channel throughput with the
// full default negotiation lists (all supported curves, ciphers, hashes).
// NOTE(review): this mutates package-level configuration, so the spipe
// benchmarks must not run concurrently with each other.
func BenchmarkDataEncryptDefault(b *testing.B) {
SupportedExchanges = "P-256,P-224,P-384,P-521"
SupportedCiphers = "AES-256,AES-128"
SupportedHashes = "SHA256,SHA512,SHA1"
runEncryptBenchmark(b)
}
// BenchmarkDataEncryptLite measures secure-channel throughput with the
// cheapest single option in each negotiation list (P-256 / AES-128 /
// SHA1), for comparison against the default configuration above.
// NOTE(review): mutates package-level configuration; see the note on
// BenchmarkDataEncryptDefault.
func BenchmarkDataEncryptLite(b *testing.B) {
SupportedExchanges = "P-256"
SupportedCiphers = "AES-128"
SupportedHashes = "SHA1"
runEncryptBenchmark(b)
}
// runEncryptBenchmark benchmarks secure-channel throughput: it handshakes
// two SecurePipes over an in-memory Duplex pair, then times pushing
// 512 KiB messages from one side to the other.
func runEncryptBenchmark(b *testing.B) {
	pstore := peer.NewPeerstore()
	ctx := context.TODO()
	bufsize := 1024 * 1024

	pa := getPeer(b)
	pb := getPeer(b)

	duplexa := Duplex{
		In:  make(chan []byte),
		Out: make(chan []byte),
	}
	duplexb := Duplex{
		In:  make(chan []byte),
		Out: make(chan []byte),
	}

	go bindDuplexNoCopy(duplexa, duplexb)

	// Handshake both ends concurrently: NewSecurePipe blocks until the
	// remote side responds, so one side must run on its own goroutine.
	// b.Fatal must not be called from that goroutine (testing.FailNow is
	// only valid on the benchmark goroutine), so the error is reported
	// back over a channel and checked here instead.
	var spb *SecurePipe
	handshakeErr := make(chan error, 1)
	go func() {
		var err error
		spb, err = NewSecurePipe(ctx, bufsize, pb, pstore, duplexb)
		handshakeErr <- err
	}()

	spa, err := NewSecurePipe(ctx, bufsize, pa, pstore, duplexa)
	if err != nil {
		b.Fatal(err)
	}
	if err := <-handshakeErr; err != nil {
		b.Fatal(err)
	}

	// Drain the receiving side, signalling each delivery.
	done := make(chan struct{})
	go func() {
		for range spa.In {
			// Throw it all away,
			// all of your hopes and dreams
			// piped out to /dev/null...
			done <- struct{}{}
		}
	}()

	data := make([]byte, 1024*512)
	util.NewFastRand().Read(data)

	// Begin actual benchmarking. SetBytes is loop-invariant, so set it
	// once before the timed region rather than on every iteration.
	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		spb.Out <- data
		<-done
	}
}
// BenchmarkDataTransfer measures the raw in-memory channel-forwarding
// baseline (with a defensive copy per message, but no encryption), for
// comparison against the secure-channel benchmarks above.
func BenchmarkDataTransfer(b *testing.B) {
	duplexa := Duplex{
		In:  make(chan []byte),
		Out: make(chan []byte),
	}
	duplexb := Duplex{
		In:  make(chan []byte),
		Out: make(chan []byte),
	}

	go bindDuplexWithCopy(duplexa, duplexb)

	// Drain the receiving side, signalling each delivery.
	done := make(chan struct{})
	go func() {
		for range duplexa.In {
			// Throw it all away,
			// all of your hopes and dreams
			// piped out to /dev/null...
			done <- struct{}{}
		}
	}()

	data := make([]byte, 1024*512)
	util.NewFastRand().Read(data)

	// Begin actual benchmarking. SetBytes is loop-invariant, so set it
	// once before the timed region rather than on every iteration.
	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		duplexb.Out <- data
		<-done
	}
}

View File

@ -1,6 +1,8 @@
package bitswap
import (
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
@ -67,6 +69,10 @@ type bitswap struct {
// TODO ensure only one active request per key
func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) {
log.Debugf("Get Block %v", k)
now := time.Now()
defer func() {
log.Errorf("GetBlock took %f secs", time.Now().Sub(now).Seconds())
}()
ctx, cancelFunc := context.WithCancel(parent)
bs.wantlist.Add(k)
@ -160,7 +166,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm
go func(block blocks.Block) {
err := bs.HasBlock(ctx, block) // FIXME err ignored
if err != nil {
log.Errorf("HasBlock errored: %s", err)
log.Warningf("HasBlock errored: %s", err)
}
}(block)
}

View File

@ -36,6 +36,7 @@ func NewDagFromReaderWithSplitter(r io.Reader, spl chunk.BlockSplitter) (*dag.No
mbf := new(ft.MultiBlock)
for blk := range blkChan {
log.Debugf("created block, size %d", len(blk))
mbf.AddBlockSize(uint64(len(blk)))
child := &dag.Node{Data: ft.WrapData(blk)}
err := root.AddNodeLink("", child)

View File

@ -7,16 +7,12 @@ import (
"testing"
"time"
tu "github.com/jbenet/go-ipfs/util/testutil"
peer "github.com/jbenet/go-ipfs/peer"
)
func _randPeer() peer.Peer {
id := make(peer.ID, 16)
crand.Read(id)
return peer.WithID(id)
}
func _randID() ID {
func RandID() ID {
buf := make([]byte, 16)
crand.Read(buf)
@ -30,11 +26,11 @@ func TestBucket(t *testing.T) {
peers := make([]peer.Peer, 100)
for i := 0; i < 100; i++ {
peers[i] = _randPeer()
peers[i] = tu.RandPeer()
b.pushFront(peers[i])
}
local := _randPeer()
local := tu.RandPeer()
localID := ConvertPeerID(local.ID())
i := rand.Intn(len(peers))
@ -65,12 +61,12 @@ func TestBucket(t *testing.T) {
// Right now, this just makes sure that it doesnt hang or crash
func TestTableUpdate(t *testing.T) {
local := _randPeer()
local := tu.RandPeer()
rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour)
peers := make([]peer.Peer, 100)
for i := 0; i < 100; i++ {
peers[i] = _randPeer()
peers[i] = tu.RandPeer()
}
// Testing Update
@ -82,7 +78,7 @@ func TestTableUpdate(t *testing.T) {
}
for i := 0; i < 100; i++ {
id := _randID()
id := RandID()
ret := rt.NearestPeers(id, 5)
if len(ret) == 0 {
t.Fatal("Failed to find node near ID.")
@ -91,12 +87,12 @@ func TestTableUpdate(t *testing.T) {
}
func TestTableFind(t *testing.T) {
local := _randPeer()
local := tu.RandPeer()
rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour)
peers := make([]peer.Peer, 100)
for i := 0; i < 5; i++ {
peers[i] = _randPeer()
peers[i] = tu.RandPeer()
rt.Update(peers[i])
}
@ -108,12 +104,12 @@ func TestTableFind(t *testing.T) {
}
func TestTableFindMultiple(t *testing.T) {
local := _randPeer()
local := tu.RandPeer()
rt := NewRoutingTable(20, ConvertPeerID(local.ID()), time.Hour)
peers := make([]peer.Peer, 100)
for i := 0; i < 18; i++ {
peers[i] = _randPeer()
peers[i] = tu.RandPeer()
rt.Update(peers[i])
}
@ -132,7 +128,7 @@ func TestTableMultithreaded(t *testing.T) {
tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour)
var peers []peer.Peer
for i := 0; i < 500; i++ {
peers = append(peers, _randPeer())
peers = append(peers, tu.RandPeer())
}
done := make(chan struct{})
@ -171,7 +167,7 @@ func BenchmarkUpdates(b *testing.B) {
var peers []peer.Peer
for i := 0; i < b.N; i++ {
peers = append(peers, _randPeer())
peers = append(peers, tu.RandPeer())
}
b.StartTimer()
@ -187,7 +183,7 @@ func BenchmarkFinds(b *testing.B) {
var peers []peer.Peer
for i := 0; i < b.N; i++ {
peers = append(peers, _randPeer())
peers = append(peers, tu.RandPeer())
tab.Update(peers[i])
}