kubo/test/supernode_client/main.go
Lars Gierth 1afebc21f3 gateway: clean up its surface, and remove BlockList
This patch is in preparation for the gateway's extraction.

It's interesting to trace technical debt back to its
origin, understanding the circumstances in which it
was introduced and built up, and then cutting it back
at exactly the right places.

- Clean up the gateway's surface
  The option builder GatewayOption() now takes only
  arguments which are relevant for HTTP handler muxing,
  i.e. the paths where the gateway should be mounted.
  All other configuration happens through the
  GatewayConfig object.

- Remove BlockList
  I know why this was introduced in the first place,
  but it never ended up fulfilling that purpose.
  Somehow it was only ever used by the API server,
  not the gateway, which really doesn't make sense.
  It was also never wired up with CLI nor fs-repo.
  Eventually @krl started punching holes into it
  to make the Web UI accessible.

- Remove --unrestricted-api
  This was holes being punched into BlockList too,
  for accessing /ipfs and /ipns on the API server.
  With BlockList removed and /ipfs and /ipns freely
  accessible, putting this option out of action
  is safe. With the next major release,
  the option can be removed for good.

License: MIT
Signed-off-by: Lars Gierth <larsg@systemli.org>
2016-06-19 00:52:35 +02:00

268 lines
6.2 KiB
Go

package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"os"
gopath "path"
"time"
random "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-random"
commands "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
corehttp "github.com/ipfs/go-ipfs/core/corehttp"
corerouting "github.com/ipfs/go-ipfs/core/corerouting"
"github.com/ipfs/go-ipfs/core/coreunix"
"github.com/ipfs/go-ipfs/repo"
config "github.com/ipfs/go-ipfs/repo/config"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
ds2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
"github.com/ipfs/go-ipfs/thirdparty/ipfsaddr"
unit "github.com/ipfs/go-ipfs/thirdparty/unit"
"gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
syncds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"
pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore"
logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log"
ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
// elog emits structured events; used by the cat worker to trace each fetch.
var elog = logging.Logger("gc-client")

// Command-line flags controlling the worker mode and data generation.
var (
	// cat selects the catting worker; when false the adding worker runs.
	cat = flag.Bool("cat", false, "else add")
	// seed drives the pseudo-random file contents, so adder and catter
	// started with the same seed produce/fetch identical files.
	seed            = flag.Int64("seed", 1, "")
	nBitsForKeypair = flag.Int("b", 1024, "number of bits for keypair (if repo is uninitialized)")
)
// main parses flags, runs the client, and exits non-zero on failure.
func main() {
	flag.Parse()
	err := run()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "error: %s\n", err)
	os.Exit(1)
}
// run boots a supernode-routed IPFS node from a repo in the current working
// directory, starts either the file-adding or file-catting worker (per the
// -cat flag), and then serves the HTTP API + gateway until the listener
// fails or the node shuts down.
func run() error {
	servers := config.DefaultSNRServers
	fmt.Println("using gcr remotes:")
	for _, p := range servers {
		fmt.Println("\t", p)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	repoPath := gopath.Join(cwd, config.DefaultPathName)

	// Fix: previously the error here was checked but the if-body was empty,
	// silently discarding a failed repo initialization.
	if err := ensureRepoInitialized(repoPath); err != nil {
		return err
	}

	// Local renamed from `repo` to `r` to stop shadowing the imported
	// repo package.
	r, err := fsrepo.Open(repoPath)
	if err != nil { // owned by node
		return err
	}

	cfg, err := r.Config()
	if err != nil {
		return err
	}
	// Point bootstrapping at the supernode remotes.
	cfg.Bootstrap = servers
	if err := r.SetConfig(cfg); err != nil {
		return err
	}

	// Parse the remote addresses into structured form.
	var addrs []ipfsaddr.IPFSAddr
	for _, info := range servers {
		addr, err := ipfsaddr.ParseString(info)
		if err != nil {
			return err
		}
		addrs = append(addrs, addr)
	}

	// Convert to peerstore infos for the supernode routing client.
	var infos []pstore.PeerInfo
	for _, addr := range addrs {
		infos = append(infos, pstore.PeerInfo{
			ID:    addr.ID(),
			Addrs: []ma.Multiaddr{addr.Transport()},
		})
	}

	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online:  true,
		Repo:    r,
		Routing: corerouting.SupernodeClient(infos...),
	})
	if err != nil {
		return err
	}
	defer node.Close()

	opts := []corehttp.ServeOption{
		corehttp.CommandsOption(cmdCtx(node, repoPath)),
		corehttp.GatewayOption(),
	}

	if *cat {
		if err := runFileCattingWorker(ctx, node); err != nil {
			return err
		}
	} else {
		if err := runFileAddingWorker(node); err != nil {
			return err
		}
	}

	// Blocks serving the API until it errors out.
	return corehttp.ListenAndServe(node, cfg.Addresses.API, opts...)
}
// ensureRepoInitialized creates an fsrepo at path if one does not already
// exist; an existing repo is left untouched. The new identity keypair uses
// the -b flag's bit size.
func ensureRepoInitialized(path string) error {
	if fsrepo.IsInitialized(path) {
		return nil
	}
	conf, err := config.Init(ioutil.Discard, *nBitsForKeypair)
	if err != nil {
		return err
	}
	return fsrepo.Init(path, conf)
}
// sizeOfIthFile reports the size, in bytes, of the i-th generated file:
// 2^i kilobytes, so each file doubles the previous one.
func sizeOfIthFile(i int64) int64 {
	var kb int64 = unit.KB
	return kb << uint64(i)
}
// runFileAddingWorker spawns a goroutine that adds one pseudo-random file
// per second to n, with sizes doubling each iteration (2^i KB), and then
// blocks the caller draining the error channel; any reported error is
// fatal to the process. NOTE(review): errs is unbuffered, so a worker
// error blocks the worker until the drain loop below receives it.
func runFileAddingWorker(n *core.IpfsNode) error {
	errs := make(chan error)
	go func() {
		var i int64
		for i = 1; i < math.MaxInt32; i++ {
			// Stream the random bytes through a pipe so the whole file
			// never has to sit in memory at once.
			piper, pipew := io.Pipe()
			go func() {
				defer pipew.Close()
				if err := random.WritePseudoRandomBytes(sizeOfIthFile(i), pipew, *seed); err != nil {
					errs <- err
				}
			}()
			// Add blocks reading from piper until the writer closes it.
			// NOTE(review): the writer goroutine captures loop variable i;
			// under pre-1.22 Go semantics this could race if the writer
			// outlived the iteration — confirm Add always drains the pipe.
			k, err := coreunix.Add(n, piper)
			if err != nil {
				errs <- err
			}
			log.Println("added file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
			time.Sleep(1 * time.Second)
		}
	}()

	var i int64
	for i = 0; i < math.MaxInt32; i++ {
		err := <-errs
		if err != nil {
			log.Fatal(err)
		}
	}
	// Effectively unreachable: the loop only exits after MaxInt32 errors.
	return nil
}
// runFileCattingWorker spawns a goroutine that, once per second, generates
// the same pseudo-random files the adding worker would produce, adds each
// to a throwaway in-memory node solely to learn its key, then cats that key
// through n. The caller then blocks on the error channel; the first error
// received is fatal. NOTE(review): on success the worker loops forever, so
// this function only returns via log.Fatal's os.Exit.
func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error {
	conf, err := config.Init(ioutil.Discard, *nBitsForKeypair)
	if err != nil {
		return err
	}

	// Offline mock-repo node used only for key discovery; never networked.
	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: *conf,
	}
	dummy, err := core.NewNode(ctx, &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return err
	}

	errs := make(chan error)
	go func() {
		defer dummy.Close()
		var i int64 = 1
		for {
			buf := new(bytes.Buffer)
			if err := random.WritePseudoRandomBytes(sizeOfIthFile(i), buf, *seed); err != nil {
				errs <- err
			}
			// add to a dummy node to discover the key
			k, err := coreunix.Add(dummy, bytes.NewReader(buf.Bytes()))
			if err != nil {
				errs <- err
			}
			// Trace each cat attempt as a timed event.
			e := elog.EventBegin(ctx, "cat", logging.LoggableF(func() map[string]interface{} {
				return map[string]interface{}{
					"key":       k,
					"localPeer": n.Identity,
				}
			}))
			if r, err := coreunix.Cat(ctx, n, k); err != nil {
				// Failed cats are logged but not fatal, and i is NOT
				// advanced — the same file is retried next tick.
				e.Done()
				log.Printf("failed to cat file. seed: %d #%d key: %s err: %s", *seed, i, k, err)
			} else {
				log.Println("found file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				// Drain the reader fully before declaring the cat done.
				io.Copy(ioutil.Discard, r)
				e.Done()
				log.Println("catted file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				i++
			}
			time.Sleep(time.Second)
		}
	}()

	err = <-errs
	if err != nil {
		log.Fatal(err)
	}
	// Unreachable in practice: only a non-nil error is ever sent on errs.
	return nil
}
// toPeerInfos converts a list of configured bootstrap peers into peerstore
// infos, stopping at the first conversion error.
func toPeerInfos(bpeers []config.BootstrapPeer) ([]pstore.PeerInfo, error) {
	var peers []pstore.PeerInfo
	for _, bp := range bpeers {
		info, err := toPeerInfo(bp)
		if err != nil {
			return nil, err
		}
		peers = append(peers, info)
	}
	return peers, nil
}
// toPeerInfo builds a single-address PeerInfo from one bootstrap peer entry.
func toPeerInfo(bootstrap config.BootstrapPeer) (pstore.PeerInfo, error) {
	info := pstore.PeerInfo{
		ID:    bootstrap.ID(),
		Addrs: []ma.Multiaddr{bootstrap.Multiaddr()},
	}
	return info, nil
}
// cmdCtx builds a commands.Context bound to an already-constructed node:
// config loads come from the node's repo, and node construction simply
// hands back the existing node.
func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context {
	loadConfig := func(path string) (*config.Config, error) {
		return node.Repo.Config()
	}
	constructNode := func() (*core.IpfsNode, error) {
		return node, nil
	}
	return commands.Context{
		Online:        true,
		ConfigRoot:    repoPath,
		LoadConfig:    loadConfig,
		ConstructNode: constructNode,
	}
}