fix error style

License: MIT
Signed-off-by: Łukasz Magiera <magik6k@gmail.com>
This commit is contained in:
Łukasz Magiera 2018-03-18 19:54:46 +01:00
parent fa9489ef72
commit 2ee5cfb90f
44 changed files with 71 additions and 69 deletions

View File

@ -82,7 +82,7 @@ environment variable:
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) {
cctx := env.(*oldcmds.Context)
if cctx.Online {
res.SetError(errors.New("init must be run offline only!"), cmdkit.ErrNormal)
res.SetError(errors.New("init must be run offline only"), cmdkit.ErrNormal)
return
}

View File

@ -113,7 +113,7 @@ func main() {
func setupPeer(a args) (peer.ID, pstore.Peerstore, error) {
if a.keybits < 1024 {
return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.")
return "", nil, errors.New("bitsize less than 1024 is considered unsafe")
}
out("generating key pair...")

View File

@ -173,20 +173,20 @@ func convertOptions(req *cmds.Request) error {
if len(str) == 0 {
value = "empty value"
}
return fmt.Errorf("Could not convert %q to type %q (for option %q)",
return fmt.Errorf("could not convert %q to type %q (for option %q)",
value, opt.Type().String(), "-"+k)
}
req.Options[k] = val
} else {
return fmt.Errorf("Option %q should be type %q, but got type %q",
return fmt.Errorf("option %q should be type %q, but got type %q",
k, opt.Type().String(), kind.String())
}
}
for _, name := range opt.Names() {
if _, ok := req.Options[name]; name != k && ok {
return fmt.Errorf("Duplicate command options were provided (%q and %q)",
return fmt.Errorf("duplicate command options were provided (%q and %q)",
k, name)
}
}

View File

@ -44,7 +44,7 @@ var CatCmd = &cmds.Command{
offset, _ := req.Options["offset"].(int)
if offset < 0 {
res.SetError(fmt.Errorf("Cannot specify negative offset."), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("cannot specify negative offset"), cmdkit.ErrNormal)
return
}
@ -54,7 +54,7 @@ var CatCmd = &cmds.Command{
return
}
if max < 0 {
res.SetError(fmt.Errorf("Cannot specify negative length."), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("cannot specify negative length"), cmdkit.ErrNormal)
return
}
if !found {

View File

@ -354,7 +354,7 @@ func transformConfig(configRoot string, configName string, transformer config.Tr
func getConfig(r repo.Repo, key string) (*ConfigField, error) {
value, err := r.GetConfigKey(key)
if err != nil {
return nil, fmt.Errorf("Failed to get config value: %q", err)
return nil, fmt.Errorf("failed to get config value: %q", err)
}
return &ConfigField{
Key: key,
@ -392,7 +392,7 @@ func replaceConfig(r repo.Repo, file io.Reader) error {
keyF, err := getConfig(r, config.PrivKeySelector)
if err != nil {
return fmt.Errorf("Failed to get PrivKey")
return fmt.Errorf("failed to get PrivKey")
}
pkstr, ok := keyF.Value.(string)

View File

@ -170,7 +170,7 @@ var findProvidersDhtCmd = &cmds.Command{
return
}
if numProviders < 1 {
res.SetError(fmt.Errorf("Number of providers must be greater than 0"), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("number of providers must be greater than 0"), cmdkit.ErrNormal)
return
}

View File

@ -21,7 +21,7 @@ import (
"gx/ipfs/QmfAkMSt9Fwzk48QDJecPcwCUjnf2uG7MLnmCGTp4C6ouL/go-ipfs-cmds"
)
var ErrInvalidCompressionLevel = errors.New("Compression level must be between 1 and 9")
var ErrInvalidCompressionLevel = errors.New("compression level must be between 1 and 9")
var GetCmd = &cmds.Command{
Helptext: cmdkit.HelpText{

View File

@ -156,7 +156,7 @@ EXAMPLE:
func printPeer(ps pstore.Peerstore, p peer.ID) (interface{}, error) {
if p == "" {
return nil, errors.New("Attempted to print nil peer!")
return nil, errors.New("attempted to print nil peer")
}
info := new(IdOutput)

View File

@ -665,7 +665,7 @@ func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error)
}
dagnode.SetData(data)
default:
return nil, fmt.Errorf("Unkown data field encoding")
return nil, fmt.Errorf("unknown data field encoding")
}
links := make([]*ipld.Link, len(nd.Links))

View File

@ -297,7 +297,7 @@ Example:
switch typeStr {
case "all", "direct", "indirect", "recursive":
default:
err = fmt.Errorf("Invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
res.SetError(err, cmdkit.ErrClient)
return
}
@ -447,7 +447,7 @@ var verifyPinCmd = &cmds.Command{
quiet, _, _ := res.Request().Option("quiet").Bool()
if verbose && quiet {
res.SetError(fmt.Errorf("The --verbose and --quiet options can not be used at the same time"), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("the --verbose and --quiet options can not be used at the same time"), cmdkit.ErrNormal)
}
opts := pinVerifyOpts{

View File

@ -19,7 +19,7 @@ import (
"gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit"
)
var errNotOnline = errors.New("This command must be run in online mode. Try running 'ipfs daemon' first.")
var errNotOnline = errors.New("this command must be run in online mode. Try running 'ipfs daemon' first")
var PublishCmd = &cmds.Command{
Helptext: cmdkit.HelpText{

View File

@ -86,7 +86,7 @@ This command outputs data in the following encodings:
}
if n.Floodsub == nil {
res.SetError(fmt.Errorf("experimental pubsub feature not enabled. Run daemon with --enable-pubsub-experiment to use."), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("experimental pubsub feature not enabled. Run daemon with --enable-pubsub-experiment to use"), cmdkit.ErrNormal)
return
}
@ -323,7 +323,7 @@ To use, the daemon must be run with '--enable-pubsub-experiment'.
}
if n.Floodsub == nil {
res.SetError(fmt.Errorf("experimental pubsub feature not enabled. Run daemon with --enable-pubsub-experiment to use."), cmdkit.ErrNormal)
res.SetError(fmt.Errorf("experimental pubsub feature not enabled. Run daemon with --enable-pubsub-experiment to use"), cmdkit.ErrNormal)
return
}

View File

@ -696,7 +696,7 @@ func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) {
func (n *IpfsNode) LoadPrivateKey() error {
if n.Identity == "" || n.Peerstore == nil {
return errors.New("loaded private key out of order.")
return errors.New("loaded private key out of order")
}
if n.PrivateKey != nil {
@ -826,7 +826,7 @@ func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
for _, addr := range cfg.Addresses.Swarm {
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("Failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm)
return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm)
}
listen = append(listen, maddr)
}

View File

@ -300,7 +300,7 @@ func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error)
}
dagnode.SetData(data)
default:
return nil, fmt.Errorf("Unkown data field encoding")
return nil, fmt.Errorf("unknown data field encoding")
}
links := make([]*ipld.Link, len(nd.Links))

View File

@ -18,7 +18,7 @@ import (
var log = logging.Logger("corerepo")
var ErrMaxStorageExceeded = errors.New("Maximum storage limit exceeded. Maybe unpin some files?")
var ErrMaxStorageExceeded = errors.New("maximum storage limit exceeded. Try to unpin some files")
type GC struct {
Node *core.IpfsNode

View File

@ -88,7 +88,7 @@ func (n *network) SendMessage(
receiver, ok := n.clients[to]
if !ok {
return errors.New("Cannot locate peer on network")
return errors.New("cannot locate peer on network")
}
// nb: terminate the context since the context wouldn't actually be passed
@ -107,7 +107,7 @@ func (n *network) SendMessage(
func (n *network) deliver(
r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error {
if message == nil || from == "" {
return errors.New("Invalid input")
return errors.New("invalid input")
}
n.delay.Wait()

View File

@ -82,7 +82,7 @@ func (rp *Reprovider) Run(tick time.Duration) {
func (rp *Reprovider) Reprovide() error {
keychan, err := rp.keyProvider(rp.ctx)
if err != nil {
return fmt.Errorf("Failed to get key chan: %s", err)
return fmt.Errorf("failed to get key chan: %s", err)
}
for c := range keychan {
// hash security

View File

@ -564,7 +564,7 @@ func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDi
return fuse.EPERM
default:
log.Error("Unknown node type for rename target dir!")
return errors.New("Unknown fs node type!")
return errors.New("unknown fs node type")
}
return nil
}

View File

@ -81,7 +81,7 @@ func (m *mount) mount() error {
// wait for the mount process to be done, or timed out.
select {
case <-time.After(MountTimeout):
return fmt.Errorf("Mounting %s timed out.", m.MountPoint())
return fmt.Errorf("mounting %s timed out", m.MountPoint())
case err := <-errs:
return err
case <-m.fuseConn.Ready:

View File

@ -82,7 +82,7 @@ func ForceUnmountManyTimes(m Mount, attempts int) error {
<-time.After(time.Millisecond * 500)
}
return fmt.Errorf("Unmount %s failed after 10 seconds of trying.", m.MountPoint())
return fmt.Errorf("unmount %s failed after 10 seconds of trying", m.MountPoint())
}
type closer struct {

View File

@ -183,7 +183,7 @@ func TestIpfsStressRead(t *testing.T) {
}
if !bytes.Equal(rbuf, data) {
errs <- errors.New("Incorrect Read!")
errs <- errors.New("incorrect read")
}
}
}()

View File

@ -135,7 +135,7 @@ func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
a.Mode = 0777 | os.ModeSymlink
a.Size = uint64(len(s.cached.GetData()))
default:
return fmt.Errorf("Invalid data type - %s", s.cached.GetType())
return fmt.Errorf("invalid data type - %s", s.cached.GetType())
}
return nil
}

View File

@ -103,11 +103,11 @@ func TestTwoChunks(t *testing.T) {
func arrComp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b))
return fmt.Errorf("arrays differ in length. %d != %d", len(a), len(b))
}
for i, v := range a {
if v != b[i] {
return fmt.Errorf("Arrays differ at index: %d", i)
return fmt.Errorf("arrays differ at index: %d", i)
}
}
return nil

View File

@ -141,11 +141,11 @@ func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) {
func arrComp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b))
return fmt.Errorf("arrays differ in length. %d != %d", len(a), len(b))
}
for i, v := range a {
if v != b[i] {
return fmt.Errorf("Arrays differ at index: %d", i)
return fmt.Errorf("arrays differ at index: %d", i)
}
}
return nil

View File

@ -283,7 +283,7 @@ func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
}
if pbn.GetType() != ft.TRaw {
return errors.New("Expected raw block")
return errors.New("expected raw block")
}
if p.RawLeaves {

View File

@ -250,7 +250,7 @@ func assertDirContents(dir string, exp []string) error {
}
if len(finfos) != len(exp) {
return fmt.Errorf("Expected %d directory entries", len(exp))
return fmt.Errorf("expected %d directory entries", len(exp))
}
var names []string

View File

@ -21,7 +21,7 @@ import (
func (n *ProtoNode) unmarshal(encoded []byte) error {
var pbn pb.PBNode
if err := pbn.Unmarshal(encoded); err != nil {
return fmt.Errorf("Unmarshal failed. %v", err)
return fmt.Errorf("unmarshal failed. %v", err)
}
pbnl := pbn.GetLinks()
@ -30,7 +30,7 @@ func (n *ProtoNode) unmarshal(encoded []byte) error {
n.links[i] = &ipld.Link{Name: l.GetName(), Size: l.GetTsize()}
c, err := cid.Cast(l.GetHash())
if err != nil {
return fmt.Errorf("Link hash #%d is not valid multihash. %v", i, err)
return fmt.Errorf("link hash #%d is not valid multihash. %v", i, err)
}
n.links[i].Cid = c
}
@ -47,7 +47,7 @@ func (n *ProtoNode) Marshal() ([]byte, error) {
pbn := n.getPBNode()
data, err := pbn.Marshal()
if err != nil {
return data, fmt.Errorf("Marshal failed. %v", err)
return data, fmt.Errorf("marshal failed. %v", err)
}
return data, nil
}
@ -123,9 +123,9 @@ func DecodeProtobufBlock(b blocks.Block) (ipld.Node, error) {
decnd, err := DecodeProtobuf(b.RawData())
if err != nil {
if strings.Contains(err.Error(), "unmarshal failed") {
return nil, fmt.Errorf("The block referred to by '%s' was not a valid merkledag node", c)
return nil, fmt.Errorf("the block referred to by '%s' was not a valid merkledag node", c)
}
return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err)
return nil, fmt.Errorf("failed to decode Protocol Buffers: %v", err)
}
decnd.cached = c

View File

@ -74,7 +74,7 @@ func (n *dagService) Get(ctx context.Context, c *cid.Cid) (ipld.Node, error) {
if err == bserv.ErrNotFound {
return nil, ipld.ErrNotFound
}
return nil, fmt.Errorf("Failed to get block for %s: %v", c, err)
return nil, fmt.Errorf("failed to get block for %s: %v", c, err)
}
return ipld.Decode(b)

View File

@ -239,7 +239,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
}
if !bytes.Equal(datagot, expected) {
errs <- errors.New("Got bad data back!")
errs <- errors.New("got bad data back")
}
}(i)
}

View File

@ -108,7 +108,7 @@ func assertDirAtPath(root *Directory, pth string, children []string) error {
sort.Strings(children)
sort.Strings(names)
if !compStrArrs(children, names) {
return errors.New("directories children did not match!")
return errors.New("directories children did not match")
}
return nil
@ -158,7 +158,7 @@ func assertFileAtPath(ds ipld.DAGService, root *Directory, expn ipld.Node, pth s
file, ok := finaln.(*File)
if !ok {
return fmt.Errorf("%s was not a file!", pth)
return fmt.Errorf("%s was not a file", pth)
}
rfd, err := file.Open(OpenReadOnly, false)
@ -177,7 +177,7 @@ func assertFileAtPath(ds ipld.DAGService, root *Directory, expn ipld.Node, pth s
}
if !bytes.Equal(out, expbytes) {
return fmt.Errorf("Incorrect data at path!")
return fmt.Errorf("incorrect data at path")
}
return nil
}
@ -616,7 +616,7 @@ func randomFile(d *Directory) (*File, error) {
fi, ok := fsn.(*File)
if !ok {
return nil, errors.New("file wasnt a file, race?")
return nil, errors.New("file wasn't a file, race?")
}
return fi, nil
@ -889,7 +889,7 @@ func readFile(rt *Root, path string, offset int64, buf []byte) error {
return err
}
if nread != len(buf) {
return fmt.Errorf("didnt read enough!")
return fmt.Errorf("didn't read enough")
}
return fd.Close()

View File

@ -14,7 +14,7 @@ type mockDNS struct {
func (m *mockDNS) lookupTXT(name string) (txt []string, err error) {
txt, ok := m.entries[name]
if !ok {
return nil, fmt.Errorf("No TXT entry for %s", name)
return nil, fmt.Errorf("no TXT entry for %s", name)
}
return txt, nil
}

View File

@ -37,18 +37,19 @@ import (
opts "github.com/ipfs/go-ipfs/namesys/opts"
path "github.com/ipfs/go-ipfs/path"
ci "gx/ipfs/QmaPbCnUMBohSGo3KnxEa2bHqyJVVeEEcwtqJAYxerieBo/go-libp2p-crypto"
)
// ErrResolveFailed signals an error when attempting to resolve.
var ErrResolveFailed = errors.New("Could not resolve name.")
var ErrResolveFailed = errors.New("could not resolve name")
// ErrResolveRecursion signals a recursion-depth limit.
var ErrResolveRecursion = errors.New(
"Could not resolve name (recursion limit exceeded).")
"could not resolve name (recursion limit exceeded)")
// ErrPublishFailed signals an error when attempting to publish.
var ErrPublishFailed = errors.New("Could not publish name.")
var ErrPublishFailed = errors.New("could not publish name")
// Namesys represents a cohesive name publishing and resolving system.
//

View File

@ -24,7 +24,7 @@ func testResolution(t *testing.T, resolver Resolver, name string, depth uint, ex
p, err := resolver.Resolve(context.Background(), name, opts.Depth(depth))
if err != expError {
t.Fatal(fmt.Errorf(
"Expected %s with a depth of %d to have a '%s' error, but got '%s'",
"expected %s with a depth of %d to have a '%s' error, but got '%s'",
name, depth, expError, err))
}
if p.String() != expected {

View File

@ -203,7 +203,7 @@ func (r *PubsubResolver) resolveOnce(ctx context.Context, name string, options *
id := peer.ID(hash)
if r.host.Peerstore().PrivKey(id) != nil {
return "", errors.New("Cannot resolve own name through pubsub")
return "", errors.New("cannot resolve own name through pubsub")
}
pubk := id.ExtractPublicKey()

View File

@ -131,7 +131,7 @@ func verifyCanResolve(r Resolver, name string, exp path.Path) error {
}
if res != exp {
return errors.New("got back wrong record!")
return errors.New("got back wrong record")
}
return nil

View File

@ -305,7 +305,7 @@ func (p *pinner) isPinnedWithType(c *cid.Cid, mode Mode) (string, bool, error) {
switch mode {
case Any, Direct, Indirect, Recursive, Internal:
default:
err := fmt.Errorf("Invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}",
err := fmt.Errorf("invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}",
mode, Direct, Indirect, Recursive, Internal, Any)
return "", false, err
}

View File

@ -93,7 +93,7 @@ func FromMap(v map[string]interface{}) (*Config, error) {
}
var conf Config
if err := json.NewDecoder(buf).Decode(&conf); err != nil {
return nil, fmt.Errorf("Failure to decode config: %s", err)
return nil, fmt.Errorf("failure to decode config: %s", err)
}
return &conf, nil
}
@ -105,7 +105,7 @@ func ToMap(conf *Config) (map[string]interface{}, error) {
}
var m map[string]interface{}
if err := json.NewDecoder(buf).Decode(&m); err != nil {
return nil, fmt.Errorf("Failure to decode config: %s", err)
return nil, fmt.Errorf("failure to decode config: %s", err)
}
return m, nil
}

View File

@ -153,7 +153,7 @@ func identityConfig(out io.Writer, nbits int) (Identity, error) {
// TODO guard higher up
ident := Identity{}
if nbits < 1024 {
return ident, errors.New("Bitsize less than 1024 is considered unsafe.")
return ident, errors.New("bitsize less than 1024 is considered unsafe")
}
fmt.Fprintf(out, "generating %v-bit RSA keypair...", nbits)

View File

@ -50,7 +50,7 @@ See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md for details.`
var (
ErrNoVersion = errors.New("no version file found, please run 0-to-1 migration tool.\n" + migrationInstructions)
ErrOldRepo = errors.New("ipfs repo found in old '~/.go-ipfs' location, please run migration tool.\n" + migrationInstructions)
ErrNeedMigration = errors.New("ipfs repo needs migration.")
ErrNeedMigration = errors.New("ipfs repo needs migration")
)
type NoRepoError struct {
@ -412,7 +412,7 @@ func (r *FSRepo) openDatastore() error {
return err
}
if oldSpec != spec.String() {
return fmt.Errorf("Datastore configuration of '%s' does not match what is on disk '%s'",
return fmt.Errorf("datastore configuration of '%s' does not match what is on disk '%s'",
oldSpec, spec.String())
}
@ -633,7 +633,7 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error {
default:
}
if !ok {
return fmt.Errorf("Wrong config type, expected %T", oldValue)
return fmt.Errorf("wrong config type, expected %T", oldValue)
}
}

View File

@ -9,6 +9,7 @@ import (
"path/filepath"
"github.com/ipfs/go-ipfs/repo/config"
"gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
"gx/ipfs/QmdYwCmx8pZRkzdcd8MhmLJqYVoVTC1aGsy5Q4reMGLNLg/atomicfile"
)
@ -21,7 +22,7 @@ func ReadConfigFile(filename string, cfg interface{}) error {
}
defer f.Close()
if err := json.NewDecoder(f).Decode(cfg); err != nil {
return fmt.Errorf("Failure to decode config: %s", err)
return fmt.Errorf("failure to decode config: %s", err)
}
return nil
}

View File

@ -82,7 +82,7 @@ func checkOK(resp *http.Response, err error) error {
if err != nil {
fmt.Fprintf(os.Stderr, "pollEndpoint: ioutil.ReadAll() Error: %s", err)
}
return fmt.Errorf("Response not OK. %d %s %q", resp.StatusCode, resp.Status, string(body))
return fmt.Errorf("response not OK. %d %s %q", resp.StatusCode, resp.Status, string(body))
}
return err
}

View File

@ -238,7 +238,7 @@ func (dr *PBDagReader) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
if offset < 0 {
return -1, errors.New("Invalid offset")
return -1, errors.New("invalid offset")
}
if offset == dr.offset {
return offset, nil

View File

@ -95,11 +95,11 @@ func GetRandomNode(t testing.TB, dserv ipld.DAGService, size int64, opts NodeOpt
// ArrComp checks if two byte slices are the same.
func ArrComp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b))
return fmt.Errorf("arrays differ in length. %d != %d", len(a), len(b))
}
for i, v := range a {
if v != b[i] {
return fmt.Errorf("Arrays differ at index: %d", i)
return fmt.Errorf("arrays differ at index: %d", i)
}
}
return nil

View File

@ -129,13 +129,13 @@ func DataSize(data []byte) (uint64, error) {
switch pbdata.GetType() {
case pb.Data_Directory:
return 0, errors.New("Cant get data size of directory")
return 0, errors.New("can't get data size of directory")
case pb.Data_File:
return pbdata.GetFilesize(), nil
case pb.Data_Raw:
return uint64(len(pbdata.GetData())), nil
default:
return 0, errors.New("Unrecognized node data type")
return 0, errors.New("unrecognized node data type")
}
}