Merge pull request #6478 from ipfs/fix/stale-api

ignore stale API files and deprecate ipfs repo fsck
This commit is contained in:
Steven Allen 2019-07-03 14:30:18 -07:00 committed by GitHub
commit eb4da3c12d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 187 additions and 362 deletions

View File

@ -21,7 +21,6 @@ import (
repo "github.com/ipfs/go-ipfs/repo"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
osh "github.com/Kubuxu/go-os-helper"
"github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/go-ipfs-cmds/cli"
"github.com/ipfs/go-ipfs-cmds/http"
@ -195,21 +194,90 @@ func checkDebug(req *cmds.Request) {
}
}
// apiAddrOption reads the --api option from the request, if present.
// It returns (nil, nil) when the option was not supplied; otherwise it
// returns the parsed multiaddr or a parse error.
func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
	if addrStr, ok := req.Options[corecmds.ApiOption].(string); ok {
		return ma.NewMultiaddr(addrStr)
	}
	// No --api flag given; callers treat a nil address as "not specified".
	return nil, nil
}
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
exe := cmds.NewExecutor(req.Root)
cctx := env.(*oldcmds.Context)
details := commandDetails(req.Path)
client, err := commandShouldRunOnDaemon(*details, req, env.(*oldcmds.Context))
// Check if the command is disabled.
if details.cannotRunOnClient && details.cannotRunOnDaemon {
return nil, fmt.Errorf("command disabled: %v", req.Path)
}
// Can we just run this locally?
if !details.cannotRunOnClient && details.doesNotUseRepo {
return exe, nil
}
// Get the API option from the commandline.
apiAddr, err := apiAddrOption(req)
if err != nil {
return nil, err
}
var exctr cmds.Executor
if client != nil && !req.Command.External {
exctr = client.(cmds.Executor)
} else {
exctr = cmds.NewExecutor(req.Root)
// Require that the command be run on the daemon when the API flag is
// passed (unless we're trying to _run_ the daemon).
daemonRequested := apiAddr != nil && req.Command != daemonCmd
// Run this on the client if required.
if details.cannotRunOnDaemon || req.Command.External {
if daemonRequested {
// User requested that the command be run on the daemon but we can't.
// NOTE: We drop this check for the `ipfs daemon` command.
return nil, errors.New("api flag specified but command cannot be run on the daemon")
}
return exe, nil
}
return exctr, nil
// Finally, look in the repo for an API file.
if apiAddr == nil {
var err error
apiAddr, err = fsrepo.APIAddr(cctx.ConfigRoot)
switch err {
case nil, repo.ErrApiNotRunning:
default:
return nil, err
}
}
// Still no api specified? Run it on the client or fail.
if apiAddr == nil {
if details.cannotRunOnClient {
return nil, fmt.Errorf("command must be run on the daemon: %v", req.Path)
}
return exe, nil
}
// Resolve the API addr.
apiAddr, err = resolveAddr(req.Context, apiAddr)
if err != nil {
return nil, err
}
_, host, err := manet.DialArgs(apiAddr)
if err != nil {
return nil, err
}
// Construct the executor.
opts := []http.ClientOpt{
http.ClientWithAPIPrefix(corehttp.APIPath),
}
// Fallback on a local executor if we (a) have a repo and (b) aren't
// forcing a daemon.
if !daemonRequested && fsrepo.IsInitialized(cctx.ConfigRoot) {
opts = append(opts, http.ClientWithFallback(exe))
}
return http.NewClient(host, opts...), nil
}
func checkPermissions(path string) (bool, error) {
@ -227,7 +295,11 @@ func checkPermissions(path string) (bool, error) {
}
// commandDetails returns a command's details for the command given by |path|.
func commandDetails(path []string) *cmdDetails {
func commandDetails(path []string) cmdDetails {
if len(path) == 0 {
// special case root command
return cmdDetails{doesNotUseRepo: true}
}
var details cmdDetails
// find the last command in path that has a cmdDetailsMap entry
for i := range path {
@ -235,67 +307,7 @@ func commandDetails(path []string) *cmdDetails {
details = cmdDetails
}
}
return &details
}
// commandShouldRunOnDaemon determines, from command details, whether a
// command ought to be executed on an ipfs daemon.
//
// It returns a client if the command should be executed on a daemon and nil if
// it should be executed on a client. It returns an error if the command must
// NOT be executed on either.
func commandShouldRunOnDaemon(details cmdDetails, req *cmds.Request, cctx *oldcmds.Context) (http.Client, error) {
path := req.Path
// root command.
if len(path) < 1 {
return nil, nil
}
if details.cannotRunOnClient && details.cannotRunOnDaemon {
return nil, fmt.Errorf("command disabled: %s", path[0])
}
if details.doesNotUseRepo && details.canRunOnClient() {
return nil, nil
}
// at this point need to know whether api is running. we defer
// to this point so that we don't check unnecessarily
// did user specify an api to use for this command?
apiAddrStr, _ := req.Options[corecmds.ApiOption].(string)
client, err := getAPIClient(req.Context, cctx.ConfigRoot, apiAddrStr)
if err == repo.ErrApiNotRunning {
if apiAddrStr != "" && req.Command != daemonCmd {
// if user SPECIFIED an api, and this cmd is not daemon
// we MUST use it. so error out.
return nil, err
}
// ok for api not to be running
} else if err != nil { // some other api error
return nil, err
}
if client != nil {
if details.cannotRunOnDaemon {
// check if daemon locked. legacy error text, for now.
log.Debugf("Command cannot run on daemon. Checking if daemon is locked")
if daemonLocked, _ := fsrepo.LockedByOtherProcess(cctx.ConfigRoot); daemonLocked {
return nil, cmds.ClientError("ipfs daemon is running. please stop it to run this command")
}
return nil, nil
}
return client, nil
}
if details.cannotRunOnClient {
return nil, cmds.ClientError("must run on the ipfs daemon")
}
return nil, nil
return details
}
func getRepoPath(req *cmds.Request) (string, error) {
@ -366,67 +378,6 @@ func profileIfEnabled() (func(), error) {
return func() {}, nil
}
var apiFileErrorFmt string = `Failed to parse '%[1]s/api' file.
error: %[2]s
If you're sure go-ipfs isn't running, you can just delete it.
`
var checkIPFSUnixFmt = "Otherwise check:\n\tps aux | grep ipfs"
var checkIPFSWinFmt = "Otherwise check:\n\ttasklist | findstr ipfs"
// getAPIClient checks the repo, and the given options, checking for
// a running API service. if there is one, it returns a client.
// otherwise, it returns repo.ErrApiNotRunning, or another error.
func getAPIClient(ctx context.Context, repoPath, apiAddrStr string) (http.Client, error) {
// Pick an OS-appropriate error template so the message can tell the
// user how to check whether a daemon is already running.
var apiErrorFmt string
switch {
case osh.IsUnix():
apiErrorFmt = apiFileErrorFmt + checkIPFSUnixFmt
case osh.IsWindows():
apiErrorFmt = apiFileErrorFmt + checkIPFSWinFmt
default:
apiErrorFmt = apiFileErrorFmt
}
var addr ma.Multiaddr
var err error
if len(apiAddrStr) != 0 {
// The user explicitly passed an API address on the command line;
// it must parse as a multiaddr.
addr, err = ma.NewMultiaddr(apiAddrStr)
if err != nil {
return nil, err
}
if len(addr.Protocols()) == 0 {
return nil, fmt.Errorf("multiaddr doesn't provide any protocols")
}
} else {
// No explicit address: fall back to the repo's api file.
addr, err = fsrepo.APIAddr(repoPath)
if err == repo.ErrApiNotRunning {
// Propagated as-is so callers can distinguish "no daemon"
// from a malformed api file.
return nil, err
}
if err != nil {
// The api file exists but couldn't be parsed; wrap with the
// OS-specific guidance chosen above.
return nil, fmt.Errorf(apiErrorFmt, repoPath, err.Error())
}
}
// Guard against an empty/degenerate multiaddr read from the api file.
if len(addr.Protocols()) == 0 {
return nil, fmt.Errorf(apiErrorFmt, repoPath, "multiaddr doesn't provide any protocols")
}
return apiClientForAddr(ctx, addr)
}
// apiClientForAddr constructs an HTTP API client for the given multiaddr,
// resolving any DNS components first.
func apiClientForAddr(ctx context.Context, addr ma.Multiaddr) (http.Client, error) {
	resolved, err := resolveAddr(ctx, addr)
	if err != nil {
		return nil, err
	}
	// Turn the multiaddr into a dialable host:port string.
	var host string
	if _, host, err = manet.DialArgs(resolved); err != nil {
		return nil, err
	}
	return http.NewClient(host, http.ClientWithAPIPrefix(corehttp.APIPath)), nil
}
func resolveAddr(ctx context.Context, addr ma.Multiaddr) (ma.Multiaddr, error) {
ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Second)
defer cancelFunc()

View File

@ -6,7 +6,6 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
@ -20,7 +19,6 @@ import (
cid "github.com/ipfs/go-cid"
bstore "github.com/ipfs/go-ipfs-blockstore"
cmds "github.com/ipfs/go-ipfs-cmds"
config "github.com/ipfs/go-ipfs-config"
)
type RepoVersion struct {
@ -216,44 +214,11 @@ var repoFsckCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Remove repo lockfiles.",
ShortDescription: `
'ipfs repo fsck' is a plumbing command that will remove repo and level db
lockfiles, as well as the api file. This command can only run when no ipfs
daemons are running.
'ipfs repo fsck' is now a no-op.
`,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
configRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
dsPath, err := config.DataStorePath(configRoot)
if err != nil {
return err
}
dsLockFile := filepath.Join(dsPath, "LOCK") // TODO: get this lockfile programmatically
repoLockFile := filepath.Join(configRoot, fsrepo.LockFile)
apiFile := filepath.Join(configRoot, "api") // TODO: get this programmatically
log.Infof("Removing repo lockfile: %s", repoLockFile)
log.Infof("Removing datastore lockfile: %s", dsLockFile)
log.Infof("Removing api file: %s", apiFile)
err = os.Remove(repoLockFile)
if err != nil && !os.IsNotExist(err) {
return err
}
err = os.Remove(dsLockFile)
if err != nil && !os.IsNotExist(err) {
return err
}
err = os.Remove(apiFile)
if err != nil && !os.IsNotExist(err) {
return err
}
return cmds.EmitOnce(res, &MessageOutput{"Lockfiles have been removed.\n"})
return cmds.EmitOnce(res, &MessageOutput{"`ipfs repo fsck` is deprecated and does nothing.\n"})
},
Type: MessageOutput{},
Encoders: cmds.EncoderMap{

3
go.mod
View File

@ -2,7 +2,6 @@ module github.com/ipfs/go-ipfs
require (
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
github.com/Kubuxu/go-os-helper v0.0.1
github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd
github.com/blang/semver v3.5.1+incompatible
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d
@ -32,7 +31,7 @@ require (
github.com/ipfs/go-ipfs-blockstore v0.0.1
github.com/ipfs/go-ipfs-blocksutil v0.0.1
github.com/ipfs/go-ipfs-chunker v0.0.1
github.com/ipfs/go-ipfs-cmds v0.0.10
github.com/ipfs/go-ipfs-cmds v0.1.0
github.com/ipfs/go-ipfs-config v0.0.6
github.com/ipfs/go-ipfs-ds-help v0.0.1
github.com/ipfs/go-ipfs-exchange-interface v0.0.1

4
go.sum
View File

@ -247,8 +247,8 @@ github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IW
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw=
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=
github.com/ipfs/go-ipfs-cmds v0.0.10 h1:4kA3E94HbDrLb4RZTkX3yXyUjKv50RfPz0Pv9xkP2cA=
github.com/ipfs/go-ipfs-cmds v0.0.10/go.mod h1:TiK4e7/V31tuEb8YWDF8lN3qrnDH+BS7ZqWIeYJlAs8=
github.com/ipfs/go-ipfs-cmds v0.1.0 h1:0CEde9EcxByej8+L6d1PST57J4ambRPyCTjLG5Ymou8=
github.com/ipfs/go-ipfs-cmds v0.1.0/go.mod h1:TiK4e7/V31tuEb8YWDF8lN3qrnDH+BS7ZqWIeYJlAs8=
github.com/ipfs/go-ipfs-config v0.0.5 h1:D9ek19anOzm8iYPvezeeamSg5mzwqKPb2jyAyJZT/4A=
github.com/ipfs/go-ipfs-config v0.0.5/go.mod h1:IGkVTacurWv9WFKc7IBPjHGM/7hi6+PEClqUb/l2BIM=
github.com/ipfs/go-ipfs-config v0.0.6 h1:jzK9Tl8S0oWBir3F5ObtGgnHRPdqQ0MYiCmwXtV3Ps4=

100
test/sharness/t0064-api-file.sh Executable file
View File

@ -0,0 +1,100 @@
#!/usr/bin/env bash
#
# MIT Licensed; see the LICENSE file in this repository.
#

test_description="Test api file"

. lib/test-lib.sh

test_init_ipfs

# Start and stop the daemon once so the repo is fully set up before the
# offline tests below.
test_launch_ipfs_daemon
test_kill_ipfs_daemon

test_expect_success "version always works" '
  ipfs version >/dev/null
'

test_expect_success "swarm peers fails when offline" '
  test_must_fail ipfs swarm peers >/dev/null
'

test_expect_success "swarm peers fails when offline and API specified" '
  test_must_fail ipfs swarm peers --api="$API_MADDR" >/dev/null
'

test_expect_success "pin ls succeeds when offline" '
  ipfs pin ls >/dev/null
'

test_expect_success "pin ls fails when offline and API specified" '
  test_must_fail ipfs pin ls --api="$API_MADDR" >/dev/null
'

test_expect_success "id succeeds when offline" '
  ipfs id >/dev/null
'

test_expect_success "id fails when offline API specified" '
  test_must_fail ipfs id --api="$API_MADDR" >/dev/null
'

test_expect_success "create API file" '
  echo "$API_MADDR" > "$IPFS_PATH/api"
'

test_expect_success "version always works" '
  ipfs version >/dev/null
'

test_expect_success "id succeeds when offline and API file exists" '
  ipfs id >/dev/null
'

test_expect_success "pin ls succeeds when offline and API file exists" '
  ipfs pin ls >/dev/null
'

test_launch_ipfs_daemon

test_expect_success "version always works" '
  ipfs version >/dev/null
'

test_expect_success "id succeeds when online" '
  ipfs id >/dev/null
'

test_expect_success "swarm peers succeeds when online" '
  ipfs swarm peers >/dev/null
'

test_expect_success "pin ls succeeds when online" '
  ipfs pin ls >/dev/null
'

test_expect_success "remove API file when daemon is running" '
  rm "$IPFS_PATH/api"
'

test_expect_success "version always works" '
  ipfs version >/dev/null
'

test_expect_success "swarm peers fails when the API file is missing" '
  test_must_fail ipfs swarm peers >/dev/null
'

# FIX: this test previously ran `ipfs pin ls`, duplicating the next test and
# leaving `ipfs id` untested in this state. It now matches its title.
test_expect_success "id fails when daemon is running but API file is missing (locks repo)" '
  test_must_fail ipfs id >/dev/null
'

test_expect_success "pin ls fails when daemon is running but API file is missing (locks repo)" '
  test_must_fail ipfs pin ls >/dev/null
'

test_kill_ipfs_daemon

test_done

View File

@ -1,190 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2016 Mike Pfister
# MIT Licensed; see the LICENSE file in this repository.
#
test_description="Test ipfs repo fsck operations"
. lib/test-lib.sh
test_init_ipfs
#############################
# Test without daemon running
#############################
# NOTE: if api file isn't present we can assume the daemon isn't running
# Try with all lock files present: repo.lock, api, and datastore/LOCK with
# repo.lock and datastore/LOCK being empty
test_expect_success "'ipfs repo fsck' succeeds with no daemon running empty
repo.lock" '
mkdir -p $IPFS_PATH &&
mkdir -p $IPFS_PATH/datastore &&
touch $IPFS_PATH/datastore/LOCK &&
touch $IPFS_PATH/repo.lock &&
printf "/ip4/127.0.0.1/tcp/5001" > "$IPFS_PATH/api" &&
ipfs repo fsck > fsck_out_actual1
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual1
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
# Try with all lock files present: repo.lock, api, and datastore/LOCK with
# repo.lock is non-zero TODO: this test is broken until we find consensus on the
# non-zero repo.lock issue
test_expect_success "'ipfs repo fsck' succeeds with no daemon running non-zero
repo.lock" '
mkdir -p "$IPFS_PATH" &&
printf ":D" > "$IPFS_PATH/repo.lock" &&
touch "$IPFS_PATH/datastore/LOCK" &&
ipfs repo fsck > fsck_out_actual1b
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual1b
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
########################
# Test for partial locks
########################
# Try with locks api and datastore/LOCK
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
printf "/ip4/127.0.0.1/tcp/5001" > "$IPFS_PATH/api" &&
touch $IPFS_PATH/datastore/LOCK &&
ipfs repo fsck > fsck_out_actual2
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual2
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
# Try with locks api and repo.lock
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
printf "/ip4/127.0.0.1/tcp/5001" > "$IPFS_PATH/api" &&
touch $IPFS_PATH/repo.lock &&
ipfs repo fsck > fsck_out_actual3
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual3
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
# Try with locks repo.lock and datastore
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
touch $IPFS_PATH/repo.lock &&
touch $IPFS_PATH/datastore/LOCK &&
ipfs repo fsck > fsck_out_actual4
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual4
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
#######################
# Test for single locks
#######################
# Try with single locks repo.lock
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
touch $IPFS_PATH/repo.lock &&
ipfs repo fsck > fsck_out_actual5
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual5
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
# Try with single locks datastore/LOCK
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
touch $IPFS_PATH/datastore/LOCK &&
ipfs repo fsck > fsck_out_actual6
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual6
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
# Try with single lock api
test_expect_success "'ipfs repo fsck' succeeds partial lock" '
printf "/ip4/127.0.0.1/tcp/5001" > "$IPFS_PATH/api" &&
ipfs repo fsck > fsck_out_actual7
'
test_expect_success "'ipfs repo fsck' output looks good with no daemon" '
grep "Lockfiles have been removed." fsck_out_actual7
'
# Make sure the files are actually removed
test_expect_success "'ipfs repo fsck' confirm file deletion" '
test ! -e "$IPFS_PATH/repo.lock" &&
test ! -e "$IPFS_PATH/datastore/LOCK" &&
test ! -e "$IPFS_PATH/api"
'
##########################
# Test with daemon running
##########################
test_launch_ipfs_daemon
# Daemon is running -> command doesn't run
test_expect_success "'ipfs repo fsck' fails with daemon running" '
! (ipfs repo fsck 2>fsck_out_actual8 )
'
test_expect_success "'ipfs repo fsck' output looks good with daemon" '
grep "Error: ipfs daemon is running" fsck_out_actual8
'
test_kill_ipfs_daemon
test_done