mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-21 10:27:46 +08:00
feat: add built-in ipfs update command
adds `ipfs update` command tree that downloads pre-built Kubo binaries from GitHub Releases, verifies SHA-512 checksums, and replaces the running binary in place. subcommands: - `ipfs update check` -- query GitHub for newer versions - `ipfs update versions` -- list available releases - `ipfs update install [version]` -- download, verify, backup, and atomically replace the current binary - `ipfs update revert` -- restore the previously backed up binary from `$IPFS_PATH/old-bin/` read-only subcommands (check, versions) work while the daemon is running. install and revert require the daemon to be stopped first. design decisions: - uses GitHub Releases API instead of dist.ipfs.tech because GitHub is harder to censor in regions that block IPFS infrastructure - honors GITHUB_TOKEN/GH_TOKEN to avoid unauthenticated rate limits - backs up the current binary before replacing, with permission-error fallback that saves to a temp dir with manual `sudo mv` instructions - `KUBO_UPDATE_GITHUB_URL` env var redirects API calls for integration testing; `IPFS_VERSION_FAKE` overrides the reported version - unit tests use mock HTTP servers and the var override; CLI tests use the env vars with a temp binary copy so the real build is never touched resolves https://github.com/ipfs/kubo/issues/10937
This commit is contained in:
parent
8eab2fcf5d
commit
706aab385b
15
.github/workflows/test-migrations.yml
vendored
15
.github/workflows/test-migrations.yml
vendored
@ -1,4 +1,4 @@
|
||||
name: Migrations
|
||||
name: Migrations & Update
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@ -9,6 +9,9 @@ on:
|
||||
- 'test/cli/migrations/**'
|
||||
# Config and repo handling
|
||||
- 'repo/fsrepo/**'
|
||||
# Update command
|
||||
- 'core/commands/update*.go'
|
||||
- 'test/cli/update_test.go'
|
||||
# This workflow file itself
|
||||
- '.github/workflows/test-migrations.yml'
|
||||
push:
|
||||
@ -19,6 +22,8 @@ on:
|
||||
- 'repo/fsrepo/migrations/**'
|
||||
- 'test/cli/migrations/**'
|
||||
- 'repo/fsrepo/**'
|
||||
- 'core/commands/update*.go'
|
||||
- 'test/cli/update_test.go'
|
||||
- '.github/workflows/test-migrations.yml'
|
||||
|
||||
concurrency:
|
||||
@ -75,6 +80,13 @@ jobs:
|
||||
ipfs version || echo "Failed to run ipfs version"
|
||||
go test ./test/cli/migrations/...
|
||||
|
||||
- name: Run CLI update tests
|
||||
env:
|
||||
IPFS_PATH: ${{ runner.temp }}/ipfs-update-test
|
||||
run: |
|
||||
export PATH="${{ github.workspace }}/cmd/ipfs:$PATH"
|
||||
go test -run "TestUpdate" ./test/cli/...
|
||||
|
||||
- name: Upload test results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v6
|
||||
@ -83,3 +95,4 @@ jobs:
|
||||
path: |
|
||||
test/**/*.log
|
||||
${{ runner.temp }}/ipfs-test/
|
||||
${{ runner.temp }}/ipfs-update-test/
|
||||
|
||||
@ -213,6 +213,10 @@ func TestCommands(t *testing.T) {
|
||||
"/swarm/peering/rm",
|
||||
"/swarm/resources",
|
||||
"/update",
|
||||
"/update/check",
|
||||
"/update/install",
|
||||
"/update/revert",
|
||||
"/update/versions",
|
||||
"/version",
|
||||
"/version/check",
|
||||
"/version/deps",
|
||||
|
||||
@ -81,7 +81,7 @@ TOOL COMMANDS
|
||||
config Manage configuration
|
||||
version Show IPFS version information
|
||||
diag Generate diagnostic reports
|
||||
update Download and apply go-ipfs updates
|
||||
update Update Kubo to a different version
|
||||
commands List all available commands
|
||||
log Manage and show logs of running daemon
|
||||
|
||||
@ -157,7 +157,7 @@ var rootSubcommands = map[string]*cmds.Command{
|
||||
"refs": RefsCmd,
|
||||
"resolve": ResolveCmd,
|
||||
"swarm": SwarmCmd,
|
||||
"update": ExternalBinary("Please see https://github.com/ipfs/ipfs-update/blob/master/README.md#install for installation instructions."),
|
||||
"update": UpdateCmd,
|
||||
"version": VersionCmd,
|
||||
"shutdown": daemonShutdownCmd,
|
||||
"cid": CidCmd,
|
||||
|
||||
674
core/commands/update.go
Normal file
674
core/commands/update.go
Normal file
@ -0,0 +1,674 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
version "github.com/ipfs/kubo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo/migrations"
|
||||
"github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile"
|
||||
)
|
||||
|
||||
const (
|
||||
updatePreOptionName = "pre"
|
||||
updateCountOptionName = "count"
|
||||
updateAllowDowngradeOptionName = "allow-downgrade"
|
||||
)
|
||||
|
||||
// UpdateCmd is the "ipfs update" command tree.
|
||||
// UpdateCmd is the "ipfs update" command tree.
//
// All subcommands are NoRemote and marked as not using the repo/config:
// they operate on the binary on disk and must never be proxied through a
// running daemon's API.
var UpdateCmd = &cmds.Command{
	Status: cmds.Experimental,
	Helptext: cmds.HelpText{
		Tagline: "Update Kubo to a different version",
		ShortDescription: `
Downloads pre-built Kubo binaries from GitHub Releases, verifies
checksums, and replaces the running binary in place. The previous
binary is saved so you can revert if needed.

The daemon must be stopped before installing or reverting.
`,
		LongDescription: `
Downloads pre-built Kubo binaries from GitHub Releases, verifies
checksums, and replaces the running binary in place. The previous
binary is saved so you can revert if needed.

The daemon must be stopped before installing or reverting.

ENVIRONMENT VARIABLES

  HTTPS_PROXY
    HTTP proxy for reaching GitHub. Set this when GitHub is not
    directly reachable from your network.
    Example: HTTPS_PROXY=http://proxy:8080 ipfs update install

  GITHUB_TOKEN
    GitHub personal access token. Raises the API rate limit from
    60 to 5000 requests per hour. Set this if you hit "rate limit
    exceeded" errors. GH_TOKEN is also accepted.

  IPFS_PATH
    Determines where binary backups are stored ($IPFS_PATH/old-bin/).
    Defaults to ~/.ipfs.
`,
	},
	// NoRemote: this command replaces the local binary and must run locally.
	NoRemote: true,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
	Subcommands: map[string]*cmds.Command{
		"check":    updateCheckCmd,
		"versions": updateVersionsCmd,
		"install":  updateInstallCmd,
		"revert":   updateRevertCmd,
	},
}
|
||||
|
||||
// -- check --
|
||||
|
||||
// UpdateCheckOutput is the output of "ipfs update check".
|
||||
// UpdateCheckOutput is the output of "ipfs update check".
type UpdateCheckOutput struct {
	// CurrentVersion is the version of the running binary (no "v" prefix).
	CurrentVersion string
	// LatestVersion is the newest installable release found on GitHub.
	LatestVersion string
	// UpdateAvailable is true when LatestVersion is newer than CurrentVersion.
	UpdateAvailable bool
}
|
||||
|
||||
// updateCheckCmd implements "ipfs update check". Read-only: it queries the
// GitHub API and compares versions; it never touches the repo or the binary.
var updateCheckCmd = &cmds.Command{
	Status: cmds.Experimental,
	Helptext: cmds.HelpText{
		Tagline: "Check if a newer Kubo version is available",
		ShortDescription: `
Queries GitHub Releases for the latest Kubo version and compares
it against the currently running binary. Only considers releases
with binaries available for your operating system and architecture.

Works while the daemon is running (read-only, no repo access).

ENVIRONMENT VARIABLES

  HTTPS_PROXY   HTTP proxy for reaching GitHub API.
  GITHUB_TOKEN  Raises the API rate limit (GH_TOKEN also accepted).
`,
	},
	NoRemote: true,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
	Options: []cmds.Option{
		cmds.BoolOption(updatePreOptionName, "Include pre-release versions."),
	},
	Type: UpdateCheckOutput{},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		ctx := req.Context
		includePre, _ := req.Options[updatePreOptionName].(bool)

		// githubLatestRelease only reports releases that ship a binary for
		// this GOOS/GOARCH, so "latest" here is always installable.
		rel, err := githubLatestRelease(ctx, includePre)
		if err != nil {
			return fmt.Errorf("checking for updates: %w", err)
		}

		latest := trimVPrefix(rel.TagName)
		current := currentVersion()

		updateAvailable, err := isNewerVersion(current, latest)
		if err != nil {
			return err
		}

		return cmds.EmitOnce(res, &UpdateCheckOutput{
			CurrentVersion:  current,
			LatestVersion:   latest,
			UpdateAvailable: updateAvailable,
		})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateCheckOutput) error {
			if out.UpdateAvailable {
				fmt.Fprintf(w, "Update available: %s -> %s\n", out.CurrentVersion, out.LatestVersion)
				fmt.Fprintln(w, "Run 'ipfs update install' to install the latest version.")
			} else {
				fmt.Fprintf(w, "Already up to date (%s)\n", out.CurrentVersion)
			}
			return nil
		}),
	},
}
|
||||
|
||||
// -- versions --
|
||||
|
||||
// UpdateVersionsOutput is the output of "ipfs update versions".
|
||||
// UpdateVersionsOutput is the output of "ipfs update versions".
type UpdateVersionsOutput struct {
	// Current is the version of the running binary (no "v" prefix).
	Current string
	// Versions lists published release versions, newest first (no "v" prefix).
	Versions []string
}
|
||||
|
||||
// updateVersionsCmd implements "ipfs update versions". Read-only: lists
// releases from the GitHub API and marks the running version.
var updateVersionsCmd = &cmds.Command{
	Status: cmds.Experimental,
	Helptext: cmds.HelpText{
		Tagline: "List available Kubo versions",
		ShortDescription: `
Lists Kubo versions published on GitHub Releases. The currently
running version is marked with an asterisk (*).
`,
	},
	NoRemote: true,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
	Options: []cmds.Option{
		cmds.IntOption(updateCountOptionName, "n", "Number of versions to list.").WithDefault(30),
		cmds.BoolOption(updatePreOptionName, "Include pre-release versions."),
	},
	Type: UpdateVersionsOutput{},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		ctx := req.Context
		count, _ := req.Options[updateCountOptionName].(int)
		// Guard against nonsensical --count values; fall back to the default.
		if count <= 0 {
			count = 30
		}
		includePre, _ := req.Options[updatePreOptionName].(bool)

		releases, err := githubListReleases(ctx, count, includePre)
		if err != nil {
			return fmt.Errorf("listing versions: %w", err)
		}

		versions := make([]string, 0, len(releases))
		for _, r := range releases {
			versions = append(versions, trimVPrefix(r.TagName))
		}

		return cmds.EmitOnce(res, &UpdateVersionsOutput{
			Current:  currentVersion(),
			Versions: versions,
		})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateVersionsOutput) error {
			for _, v := range out.Versions {
				// NOTE(review): the non-current marker appears narrower than
				// "* " in the source view — possibly collapsed whitespace;
				// confirm the intended column alignment.
				marker := " "
				if v == out.Current {
					marker = "* "
				}
				fmt.Fprintf(w, "%s%s\n", marker, v)
			}
			return nil
		}),
	},
}
|
||||
|
||||
// -- install --
|
||||
|
||||
// UpdateInstallOutput is the output of "ipfs update install".
|
||||
// UpdateInstallOutput is the output of "ipfs update install".
type UpdateInstallOutput struct {
	// OldVersion is the version that was running before the install.
	OldVersion string
	// NewVersion is the version that was installed (or staged).
	NewVersion string
	// BinaryPath is where the new binary ended up: the original location
	// on success, or a temp path when the permission-error fallback fired.
	BinaryPath string
	// StashedTo is the backup location of the previous binary.
	StashedTo string
}
|
||||
|
||||
// updateInstallCmd implements "ipfs update install". The Run closure is
// deliberately sequential: resolve target -> compare -> download -> verify
// checksum -> extract -> back up -> replace, with progress printed to stderr
// at each stage so structured output on stdout stays machine-readable.
var updateInstallCmd = &cmds.Command{
	Status: cmds.Experimental,
	Helptext: cmds.HelpText{
		Tagline: "Download and install a Kubo update",
		ShortDescription: `
Downloads the specified version (or latest) from GitHub Releases,
verifies the SHA-512 checksum, saves a backup of the current binary,
and atomically replaces it.

If replacing the binary fails due to file permissions, the new binary
is saved to a temporary directory and the path is printed so you can
move it manually (e.g. with sudo).

Previous binaries are kept in $IPFS_PATH/old-bin/ and can be
restored with 'ipfs update revert'.
`,
	},
	NoRemote: true,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
	Arguments: []cmds.Argument{
		cmds.StringArg("version", false, false, "Version to install (default: latest)."),
	},
	Options: []cmds.Option{
		cmds.BoolOption(updatePreOptionName, "Include pre-release versions when resolving latest."),
		cmds.BoolOption(updateAllowDowngradeOptionName, "Allow installing an older version."),
	},
	Type: UpdateInstallOutput{},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		ctx := req.Context

		// Refuse to swap the binary out from under a running daemon.
		if err := checkDaemonNotRunning(); err != nil {
			return err
		}

		current := currentVersion()
		includePre, _ := req.Options[updatePreOptionName].(bool)
		allowDowngrade, _ := req.Options[updateAllowDowngradeOptionName].(bool)

		// Resolve target version.
		var tag string
		if len(req.Arguments) > 0 && req.Arguments[0] != "" {
			tag = normalizeVersion(req.Arguments[0])
		} else {
			rel, err := githubLatestRelease(ctx, includePre)
			if err != nil {
				return fmt.Errorf("finding latest release: %w", err)
			}
			tag = rel.TagName
		}
		target := trimVPrefix(tag)

		// Compare versions.
		if target == current {
			return fmt.Errorf("already running version %s", current)
		}

		newer, err := isNewerVersion(current, target)
		if err != nil {
			return err
		}
		if !newer && !allowDowngrade {
			return fmt.Errorf("version %s is older than current %s (use --allow-downgrade to force)", target, current)
		}

		// Find and download asset.
		fmt.Fprintf(os.Stderr, "Downloading Kubo %s...\n", target)

		_, asset, err := findReleaseAsset(ctx, normalizeVersion(target))
		if err != nil {
			return err
		}

		data, err := downloadAsset(ctx, asset.BrowserDownloadURL)
		if err != nil {
			return err
		}

		// Verify checksum using .sha512 sidecar file.
		if err := downloadAndVerifySHA512(ctx, data, asset.BrowserDownloadURL); err != nil {
			return fmt.Errorf("checksum verification failed: %w", err)
		}
		fmt.Fprintln(os.Stderr, "Checksum verified (SHA-512).")

		// Extract binary from archive.
		binData, err := extractBinaryFromArchive(data)
		if err != nil {
			return fmt.Errorf("extracting binary: %w", err)
		}

		// Resolve current binary path. EvalSymlinks ensures we replace the
		// real file, not a symlink pointing at it.
		binPath, err := os.Executable()
		if err != nil {
			return fmt.Errorf("finding current binary: %w", err)
		}
		binPath, err = filepath.EvalSymlinks(binPath)
		if err != nil {
			return fmt.Errorf("resolving binary path: %w", err)
		}

		// Stash current binary before touching it so revert is possible.
		stashedTo, err := stashBinary(binPath, current)
		if err != nil {
			return fmt.Errorf("backing up current binary: %w", err)
		}
		fmt.Fprintf(os.Stderr, "Backed up current binary to %s\n", stashedTo)

		// Replace binary.
		if err := replaceBinary(binPath, binData); err != nil {
			// Permission error fallback: save to temp dir and tell the user
			// how to finish the install manually (e.g. with sudo).
			if errors.Is(err, os.ErrPermission) {
				tmpPath := filepath.Join(os.TempDir(), migrations.ExeName(fmt.Sprintf("ipfs-%s", target)))
				if writeErr := os.WriteFile(tmpPath, binData, 0o755); writeErr != nil {
					return fmt.Errorf("cannot write to %s either: %w (original error: %v)", tmpPath, writeErr, err)
				}
				fmt.Fprintf(os.Stderr, "Could not replace %s (permission denied).\n", binPath)
				fmt.Fprintf(os.Stderr, "New binary saved to: %s\n", tmpPath)
				fmt.Fprintf(os.Stderr, "Move it manually, e.g.: sudo mv %s %s\n", tmpPath, binPath)
				return cmds.EmitOnce(res, &UpdateInstallOutput{
					OldVersion: current,
					NewVersion: target,
					BinaryPath: tmpPath,
					StashedTo:  stashedTo,
				})
			}
			return fmt.Errorf("replacing binary: %w", err)
		}

		fmt.Fprintf(os.Stderr, "Successfully updated Kubo %s -> %s\n", current, target)

		return cmds.EmitOnce(res, &UpdateInstallOutput{
			OldVersion: current,
			NewVersion: target,
			BinaryPath: binPath,
			StashedTo:  stashedTo,
		})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateInstallOutput) error {
			// All status output goes to stderr in Run; text encoder is a no-op.
			return nil
		}),
	},
}
|
||||
|
||||
// -- revert --
|
||||
|
||||
// UpdateRevertOutput is the output of "ipfs update revert".
|
||||
// UpdateRevertOutput is the output of "ipfs update revert".
type UpdateRevertOutput struct {
	// RestoredVersion is the version parsed from the restored backup's filename.
	RestoredVersion string
	// BinaryPath is where the restored binary ended up: the original location
	// on success, or a temp path when the permission-error fallback fired.
	BinaryPath string
}
|
||||
|
||||
// updateRevertCmd implements "ipfs update revert". It restores the newest
// stashed binary from $IPFS_PATH/old-bin/ over the current executable,
// mirroring install's permission-error fallback.
var updateRevertCmd = &cmds.Command{
	Status: cmds.Experimental,
	Helptext: cmds.HelpText{
		Tagline: "Revert to a previously installed Kubo version",
		ShortDescription: `
Restores the most recently backed up binary from $IPFS_PATH/old-bin/.
The backup is created automatically by 'ipfs update install'.
`,
	},
	NoRemote: true,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
	Type: UpdateRevertOutput{},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		// Refuse to swap the binary out from under a running daemon.
		if err := checkDaemonNotRunning(); err != nil {
			return err
		}

		stashDir, err := getStashDir()
		if err != nil {
			return err
		}

		stashPath, stashVer, err := findLatestStash(stashDir)
		if err != nil {
			return err
		}

		// Read the whole backup into memory; binaries are replaced
		// atomically from a byte slice (see replaceBinary).
		stashData, err := os.ReadFile(stashPath)
		if err != nil {
			return fmt.Errorf("reading stashed binary: %w", err)
		}

		binPath, err := os.Executable()
		if err != nil {
			return fmt.Errorf("finding current binary: %w", err)
		}
		binPath, err = filepath.EvalSymlinks(binPath)
		if err != nil {
			return fmt.Errorf("resolving binary path: %w", err)
		}

		if err := replaceBinary(binPath, stashData); err != nil {
			// Permission error fallback: stage in a temp dir and print
			// manual move instructions, same as install.
			if errors.Is(err, os.ErrPermission) {
				tmpPath := filepath.Join(os.TempDir(), migrations.ExeName(fmt.Sprintf("ipfs-%s", stashVer)))
				if writeErr := os.WriteFile(tmpPath, stashData, 0o755); writeErr != nil {
					return fmt.Errorf("cannot write to %s either: %w (original error: %v)", tmpPath, writeErr, err)
				}
				fmt.Fprintf(os.Stderr, "Could not replace %s (permission denied).\n", binPath)
				fmt.Fprintf(os.Stderr, "Reverted binary saved to: %s\n", tmpPath)
				fmt.Fprintf(os.Stderr, "Move it manually, e.g.: sudo mv %s %s\n", tmpPath, binPath)
				return cmds.EmitOnce(res, &UpdateRevertOutput{
					RestoredVersion: stashVer,
					BinaryPath:      tmpPath,
				})
			}
			return fmt.Errorf("replacing binary: %w", err)
		}

		// Remove the stash file that was restored. Best-effort: failure to
		// delete the consumed backup is not fatal.
		os.Remove(stashPath)

		fmt.Fprintf(os.Stderr, "Reverted to Kubo %s\n", stashVer)

		return cmds.EmitOnce(res, &UpdateRevertOutput{
			RestoredVersion: stashVer,
			BinaryPath:      binPath,
		})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateRevertOutput) error {
			// All status output goes to stderr in Run; text encoder is a no-op.
			return nil
		}),
	},
}
|
||||
|
||||
// -- helpers --
|
||||
|
||||
// currentVersion returns the version string used by update commands.
|
||||
// It defaults to version.CurrentVersionNumber but can be overridden by
|
||||
// setting IPFS_VERSION_FAKE, which is useful for testing update
|
||||
// detection without rebuilding the binary.
|
||||
func currentVersion() string {
|
||||
if v := os.Getenv("IPFS_VERSION_FAKE"); v != "" {
|
||||
return v
|
||||
}
|
||||
return version.CurrentVersionNumber
|
||||
}
|
||||
|
||||
// checkDaemonNotRunning returns an error if the IPFS daemon is running.
|
||||
func checkDaemonNotRunning() error {
|
||||
repoPath, err := fsrepo.BestKnownPath()
|
||||
if err != nil {
|
||||
// If we can't determine the repo path, skip the check.
|
||||
return nil
|
||||
}
|
||||
locked, err := fsrepo.LockedByOtherProcess(repoPath)
|
||||
if err != nil {
|
||||
// Lock check failed (e.g. repo doesn't exist yet), not an error.
|
||||
return nil
|
||||
}
|
||||
if locked {
|
||||
return fmt.Errorf("IPFS daemon is running (repo locked at %s). Stop it first with 'ipfs shutdown'", repoPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getStashDir returns the path to the old-bin stash directory, creating it if needed.
|
||||
func getStashDir() (string, error) {
|
||||
repoPath, err := fsrepo.BestKnownPath()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("determining IPFS path: %w", err)
|
||||
}
|
||||
dir := filepath.Join(repoPath, "old-bin")
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return "", fmt.Errorf("creating stash directory: %w", err)
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// stashBinary copies the current binary to the stash directory.
|
||||
// Uses named returns so the deferred dst.Close() error is not silently
|
||||
// discarded -- a failed close means the backup may be incomplete.
|
||||
// stashBinary copies the current binary to the stash directory.
// Uses named returns so the deferred dst.Close() error is not silently
// discarded -- a failed close means the backup may be incomplete.
//
// The stash file is named ipfs-<ver> (with the platform executable suffix
// applied by migrations.ExeName) inside $IPFS_PATH/old-bin/, matching the
// pattern findLatestStash expects.
func stashBinary(binPath, ver string) (stashPath string, err error) {
	dir, err := getStashDir()
	if err != nil {
		return "", err
	}

	stashName := migrations.ExeName(fmt.Sprintf("ipfs-%s", ver))
	stashPath = filepath.Join(dir, stashName)

	src, err := os.Open(binPath)
	if err != nil {
		return "", fmt.Errorf("opening current binary: %w", err)
	}
	// Read-only file: Close error carries no data-loss signal, safe to drop.
	defer src.Close()

	// 0o755 keeps the stashed copy directly executable for revert.
	dst, err := os.OpenFile(stashPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o755)
	if err != nil {
		return "", fmt.Errorf("creating stash file: %w", err)
	}
	defer func() {
		// Promote a close failure to the function's error only when no
		// earlier error already explains the failure.
		if cerr := dst.Close(); cerr != nil && err == nil {
			err = fmt.Errorf("writing stash file: %w", cerr)
		}
	}()

	if _, err := io.Copy(dst, src); err != nil {
		// NOTE(review): a failed copy leaves a partial stash file behind;
		// findLatestStash would still pick it up. Consider removing it here.
		return "", fmt.Errorf("copying binary to stash: %w", err)
	}

	return stashPath, nil
}
|
||||
|
||||
// findLatestStash finds the most recently versioned stash file.
|
||||
func findLatestStash(dir string) (path, ver string, err error) {
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("reading stash directory: %w", err)
|
||||
}
|
||||
|
||||
type stash struct {
|
||||
path string
|
||||
ver string
|
||||
parsed *goversion.Version
|
||||
}
|
||||
|
||||
var stashes []stash
|
||||
for _, e := range entries {
|
||||
name := e.Name()
|
||||
// Expected format: ipfs-<version> or ipfs-<version>.exe
|
||||
trimmed := strings.TrimPrefix(name, "ipfs-")
|
||||
if trimmed == name {
|
||||
continue // doesn't match pattern
|
||||
}
|
||||
trimmed = strings.TrimSuffix(trimmed, ".exe")
|
||||
parsed, parseErr := goversion.NewVersion(trimmed)
|
||||
if parseErr != nil {
|
||||
continue
|
||||
}
|
||||
stashes = append(stashes, stash{
|
||||
path: filepath.Join(dir, name),
|
||||
ver: trimmed,
|
||||
parsed: parsed,
|
||||
})
|
||||
}
|
||||
|
||||
if len(stashes) == 0 {
|
||||
return "", "", fmt.Errorf("no stashed binaries found in %s", dir)
|
||||
}
|
||||
|
||||
slices.SortFunc(stashes, func(a, b stash) int {
|
||||
// Sort newest first: if a > b return -1.
|
||||
if a.parsed.GreaterThan(b.parsed) {
|
||||
return -1
|
||||
}
|
||||
if b.parsed.GreaterThan(a.parsed) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
return stashes[0].path, stashes[0].ver, nil
|
||||
}
|
||||
|
||||
// replaceBinary atomically replaces the binary at targetPath with data.
|
||||
// replaceBinary atomically replaces the binary at targetPath with data.
//
// atomicfile presumably writes to a temporary file and renames it over
// targetPath on Close, so a crash mid-write never leaves a truncated
// binary in place -- confirm against the atomicfile package.
//
// Errors from atomicfile (including permission errors, which callers
// detect with errors.Is(err, os.ErrPermission)) are returned unwrapped.
func replaceBinary(targetPath string, data []byte) error {
	// 0o755: the replacement must stay executable.
	af, err := atomicfile.New(targetPath, 0o755)
	if err != nil {
		return err
	}

	if _, err := af.Write(data); err != nil {
		// Abort discards the pending temp file; its error is secondary
		// to the write error being reported.
		_ = af.Abort()
		return err
	}

	// Close commits the replacement.
	return af.Close()
}
|
||||
|
||||
// extractBinaryFromArchive extracts the kubo/ipfs binary from a tar.gz or zip archive.
|
||||
func extractBinaryFromArchive(data []byte) ([]byte, error) {
|
||||
binName := migrations.ExeName("ipfs")
|
||||
|
||||
// Try tar.gz first, then zip.
|
||||
if result, err := extractFromTarGz(data, binName); err == nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if result, err := extractFromZip(data, binName); err == nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("could not find ipfs binary in archive (expected kubo/ipfs)")
|
||||
}
|
||||
|
||||
func extractFromTarGz(data []byte, binName string) ([]byte, error) {
|
||||
gzr, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
lookFor := "kubo/" + binName
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hdr.Name == lookFor {
|
||||
return io.ReadAll(tr)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("%s not found in tar.gz", lookFor)
|
||||
}
|
||||
|
||||
func extractFromZip(data []byte, binName string) ([]byte, error) {
|
||||
zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lookFor := "kubo/" + binName
|
||||
for _, f := range zr.File {
|
||||
if f.Name != lookFor {
|
||||
continue
|
||||
}
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result, err := io.ReadAll(rc)
|
||||
rc.Close()
|
||||
return result, err
|
||||
}
|
||||
return nil, fmt.Errorf("%s not found in zip", lookFor)
|
||||
}
|
||||
|
||||
// trimVPrefix removes a leading "v" from a version string.
|
||||
// trimVPrefix removes a leading "v" from a version string, leaving
// already-bare versions untouched.
func trimVPrefix(version string) string {
	return strings.TrimPrefix(version, "v")
}
|
||||
|
||||
// normalizeVersion ensures a version string has a "v" prefix (for GitHub tags).
|
||||
// normalizeVersion ensures a version string has a "v" prefix (for GitHub tags).
// Surrounding whitespace is stripped first so " 1.2.3 " and "1.2.3" normalize
// to the same tag.
func normalizeVersion(raw string) string {
	trimmed := strings.TrimSpace(raw)
	if strings.HasPrefix(trimmed, "v") {
		return trimmed
	}
	return "v" + trimmed
}
|
||||
|
||||
// isNewerVersion returns true if target is newer than current.
|
||||
func isNewerVersion(current, target string) (bool, error) {
|
||||
cv, err := goversion.NewVersion(current)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing current version %q: %w", current, err)
|
||||
}
|
||||
tv, err := goversion.NewVersion(target)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing target version %q: %w", target, err)
|
||||
}
|
||||
return tv.GreaterThan(cv), nil
|
||||
}
|
||||
272
core/commands/update_github.go
Normal file
272
core/commands/update_github.go
Normal file
@ -0,0 +1,272 @@
|
||||
package commands
|
||||
|
||||
// This file implements fetching Kubo release binaries from GitHub Releases.
|
||||
//
|
||||
// We use GitHub Releases instead of dist.ipfs.tech because GitHub is harder
|
||||
// to censor. Many networks and regions block or interfere with IPFS-specific
|
||||
// infrastructure, but GitHub is widely accessible and its TLS-protected API
|
||||
// is difficult to selectively block without breaking many other services.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha512"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
version "github.com/ipfs/kubo"
|
||||
)
|
||||
|
||||
const (
|
||||
githubOwner = "ipfs"
|
||||
githubRepo = "kubo"
|
||||
|
||||
githubAPIBase = "https://api.github.com"
|
||||
|
||||
// maxDownloadSize is the maximum allowed binary archive size (200 MB).
|
||||
maxDownloadSize = 200 << 20
|
||||
)
|
||||
|
||||
// githubReleaseFmt is the default GitHub Releases API URL prefix.
|
||||
// It is a var (not const) so unit tests can point API calls at a mock server.
|
||||
var githubReleaseFmt = githubAPIBase + "/repos/" + githubOwner + "/" + githubRepo + "/releases"
|
||||
|
||||
// githubReleaseBaseURL returns the Releases API base URL.
|
||||
// It checks KUBO_UPDATE_GITHUB_URL first (used by CLI integration tests),
|
||||
// then falls back to githubReleaseFmt (overridable by unit tests).
|
||||
func githubReleaseBaseURL() string {
|
||||
if u := os.Getenv("KUBO_UPDATE_GITHUB_URL"); u != "" {
|
||||
return u
|
||||
}
|
||||
return githubReleaseFmt
|
||||
}
|
||||
|
||||
// ghRelease represents a GitHub release.
|
||||
// ghRelease represents a GitHub release, decoded from the Releases API.
type ghRelease struct {
	// TagName is the git tag of the release (e.g. "v0.30.0").
	TagName string `json:"tag_name"`
	// Prerelease mirrors the GitHub prerelease flag; filtered out unless
	// the caller opts in.
	Prerelease bool `json:"prerelease"`
	// Assets lists the downloadable files attached to the release.
	Assets []ghAsset `json:"assets"`
}
|
||||
|
||||
// ghAsset represents a release asset on GitHub.
|
||||
// ghAsset represents a release asset on GitHub.
type ghAsset struct {
	// Name is the asset filename as uploaded (e.g. platform archive name).
	Name string `json:"name"`
	// Size is the asset size in bytes as reported by the API.
	Size int64 `json:"size"`
	// BrowserDownloadURL is the direct download URL for the asset.
	BrowserDownloadURL string `json:"browser_download_url"`
}
|
||||
|
||||
// githubGet performs an authenticated GET request to the GitHub API.
|
||||
// It honors GITHUB_TOKEN or GH_TOKEN env vars to avoid the 60 req/hr
|
||||
// unauthenticated rate limit.
|
||||
// githubGet performs an authenticated GET request to the GitHub API.
// It honors GITHUB_TOKEN or GH_TOKEN env vars to avoid the 60 req/hr
// unauthenticated rate limit.
//
// On success the caller owns resp.Body and must close it. On any non-200
// status the body is closed here and an error is returned instead.
func githubGet(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", "application/vnd.github+json")
	// Identify ourselves; GitHub rejects requests without a User-Agent.
	req.Header.Set("User-Agent", "kubo/"+version.CurrentVersionNumber)

	if token := githubToken(); token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}

	// Cancellation/timeout comes from ctx attached to the request above.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}

	// NOTE(review): 403 is treated as rate limiting, but GitHub also uses
	// 403 for e.g. invalid tokens -- the hint may mislead in that case.
	if resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusTooManyRequests {
		resp.Body.Close()
		hint := ""
		if githubToken() == "" {
			hint = " (hint: set GITHUB_TOKEN or GH_TOKEN to avoid rate limits)"
		}
		return nil, fmt.Errorf("GitHub API rate limit exceeded%s", hint)
	}

	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, fmt.Errorf("GitHub API returned HTTP %d for %s", resp.StatusCode, url)
	}

	return resp, nil
}
|
||||
|
||||
func githubToken() string {
|
||||
if t := os.Getenv("GITHUB_TOKEN"); t != "" {
|
||||
return t
|
||||
}
|
||||
return os.Getenv("GH_TOKEN")
|
||||
}
|
||||
|
||||
// githubLatestRelease returns the newest release that has a platform asset
|
||||
// for the current GOOS/GOARCH. This avoids false positives when a release
|
||||
// tag exists but artifacts haven't been uploaded yet.
|
||||
func githubLatestRelease(ctx context.Context, includePre bool) (*ghRelease, error) {
|
||||
releases, err := githubListReleases(ctx, 10, includePre)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range releases {
|
||||
want := assetNameForPlatformTag(releases[i].TagName)
|
||||
for _, a := range releases[i].Assets {
|
||||
if a.Name == want {
|
||||
return &releases[i], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("no release found with a binary for %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
// githubListReleases fetches up to count releases, optionally including prereleases.
|
||||
func githubListReleases(ctx context.Context, count int, includePre bool) ([]ghRelease, error) {
|
||||
// Fetch more than needed so we can filter prereleases and still return count results.
|
||||
perPage := count
|
||||
if !includePre {
|
||||
perPage = count * 3
|
||||
}
|
||||
if perPage > 100 {
|
||||
perPage = 100
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s?per_page=%d", githubReleaseBaseURL(), perPage)
|
||||
resp, err := githubGet(ctx, url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var all []ghRelease
|
||||
if err := json.NewDecoder(resp.Body).Decode(&all); err != nil {
|
||||
return nil, fmt.Errorf("decoding GitHub releases: %w", err)
|
||||
}
|
||||
|
||||
var filtered []ghRelease
|
||||
for _, r := range all {
|
||||
if !includePre && r.Prerelease {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, r)
|
||||
if len(filtered) >= count {
|
||||
break
|
||||
}
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// githubReleaseByTag fetches a single release by its git tag.
|
||||
func githubReleaseByTag(ctx context.Context, tag string) (*ghRelease, error) {
|
||||
url := fmt.Sprintf("%s/tags/%s", githubReleaseBaseURL(), tag)
|
||||
resp, err := githubGet(ctx, url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var rel ghRelease
|
||||
if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil {
|
||||
return nil, fmt.Errorf("decoding GitHub release: %w", err)
|
||||
}
|
||||
return &rel, nil
|
||||
}
|
||||
|
||||
// findReleaseAsset locates the platform-appropriate asset in a release.
|
||||
// It fails immediately with a clear message if:
|
||||
// - the release tag does not exist on GitHub (typo, unreleased version)
|
||||
// - the release exists but has no binary for this OS/arch (CI still building)
|
||||
func findReleaseAsset(ctx context.Context, tag string) (*ghRelease, *ghAsset, error) {
|
||||
rel, err := githubReleaseByTag(ctx, tag)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("release %s not found on GitHub: %w", tag, err)
|
||||
}
|
||||
|
||||
want := assetNameForPlatformTag(tag)
|
||||
for i := range rel.Assets {
|
||||
if rel.Assets[i].Name == want {
|
||||
return rel, &rel.Assets[i], nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf(
|
||||
"release %s exists but has no binary for %s/%s yet; build artifacts may still be uploading, try again in a few hours",
|
||||
tag, runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
// downloadAsset downloads a release asset by its browser_download_url.
|
||||
// This hits GitHub's CDN directly, not the API, so no auth headers are needed.
|
||||
func downloadAsset(ctx context.Context, url string) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", "kubo/"+version.CurrentVersionNumber)
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("downloading asset: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("download returned HTTP %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(io.LimitReader(resp.Body, maxDownloadSize+1))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading download: %w", err)
|
||||
}
|
||||
if int64(len(data)) > maxDownloadSize {
|
||||
return nil, fmt.Errorf("download exceeds maximum size of %d bytes", maxDownloadSize)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// downloadAndVerifySHA512 downloads the .sha512 sidecar file for the given
|
||||
// archive URL and verifies the archive data against it.
|
||||
func downloadAndVerifySHA512(ctx context.Context, data []byte, archiveURL string) error {
|
||||
sha512URL := archiveURL + ".sha512"
|
||||
checksumData, err := downloadAsset(ctx, sha512URL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("downloading checksum file: %w", err)
|
||||
}
|
||||
|
||||
// Parse "<hex> <filename>\n" format (standard sha512sum output).
|
||||
fields := strings.Fields(string(checksumData))
|
||||
if len(fields) < 1 {
|
||||
return fmt.Errorf("empty or malformed .sha512 file")
|
||||
}
|
||||
wantHex := fields[0]
|
||||
|
||||
return verifySHA512(data, wantHex)
|
||||
}
|
||||
|
||||
// verifySHA512 reports whether data hashes to the hex-encoded SHA-512
// digest wantHex. It returns a descriptive error when the digest is not
// valid hex or when the computed hash differs.
func verifySHA512(data []byte, wantHex string) error {
	expected, err := hex.DecodeString(wantHex)
	if err != nil {
		return fmt.Errorf("invalid hex in SHA-512 checksum: %w", err)
	}
	actual := sha512.Sum512(data)
	if bytes.Equal(actual[:], expected) {
		return nil
	}
	return fmt.Errorf("SHA-512 mismatch: expected %s, got %x", wantHex, actual[:])
}
|
||||
|
||||
// assetNameForPlatformTag returns the archive filename Kubo's release
// pipeline publishes for the given release tag and the current
// GOOS/GOARCH, e.g. "kubo_v0.41.0_linux-amd64.tar.gz" (".zip" on Windows).
func assetNameForPlatformTag(tag string) string {
	var ext string
	switch runtime.GOOS {
	case "windows":
		ext = "zip"
	default:
		ext = "tar.gz"
	}
	return "kubo_" + tag + "_" + runtime.GOOS + "-" + runtime.GOARCH + "." + ext
}
|
||||
428
core/commands/update_github_test.go
Normal file
428
core/commands/update_github_test.go
Normal file
@ -0,0 +1,428 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha512"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- SHA-512 verification ---
|
||||
//
|
||||
// These tests verify the integrity-checking code that protects users from
|
||||
// tampered or corrupted downloads. A broken hash check could allow
|
||||
// installing a malicious binary, so each failure mode must be covered.
|
||||
|
||||
// TestVerifySHA512 exercises the low-level hash comparison function.
func TestVerifySHA512(t *testing.T) {
	t.Parallel()
	// Known-good digest computed in-process, so the test needs no fixtures.
	data := []byte("hello world")
	sum := sha512.Sum512(data)
	validHex := fmt.Sprintf("%x", sum[:])

	t.Run("accepts matching hash", func(t *testing.T) {
		t.Parallel()
		err := verifySHA512(data, validHex)
		assert.NoError(t, err)
	})

	t.Run("rejects data that does not match hash", func(t *testing.T) {
		t.Parallel()
		err := verifySHA512([]byte("tampered"), validHex)
		assert.ErrorContains(t, err, "SHA-512 mismatch",
			"must reject data whose hash differs from the expected value")
	})

	t.Run("rejects malformed hex string", func(t *testing.T) {
		t.Parallel()
		// Non-hex input must fail during decoding, before any comparison.
		err := verifySHA512(data, "not-valid-hex")
		assert.ErrorContains(t, err, "invalid hex in SHA-512 checksum")
	})
}
|
||||
|
||||
// TestDownloadAndVerifySHA512 tests the complete download-and-verify flow:
// fetching a .sha512 sidecar file from alongside the archive URL, parsing
// the standard sha512sum format ("<hex> <filename>\n"), and comparing
// against the archive data. This is the function called by "ipfs update install".
func TestDownloadAndVerifySHA512(t *testing.T) {
	t.Parallel()
	archiveData := []byte("fake-archive-content")
	sum := sha512.Sum512(archiveData)
	checksumBody := fmt.Sprintf("%x kubo_v0.41.0_linux-amd64.tar.gz\n", sum[:])

	// Mock server serves only the .sha512 sidecar; the archive bytes are
	// already in memory, so no archive endpoint is needed.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/archive.tar.gz.sha512":
			_, _ = w.Write([]byte(checksumBody))
		default:
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	t.Cleanup(srv.Close)

	t.Run("accepts archive matching sidecar hash", func(t *testing.T) {
		t.Parallel()
		err := downloadAndVerifySHA512(t.Context(), archiveData, srv.URL+"/archive.tar.gz")
		assert.NoError(t, err)
	})

	t.Run("rejects archive with wrong content", func(t *testing.T) {
		t.Parallel()
		err := downloadAndVerifySHA512(t.Context(), []byte("tampered"), srv.URL+"/archive.tar.gz")
		assert.ErrorContains(t, err, "SHA-512 mismatch",
			"must hard-fail when downloaded archive doesn't match the published checksum")
	})

	t.Run("fails when sidecar file is missing", func(t *testing.T) {
		t.Parallel()
		// The mock returns 404 for unknown paths, simulating a release
		// published without its checksum file.
		err := downloadAndVerifySHA512(t.Context(), archiveData, srv.URL+"/no-such-file.tar.gz")
		assert.ErrorContains(t, err, "downloading checksum file",
			"must fail if the .sha512 sidecar can't be fetched")
	})
}
|
||||
|
||||
// --- GitHub API layer ---
|
||||
|
||||
// TestGitHubGet verifies the low-level GitHub API helper that adds
// authentication headers and translates HTTP errors into actionable
// messages (especially rate-limit hints for unauthenticated users).
func TestGitHubGet(t *testing.T) {
	t.Parallel()

	t.Run("sets Accept and User-Agent headers", func(t *testing.T) {
		t.Parallel()
		// Header assertions run inside the mock handler, on the request
		// exactly as githubGet sent it.
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, "application/vnd.github+json", r.Header.Get("Accept"),
				"must request GitHub's v3 JSON format")
			assert.Contains(t, r.Header.Get("User-Agent"), "kubo/",
				"User-Agent must identify the kubo version for debugging")
			_, _ = w.Write([]byte("{}"))
		}))
		t.Cleanup(srv.Close)

		resp, err := githubGet(t.Context(), srv.URL)
		require.NoError(t, err)
		resp.Body.Close()
	})

	t.Run("returns rate-limit error on HTTP 403", func(t *testing.T) {
		t.Parallel()
		// GitHub signals primary rate limiting with 403.
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusForbidden)
		}))
		t.Cleanup(srv.Close)

		_, err := githubGet(t.Context(), srv.URL)
		assert.ErrorContains(t, err, "rate limit exceeded")
	})

	t.Run("returns rate-limit error on HTTP 429", func(t *testing.T) {
		t.Parallel()
		// Secondary (abuse) rate limiting uses 429.
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusTooManyRequests)
		}))
		t.Cleanup(srv.Close)

		_, err := githubGet(t.Context(), srv.URL)
		assert.ErrorContains(t, err, "rate limit exceeded")
	})

	t.Run("returns HTTP status on server error", func(t *testing.T) {
		t.Parallel()
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
		}))
		t.Cleanup(srv.Close)

		_, err := githubGet(t.Context(), srv.URL)
		assert.ErrorContains(t, err, "HTTP 500")
	})
}
|
||||
|
||||
// TestGitHubListReleases verifies that release listing correctly filters
// prereleases and respects the count limit. Uses a mock GitHub API server
// to avoid network dependencies and rate limits in CI.
//
// Not parallel: temporarily overrides the package-level githubReleaseFmt var.
func TestGitHubListReleases(t *testing.T) {
	// Fixture: one prerelease followed by two stable releases, newest
	// first (the order the GitHub API returns them in).
	allReleases := []ghRelease{
		{TagName: "v0.42.0-rc1", Prerelease: true},
		{TagName: "v0.41.0"},
		{TagName: "v0.40.0"},
	}
	body, err := json.Marshal(allReleases)
	require.NoError(t, err)

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write(body)
	}))
	t.Cleanup(srv.Close)

	// Redirect the package's API calls to the mock; restored on cleanup.
	saved := githubReleaseFmt
	githubReleaseFmt = srv.URL
	t.Cleanup(func() { githubReleaseFmt = saved })

	t.Run("excludes prereleases by default", func(t *testing.T) {
		got, err := githubListReleases(t.Context(), 10, false)
		require.NoError(t, err)
		assert.Len(t, got, 2, "the rc1 prerelease should be filtered out")
		assert.Equal(t, "v0.41.0", got[0].TagName)
		assert.Equal(t, "v0.40.0", got[1].TagName)
	})

	t.Run("includes prereleases when requested", func(t *testing.T) {
		got, err := githubListReleases(t.Context(), 10, true)
		require.NoError(t, err)
		assert.Len(t, got, 3)
		assert.Equal(t, "v0.42.0-rc1", got[0].TagName)
	})

	t.Run("respects count limit", func(t *testing.T) {
		got, err := githubListReleases(t.Context(), 1, false)
		require.NoError(t, err)
		assert.Len(t, got, 1, "should return at most 1 release")
	})
}
|
||||
|
||||
// TestGitHubLatestRelease verifies that the "find latest release" logic
// skips releases that don't have a binary for the current OS/arch.
// This handles the real-world case where a release tag is created but
// CI hasn't finished uploading build artifacts yet.
//
// Not parallel: temporarily overrides the package-level githubReleaseFmt var.
func TestGitHubLatestRelease(t *testing.T) {
	// Newest release lacks an asset for this platform; the older one has it.
	releases := []ghRelease{
		{
			TagName: "v0.42.0",
			Assets:  []ghAsset{{Name: "kubo_v0.42.0_some-other-arch.tar.gz"}},
		},
		{
			TagName: "v0.41.0",
			Assets:  []ghAsset{{Name: assetNameForPlatformTag("v0.41.0")}},
		},
	}
	body, err := json.Marshal(releases)
	require.NoError(t, err)

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write(body)
	}))
	t.Cleanup(srv.Close)

	// Redirect the package's API calls to the mock; restored on cleanup.
	saved := githubReleaseFmt
	githubReleaseFmt = srv.URL
	t.Cleanup(func() { githubReleaseFmt = saved })

	rel, err := githubLatestRelease(t.Context(), false)
	require.NoError(t, err)
	assert.Equal(t, "v0.41.0", rel.TagName,
		"should skip v0.42.0 (no binary for %s/%s) and return v0.41.0",
		runtime.GOOS, runtime.GOARCH)
}
|
||||
|
||||
// TestFindReleaseAsset verifies that findReleaseAsset locates the correct
// platform-specific asset in a release, and returns a clear error when the
// release exists but has no binary for the current OS/arch.
//
// Not parallel: temporarily overrides the package-level githubReleaseFmt var.
func TestFindReleaseAsset(t *testing.T) {
	wantAsset := assetNameForPlatformTag("v0.50.0")

	// Fixture release carries a decoy asset for another arch plus the
	// matching one, so the test proves selection rather than mere presence.
	release := ghRelease{
		TagName: "v0.50.0",
		Assets: []ghAsset{
			{Name: "kubo_v0.50.0_some-other-arch.tar.gz", BrowserDownloadURL: "https://example.com/other"},
			{Name: wantAsset, BrowserDownloadURL: "https://example.com/correct"},
		},
	}
	body, err := json.Marshal(release)
	require.NoError(t, err)

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write(body)
	}))
	t.Cleanup(srv.Close)

	// Redirect the package's API calls to the mock; restored on cleanup.
	saved := githubReleaseFmt
	githubReleaseFmt = srv.URL
	t.Cleanup(func() { githubReleaseFmt = saved })

	t.Run("returns matching asset for current platform", func(t *testing.T) {
		rel, asset, err := findReleaseAsset(t.Context(), "v0.50.0")
		require.NoError(t, err)
		assert.Equal(t, "v0.50.0", rel.TagName)
		assert.Equal(t, wantAsset, asset.Name)
		assert.Equal(t, "https://example.com/correct", asset.BrowserDownloadURL)
	})

	t.Run("returns error when no asset matches current platform", func(t *testing.T) {
		// Serve a release that only has an asset for a different arch.
		noMatch := ghRelease{
			TagName: "v0.51.0",
			Assets:  []ghAsset{{Name: "kubo_v0.51.0_plan9-mips.tar.gz"}},
		}
		noMatchBody, err := json.Marshal(noMatch)
		require.NoError(t, err)

		noMatchSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			_, _ = w.Write(noMatchBody)
		}))
		t.Cleanup(noMatchSrv.Close)

		// The outer cleanup restores the original value after this subtest.
		githubReleaseFmt = noMatchSrv.URL

		_, _, err = findReleaseAsset(t.Context(), "v0.51.0")
		assert.ErrorContains(t, err, "has no binary for",
			"should explain that the release exists but lacks a matching asset")
	})
}
|
||||
|
||||
// --- Asset download ---
|
||||
|
||||
// TestDownloadAsset verifies the HTTP download helper that fetches release
// archives from GitHub's CDN. Tests both the happy path and HTTP error
// reporting.
func TestDownloadAsset(t *testing.T) {
	t.Parallel()

	t.Run("downloads content successfully", func(t *testing.T) {
		t.Parallel()
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			_, _ = w.Write([]byte("binary-content"))
		}))
		t.Cleanup(srv.Close)

		data, err := downloadAsset(t.Context(), srv.URL)
		require.NoError(t, err)
		// The returned bytes must be exactly what the server sent.
		assert.Equal(t, []byte("binary-content"), data)
	})

	t.Run("returns clear error on HTTP failure", func(t *testing.T) {
		t.Parallel()
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}))
		t.Cleanup(srv.Close)

		_, err := downloadAsset(t.Context(), srv.URL)
		assert.ErrorContains(t, err, "HTTP 404")
	})
}
|
||||
|
||||
// --- Archive extraction ---
|
||||
|
||||
// TestExtractBinaryFromArchive verifies that the ipfs binary can be
// extracted from release archives. Kubo releases use tar.gz on Unix
// and zip on Windows, with the binary at "kubo/ipfs" inside the archive.
func TestExtractBinaryFromArchive(t *testing.T) {
	t.Parallel()

	t.Run("extracts binary from valid tar.gz", func(t *testing.T) {
		t.Parallel()
		wantContent := []byte("#!/bin/fake-ipfs-binary")
		archive := makeTarGz(t, "kubo/ipfs", wantContent)

		got, err := extractBinaryFromArchive(archive)
		require.NoError(t, err)
		assert.Equal(t, wantContent, got)
	})

	t.Run("rejects archive without kubo/ipfs entry", func(t *testing.T) {
		t.Parallel()
		// A valid tar.gz that contains a file at the wrong path.
		archive := makeTarGz(t, "wrong-path/ipfs", []byte("binary"))

		_, err := extractBinaryFromArchive(archive)
		assert.ErrorContains(t, err, "could not find ipfs binary")
	})

	t.Run("rejects non-archive data", func(t *testing.T) {
		t.Parallel()
		// Garbage input must produce the same not-found error, not a panic.
		_, err := extractBinaryFromArchive([]byte("not an archive"))
		assert.ErrorContains(t, err, "could not find ipfs binary")
	})
}
|
||||
|
||||
// makeTarGz creates an in-memory tar.gz archive containing a single file.
|
||||
func makeTarGz(t *testing.T, path string, content []byte) []byte {
|
||||
t.Helper()
|
||||
var buf bytes.Buffer
|
||||
gzw := gzip.NewWriter(&buf)
|
||||
tw := tar.NewWriter(gzw)
|
||||
require.NoError(t, tw.WriteHeader(&tar.Header{
|
||||
Name: path,
|
||||
Mode: 0o755,
|
||||
Size: int64(len(content)),
|
||||
}))
|
||||
_, err := tw.Write(content)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, tw.Close())
|
||||
require.NoError(t, gzw.Close())
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// --- Asset name and version helpers ---
|
||||
|
||||
// TestAssetNameForPlatformTag ensures the archive filename matches the
// naming convention used by Kubo's CI release pipeline:
//
//	kubo_<tag>_<os>-<arch>.<ext>
func TestAssetNameForPlatformTag(t *testing.T) {
	t.Parallel()
	name := assetNameForPlatformTag("v0.41.0")
	assert.Contains(t, name, fmt.Sprintf("kubo_v0.41.0_%s-%s.", runtime.GOOS, runtime.GOARCH))

	// Extension is platform-dependent: zip on Windows, tar.gz elsewhere.
	if runtime.GOOS == "windows" {
		assert.Contains(t, name, ".zip")
	} else {
		assert.Contains(t, name, ".tar.gz")
	}
}
|
||||
|
||||
// TestVersionHelpers exercises the version string utilities used throughout
// the update command. These handle the mismatch between Go's semver
// (no "v" prefix) and GitHub's tag convention ("v" prefix).
func TestVersionHelpers(t *testing.T) {
	t.Parallel()

	t.Run("trimVPrefix strips leading v", func(t *testing.T) {
		t.Parallel()
		assert.Equal(t, "0.41.0", trimVPrefix("v0.41.0"))
		assert.Equal(t, "0.41.0", trimVPrefix("0.41.0"), "no-op when v is absent")
	})

	t.Run("normalizeVersion adds v prefix for GitHub tags", func(t *testing.T) {
		t.Parallel()
		assert.Equal(t, "v0.41.0", normalizeVersion("0.41.0"))
		assert.Equal(t, "v0.41.0", normalizeVersion("v0.41.0"), "no-op when v is present")
		assert.Equal(t, "v0.41.0", normalizeVersion(" v0.41.0 "), "trims whitespace")
	})

	t.Run("isNewerVersion compares semver correctly", func(t *testing.T) {
		t.Parallel()
		// Table-driven: each case documents one comparison rule.
		tests := []struct {
			current, target string
			wantNewer       bool
			desc            string
		}{
			{"0.40.0", "0.41.0", true, "newer minor version"},
			{"0.41.0", "0.40.0", false, "older minor version"},
			{"0.41.0", "0.41.0", false, "same version"},
			{"0.41.0-dev", "0.41.0", true, "release is newer than dev pre-release"},
		}
		for _, tt := range tests {
			got, err := isNewerVersion(tt.current, tt.target)
			require.NoError(t, err)
			assert.Equal(t, tt.wantNewer, got, tt.desc)
		}
	})
}
|
||||
@ -10,6 +10,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
|
||||
|
||||
- [Overview](#overview)
|
||||
- [🔦 Highlights](#-highlights)
|
||||
- [🔄 Built-in `ipfs update` command](#-built-in-ipfs-update-command)
|
||||
- [📝 Changelog](#-changelog)
|
||||
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
|
||||
|
||||
@ -17,6 +18,32 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
|
||||
|
||||
### 🔦 Highlights
|
||||
|
||||
#### 🔄 Built-in `ipfs update` command
|
||||
|
||||
Users who prefer to manage the Kubo binary themselves, rather than
|
||||
delegating updates to an OS package manager, can now do so directly
|
||||
from the command line. The new `ipfs update` command downloads pre-built
|
||||
binaries from GitHub Releases, verifies SHA-512 checksums, backs up the
|
||||
current binary, and replaces it in place.
|
||||
|
||||
```console
|
||||
$ ipfs update check
|
||||
Update available: 0.40.0 -> 0.41.0
|
||||
Run 'ipfs update install' to install the latest version.
|
||||
|
||||
$ ipfs update install
|
||||
Downloading Kubo 0.41.0...
|
||||
Checksum verified (SHA-512).
|
||||
Backed up current binary to /home/user/.ipfs/old-bin/ipfs-0.40.0
|
||||
Successfully updated Kubo 0.40.0 -> 0.41.0
|
||||
```
|
||||
|
||||
Roll back with `ipfs update revert`, or list available versions with
|
||||
`ipfs update versions`.
|
||||
|
||||
This replaces the external [`ipfs-update`](https://github.com/ipfs/ipfs-update)
|
||||
tool, which is now deprecated.
|
||||
|
||||
### 📝 Changelog
|
||||
|
||||
### 👨‍👩‍👧‍👦 Contributors
|
||||
|
||||
387
test/cli/update_test.go
Normal file
387
test/cli/update_test.go
Normal file
@ -0,0 +1,387 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha512"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/kubo/test/cli/harness"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestUpdate exercises the built-in "ipfs update" command tree against
// the real GitHub Releases API. Network access is required.
//
// The node is created without Init or daemon, so install/revert error
// paths that don't depend on a running daemon can be tested.
func TestUpdate(t *testing.T) {
	t.Parallel()
	h := harness.NewT(t)
	node := h.NewNode()

	t.Run("help text describes the command", func(t *testing.T) {
		t.Parallel()
		res := node.IPFS("update", "--help")
		assert.Contains(t, res.Stdout.String(), "Update Kubo to a different version")
	})

	// check and versions are read-only GitHub API queries. They must work
	// regardless of daemon state, since users need to check for updates
	// before deciding whether to stop the daemon and install.
	t.Run("check", func(t *testing.T) {
		t.Parallel()

		t.Run("text output reports update availability", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "check")
			out := res.Stdout.String()
			// Either outcome is valid; the test binary's version relative
			// to the latest release is not fixed.
			assert.True(t,
				strings.Contains(out, "Update available") || strings.Contains(out, "Already up to date"),
				"expected update status message, got: %s", out)
		})

		t.Run("json output includes version fields", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "check", "--enc=json")
			// Field names mirror the command's JSON output keys.
			var result struct {
				CurrentVersion  string
				LatestVersion   string
				UpdateAvailable bool
			}
			err := json.Unmarshal(res.Stdout.Bytes(), &result)
			require.NoError(t, err, "invalid JSON: %s", res.Stdout.String())
			assert.NotEmpty(t, result.CurrentVersion, "must report current version")
			assert.NotEmpty(t, result.LatestVersion, "must report latest version")
		})
	})

	t.Run("versions", func(t *testing.T) {
		t.Parallel()

		t.Run("lists available versions", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "versions")
			lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
			assert.Greater(t, len(lines), 0, "should list at least one version")
		})

		t.Run("respects --count flag", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "versions", "--count=5")
			lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
			assert.LessOrEqual(t, len(lines), 5)
		})

		t.Run("json output includes current version and list", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "versions", "--count=3", "--enc=json")
			var result struct {
				Current  string
				Versions []string
			}
			err := json.Unmarshal(res.Stdout.Bytes(), &result)
			require.NoError(t, err, "invalid JSON: %s", res.Stdout.String())
			assert.NotEmpty(t, result.Current, "must report current version")
			assert.NotEmpty(t, result.Versions, "must list at least one version")
		})

		t.Run("--pre includes prerelease versions", func(t *testing.T) {
			t.Parallel()
			res := node.IPFS("update", "versions", "--count=5", "--pre")
			lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
			assert.Greater(t, len(lines), 0, "should list at least one version")
		})
	})

	// install and revert mutate the binary on disk, so they have stricter
	// preconditions. These tests verify the error paths.
	t.Run("install rejects same version", func(t *testing.T) {
		t.Parallel()
		// Ask the binary for its own version, then try to "install" it.
		vRes := node.IPFS("version", "-n")
		current := strings.TrimSpace(vRes.Stdout.String())

		res := node.RunIPFS("update", "install", current)
		assert.Error(t, res.Err)
		assert.Contains(t, res.Stderr.String(), "already running version",
			"should refuse to re-install the current version")
	})

	t.Run("revert fails when no backup exists", func(t *testing.T) {
		t.Parallel()
		res := node.RunIPFS("update", "revert")
		assert.Error(t, res.Err)
		assert.Contains(t, res.Stderr.String(), "no stashed binaries",
			"should explain there is no previous version to restore")
	})
}
|
||||
|
||||
// TestUpdateWhileDaemonRuns verifies that read-only update subcommands
// (check, versions) work while the IPFS daemon holds the repo lock.
// These commands only query the GitHub API and never touch the repo,
// so they must succeed regardless of daemon state.
func TestUpdateWhileDaemonRuns(t *testing.T) {
	t.Parallel()
	node := harness.NewT(t).NewNode().Init().StartDaemon()
	defer node.StopDaemon()

	t.Run("check succeeds with daemon running", func(t *testing.T) {
		t.Parallel()
		res := node.IPFS("update", "check")
		out := res.Stdout.String()
		// Either outcome is valid; only a failure to run would be a bug.
		assert.True(t,
			strings.Contains(out, "Update available") || strings.Contains(out, "Already up to date"),
			"check must work while daemon runs, got: %s", out)
	})

	t.Run("versions succeeds with daemon running", func(t *testing.T) {
		t.Parallel()
		res := node.IPFS("update", "versions", "--count=3")
		lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
		assert.Greater(t, len(lines), 0,
			"versions must work while daemon runs")
	})
}
|
||||
|
||||
// TestUpdateInstall exercises the full install flow end-to-end:
|
||||
// API query, archive download, SHA-512 verification, tar.gz extraction,
|
||||
// binary stash (backup), and atomic replace.
|
||||
//
|
||||
// A local mock HTTP server replaces GitHub so the test is fast, offline,
|
||||
// and deterministic. The built ipfs binary is copied to a temp directory
|
||||
// so the install replaces the copy, not the real build artifact.
|
||||
//
|
||||
// The env var KUBO_UPDATE_GITHUB_URL redirects the binary's GitHub API
|
||||
// calls to the mock server. IPFS_VERSION_FAKE makes the binary report
|
||||
// an older version so the "upgrade" to v0.99.0 is accepted.
|
||||
func TestUpdateInstall(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Build a fake binary to put inside the archive. After install, the
|
||||
// file at tmpBinPath should contain exactly these bytes.
|
||||
fakeBinary := []byte("#!/bin/sh\necho fake-ipfs-v0.99.0\n")
|
||||
|
||||
// Archive entry path: extractBinaryFromArchive looks for "kubo/<exename>".
|
||||
binName := "ipfs"
|
||||
if runtime.GOOS == "windows" {
|
||||
binName = "ipfs.exe"
|
||||
}
|
||||
var archive []byte
|
||||
if runtime.GOOS == "windows" {
|
||||
archive = buildTestZip(t, "kubo/"+binName, fakeBinary)
|
||||
} else {
|
||||
archive = buildTestTarGz(t, "kubo/"+binName, fakeBinary)
|
||||
}
|
||||
|
||||
// Compute SHA-512 of the archive for the .sha512 sidecar file.
|
||||
sum := sha512.Sum512(archive)
|
||||
|
||||
// Asset name must match what findReleaseAsset expects for the
|
||||
// current OS/arch (e.g., kubo_v0.99.0_linux-amd64.tar.gz).
|
||||
ext := "tar.gz"
|
||||
if runtime.GOOS == "windows" {
|
||||
ext = "zip"
|
||||
}
|
||||
assetName := fmt.Sprintf("kubo_v0.99.0_%s-%s.%s", runtime.GOOS, runtime.GOARCH, ext)
|
||||
checksumBody := fmt.Sprintf("%x %s\n", sum[:], assetName)
|
||||
|
||||
// Mock server: serves GitHub Releases API, archive, and .sha512 sidecar.
|
||||
// srvURL is captured after the server starts, so the handler can build
|
||||
// browser_download_url values pointing back to itself.
|
||||
var srvURL string
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
// githubReleaseByTag: GET /tags/v0.99.0
|
||||
case r.URL.Path == "/tags/v0.99.0":
|
||||
rel := map[string]any{
|
||||
"tag_name": "v0.99.0",
|
||||
"prerelease": false,
|
||||
"assets": []map[string]any{{
|
||||
"name": assetName,
|
||||
"browser_download_url": srvURL + "/download/" + assetName,
|
||||
}},
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(rel)
|
||||
|
||||
// downloadAsset: GET /download/<asset>.tar.gz
|
||||
case r.URL.Path == "/download/"+assetName:
|
||||
_, _ = w.Write(archive)
|
||||
|
||||
// downloadAndVerifySHA512: GET /download/<asset>.tar.gz.sha512
|
||||
case r.URL.Path == "/download/"+assetName+".sha512":
|
||||
_, _ = w.Write([]byte(checksumBody))
|
||||
|
||||
default:
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
srvURL = srv.URL
|
||||
|
||||
// Copy the real built binary to a temp directory. The install command
|
||||
// uses os.Executable() to find the binary to replace, so the subprocess
|
||||
// will replace this copy instead of the real build artifact.
|
||||
tmpBinDir := t.TempDir()
|
||||
tmpBinPath := filepath.Join(tmpBinDir, binName)
|
||||
copyBuiltBinary(t, tmpBinPath)
|
||||
|
||||
// Create a harness that uses the temp binary copy.
|
||||
h := harness.NewT(t, func(h *harness.Harness) {
|
||||
h.IPFSBin = tmpBinPath
|
||||
})
|
||||
node := h.NewNode()
|
||||
|
||||
// Make the binary think it's running v0.30.0 so the "upgrade" to v0.99.0
|
||||
// is accepted. Point API calls at the mock server.
|
||||
node.Runner.Env["IPFS_VERSION_FAKE"] = "0.30.0"
|
||||
node.Runner.Env["KUBO_UPDATE_GITHUB_URL"] = srvURL
|
||||
|
||||
// Run: ipfs update install v0.99.0
|
||||
res := node.RunIPFS("update", "install", "v0.99.0")
|
||||
require.NoError(t, res.Err, "install failed; stderr:\n%s", res.Stderr.String())
|
||||
|
||||
// Verify progress messages on stderr.
|
||||
stderr := res.Stderr.String()
|
||||
assert.Contains(t, stderr, "Downloading Kubo 0.99.0",
|
||||
"should show download progress")
|
||||
assert.Contains(t, stderr, "Checksum verified (SHA-512)",
|
||||
"should confirm checksum passed")
|
||||
assert.Contains(t, stderr, "Backed up current binary to",
|
||||
"should report where the old binary was stashed")
|
||||
assert.Contains(t, stderr, "Successfully updated Kubo 0.30.0 -> 0.99.0",
|
||||
"should confirm the version change")
|
||||
|
||||
// Verify the stash: the original binary should be saved to
|
||||
// $IPFS_PATH/old-bin/ipfs-0.30.0.
|
||||
stashPath := filepath.Join(node.Dir, "old-bin", "ipfs-0.30.0")
|
||||
_, err := os.Stat(stashPath)
|
||||
require.NoError(t, err, "stash file should exist at %s", stashPath)
|
||||
|
||||
// Verify the binary was replaced with the fake binary from the archive.
|
||||
got, err := os.ReadFile(tmpBinPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, fakeBinary, got,
|
||||
"binary at %s should contain the extracted archive content", tmpBinPath)
|
||||
}
|
||||
|
||||
// TestUpdateRevert exercises the full revert flow end-to-end: reading
|
||||
// a stashed binary from $IPFS_PATH/old-bin/, atomically replacing the
|
||||
// current binary, and cleaning up the stash file.
|
||||
//
|
||||
// The stash is created manually (rather than via install) so this test
|
||||
// is self-contained and does not depend on network access or a mock server.
|
||||
//
|
||||
// How it works: the subprocess runs from tmpBinPath, so os.Executable()
|
||||
// inside the subprocess returns tmpBinPath. The revert command reads the
|
||||
// stash and atomically replaces the file at tmpBinPath with stash content.
|
||||
func TestUpdateRevert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
binName := "ipfs"
|
||||
if runtime.GOOS == "windows" {
|
||||
binName = "ipfs.exe"
|
||||
}
|
||||
|
||||
// Copy the real built binary to a temp directory. Revert will replace
|
||||
// this copy with the stash content via os.Executable() -> tmpBinPath.
|
||||
tmpBinDir := t.TempDir()
|
||||
tmpBinPath := filepath.Join(tmpBinDir, binName)
|
||||
copyBuiltBinary(t, tmpBinPath)
|
||||
|
||||
h := harness.NewT(t, func(h *harness.Harness) {
|
||||
h.IPFSBin = tmpBinPath
|
||||
})
|
||||
node := h.NewNode()
|
||||
|
||||
// Create a stash directory with known content that differs from the
|
||||
// current binary. findLatestStash looks for ipfs-<semver> files.
|
||||
stashDir := filepath.Join(node.Dir, "old-bin")
|
||||
require.NoError(t, os.MkdirAll(stashDir, 0o755))
|
||||
stashName := "ipfs-0.30.0"
|
||||
if runtime.GOOS == "windows" {
|
||||
stashName = "ipfs-0.30.0.exe"
|
||||
}
|
||||
stashPath := filepath.Join(stashDir, stashName)
|
||||
stashContent := []byte("#!/bin/sh\necho reverted-to-0.30.0\n")
|
||||
require.NoError(t, os.WriteFile(stashPath, stashContent, 0o755))
|
||||
|
||||
// Run: ipfs update revert
|
||||
// The subprocess executes from tmpBinPath (a real ipfs binary).
|
||||
// os.Executable() returns tmpBinPath, so revert replaces that file
|
||||
// with stashContent and removes the stash file.
|
||||
res := node.RunIPFS("update", "revert")
|
||||
require.NoError(t, res.Err, "revert failed; stderr:\n%s", res.Stderr.String())
|
||||
|
||||
// Verify the revert message.
|
||||
assert.Contains(t, res.Stderr.String(), "Reverted to Kubo 0.30.0",
|
||||
"should confirm which version was restored")
|
||||
|
||||
// Verify the stash file was cleaned up after successful revert.
|
||||
_, err := os.Stat(stashPath)
|
||||
assert.True(t, os.IsNotExist(err),
|
||||
"stash file should be removed after revert, but still exists at %s", stashPath)
|
||||
|
||||
// Verify the binary was replaced with the stash content.
|
||||
got, err := os.ReadFile(tmpBinPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, stashContent, got,
|
||||
"binary at %s should contain the stash content after revert", tmpBinPath)
|
||||
}
|
||||
|
||||
// --- test helpers ---
|
||||
|
||||
// copyBuiltBinary copies the built ipfs binary (cmd/ipfs/ipfs) to dst.
|
||||
// It locates the project root the same way the test harness does.
|
||||
func copyBuiltBinary(t *testing.T, dst string) {
|
||||
t.Helper()
|
||||
// Use a throwaway harness to resolve the default binary path,
|
||||
// reusing the same project-root lookup the harness already has.
|
||||
h := harness.NewT(t)
|
||||
data, err := os.ReadFile(h.IPFSBin)
|
||||
require.NoError(t, err, "failed to read built binary at %s (did you run 'make build'?)", h.IPFSBin)
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(dst), 0o755))
|
||||
require.NoError(t, os.WriteFile(dst, data, 0o755))
|
||||
}
|
||||
|
||||
// buildTestTarGz creates an in-memory tar.gz archive with a single file entry.
|
||||
func buildTestTarGz(t *testing.T, path string, content []byte) []byte {
|
||||
t.Helper()
|
||||
var buf bytes.Buffer
|
||||
gzw := gzip.NewWriter(&buf)
|
||||
tw := tar.NewWriter(gzw)
|
||||
require.NoError(t, tw.WriteHeader(&tar.Header{
|
||||
Name: path,
|
||||
Mode: 0o755,
|
||||
Size: int64(len(content)),
|
||||
}))
|
||||
_, err := tw.Write(content)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, tw.Close())
|
||||
require.NoError(t, gzw.Close())
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// buildTestZip creates an in-memory zip archive with a single file entry.
|
||||
func buildTestZip(t *testing.T, path string, content []byte) []byte {
|
||||
t.Helper()
|
||||
var buf bytes.Buffer
|
||||
zw := zip.NewWriter(&buf)
|
||||
fw, err := zw.Create(path)
|
||||
require.NoError(t, err)
|
||||
_, err = fw.Write(content)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, zw.Close())
|
||||
return buf.Bytes()
|
||||
}
|
||||
Loading…
Reference in New Issue
Block a user