Merge pull request #7857 from ipfs/fix/separate-migrations-bins
commit bb8260abe1
@ -26,7 +26,7 @@ import (
 	libp2p "github.com/ipfs/go-ipfs/core/node/libp2p"
 	nodeMount "github.com/ipfs/go-ipfs/fuse/node"
 	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
-	migrate "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
+	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
 	sockets "github.com/libp2p/go-socket-activation"

 	cmds "github.com/ipfs/go-ipfs-cmds"
@ -288,7 +288,9 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 			return fmt.Errorf("fs-repo requires migration")
 		}

-		err = migrate.RunMigration(fsrepo.RepoVersion)
+		// Fetch migrations from current distribution, or location from environ
+		fetcher := migrations.NewHttpFetcher(migrations.GetDistPathEnv(migrations.CurrentIpfsDist), "", "go-ipfs", 0)
+		err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", false)
 		if err != nil {
 			fmt.Println("The migrations of fs-repo failed:")
 			fmt.Printf("  %s\n", err)
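The three added lines above are the heart of the change: the daemon now builds a Fetcher and hands it to the new migrations package instead of driving a single external fs-repo-migrations binary. A minimal sketch of that call sequence outside the daemon, based only on the calls shown in this hunk (the wrapper name runRepoMigration is illustrative, not part of this commit):

package main

import (
	"context"
	"fmt"

	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

// runRepoMigration mirrors the calls added in the hunk above.
func runRepoMigration(ctx context.Context) error {
	// Fetch migrations from the current distribution, or from the location in IPFS_DIST_PATH.
	fetcher := migrations.NewHttpFetcher(migrations.GetDistPathEnv(migrations.CurrentIpfsDist), "", "go-ipfs", 0)
	// "" means the default repo location (IPFS_PATH or ~/.ipfs); false disallows downgrades.
	if err := migrations.RunMigration(ctx, fetcher, fsrepo.RepoVersion, "", false); err != nil {
		return fmt.Errorf("the migrations of fs-repo failed: %w", err)
	}
	return nil
}

func main() {
	if err := runRepoMigration(context.Background()); err != nil {
		fmt.Println(err)
	}
}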
@ -14,7 +14,7 @@ import (
 	keystore "github.com/ipfs/go-ipfs-keystore"
 	repo "github.com/ipfs/go-ipfs/repo"
 	"github.com/ipfs/go-ipfs/repo/common"
-	mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
+	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
 	dir "github.com/ipfs/go-ipfs/thirdparty/dir"

 	ds "github.com/ipfs/go-datastore"
@ -142,7 +142,7 @@ func open(repoPath string) (repo.Repo, error) {
 	}()

 	// Check version, and error out if not matching
-	ver, err := mfsr.RepoPath(r.path).Version()
+	ver, err := migrations.RepoVersion(r.path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, ErrNoVersion
@ -291,7 +291,7 @@ func Init(repoPath string, conf *config.Config) error {
 		return err
 	}

-	if err := mfsr.RepoPath(repoPath).WriteVersion(RepoVersion); err != nil {
+	if err := migrations.WriteRepoVersion(repoPath, RepoVersion); err != nil {
 		return err
 	}

repo/fsrepo/migrations/fetch.go (new file, 172 lines)
@ -0,0 +1,172 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FetchBinary downloads an archive from the distribution site and unpacks it.
|
||||
//
|
||||
// The base name of the binary inside the archive may differ from the base
|
||||
// archive name. If it does, then specify binName. For example, the following
|
||||
// is needed because the archive "go-ipfs_v0.7.0_linux-amd64.tar.gz" contains a
|
||||
// binary named "ipfs"
|
||||
//
|
||||
// FetchBinary(ctx, fetcher, "go-ipfs", "v0.7.0", "ipfs", tmpDir)
|
||||
//
|
||||
// If out is a directory, then the binary is written to that directory with the
|
||||
// same name it has inside the archive. Otherwise, the binary file is written
|
||||
// to the file named by out.
|
||||
func FetchBinary(ctx context.Context, fetcher Fetcher, dist, ver, binName, out string) (string, error) {
|
||||
// The archive file name is the base of dist. This is to support a possible subdir in
|
||||
// dist, for example: "ipfs-repo-migrations/fs-repo-11-to-12"
|
||||
arcName := filepath.Base(dist)
|
||||
// If the binary base name is not specified, then it is the same as the archive base name.
|
||||
if binName == "" {
|
||||
binName = arcName
|
||||
}
|
||||
|
||||
// Name of binary that exists inside archive
|
||||
binName = ExeName(binName)
|
||||
|
||||
// Return error if file exists or stat fails for reason other than not
|
||||
// exists. If out is a directory, then write extracted binary to that dir.
|
||||
fi, err := os.Stat(out)
|
||||
if !os.IsNotExist(err) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return "", &os.PathError{
|
||||
Op: "FetchBinary",
|
||||
Path: out,
|
||||
Err: os.ErrExist,
|
||||
}
|
||||
}
|
||||
// out exists and is a directory, so compose final name
|
||||
out = filepath.Join(out, binName)
|
||||
// Check if the binary already exists in the directory
|
||||
_, err = os.Stat(out)
|
||||
if !os.IsNotExist(err) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "", &os.PathError{
|
||||
Op: "FetchBinary",
|
||||
Path: out,
|
||||
Err: os.ErrExist,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create temp directory to store download
|
||||
tmpDir, err := ioutil.TempDir("", arcName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
atype := "tar.gz"
|
||||
if runtime.GOOS == "windows" {
|
||||
atype = "zip"
|
||||
}
|
||||
|
||||
arcDistPath, arcFullName := makeArchivePath(dist, arcName, ver, atype)
|
||||
|
||||
// Create a file to write the archive data to
|
||||
arcPath := filepath.Join(tmpDir, arcFullName)
|
||||
arcFile, err := os.Create(arcPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer arcFile.Close()
|
||||
|
||||
// Open connection to download archive from ipfs path
|
||||
rc, err := fetcher.Fetch(ctx, arcDistPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// Write download data
|
||||
_, err = io.Copy(arcFile, rc)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
arcFile.Close()
|
||||
|
||||
// Unpack the archive and write binary to out
|
||||
err = unpackArchive(arcPath, atype, dist, binName, out)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Set mode of binary to executable
|
||||
err = os.Chmod(out, 0755)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// osWithVariant returns the OS name with optional variant.
|
||||
// Currently returns either runtime.GOOS, or "linux-musl".
|
||||
func osWithVariant() (string, error) {
|
||||
if runtime.GOOS != "linux" {
|
||||
return runtime.GOOS, nil
|
||||
}
|
||||
|
||||
// ldd outputs the system's kind of libc.
|
||||
// - on standard ubuntu: ldd (Ubuntu GLIBC 2.23-0ubuntu5) 2.23
|
||||
// - on alpine: musl libc (x86_64)
|
||||
//
|
||||
// we use the combined stdout+stderr,
|
||||
// because ldd --version prints differently on different OSes.
|
||||
// - on standard ubuntu: stdout
|
||||
// - on alpine: stderr (it probably doesn't know the --version flag)
|
||||
//
|
||||
// we suppress non-zero exit codes (see last point about alpine).
|
||||
out, err := exec.Command("sh", "-c", "ldd --version || true").CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// now just see if we can find "musl" somewhere in the output
|
||||
scan := bufio.NewScanner(bytes.NewBuffer(out))
|
||||
for scan.Scan() {
|
||||
if strings.Contains(scan.Text(), "musl") {
|
||||
return "linux-musl", nil
|
||||
}
|
||||
}
|
||||
|
||||
return "linux", nil
|
||||
}
|
||||
|
||||
// makeArchivePath composes the path, relative to the distribution site, from which to
|
||||
// download a binary. The path returned does not contain the distribution site path,
|
||||
// e.g. "/ipns/dist.ipfs.io/", since that is know to the fetcher.
|
||||
//
|
||||
// Returns the archive path and the base name.
|
||||
//
|
||||
// The ipfs path format is: distribution/version/archiveName
|
||||
// - distribution is the name of a distribution, such as "go-ipfs"
|
||||
// - version is the version to fetch, such as "v0.8.0-rc2"
|
||||
// - archiveName is formatted as name_version_osv-GOARCH.atype, such as
|
||||
// "go-ipfs_v0.8.0-rc2_linux-amd64.tar.gz"
|
||||
//
|
||||
// This would form the path:
|
||||
// go-ipfs/v0.8.0/go-ipfs_v0.8.0_linux-amd64.tar.gz
|
||||
func makeArchivePath(dist, name, ver, atype string) (string, string) {
|
||||
arcName := fmt.Sprintf("%s_%s_%s-%s.%s", name, ver, runtime.GOOS, runtime.GOARCH, atype)
|
||||
return fmt.Sprintf("%s/%s/%s", dist, ver, arcName), arcName
|
||||
}
|
||||
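As the comments on FetchBinary and makeArchivePath above describe, archives live at distribution/version/name_version_os-arch.type on the distribution site, and FetchBinary downloads one and unpacks the named binary. A hedged usage sketch, directly following the doc-comment example (the version string and output directory are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	ctx := context.Background()
	fetcher := migrations.NewHttpFetcher(migrations.GetDistPathEnv(migrations.CurrentIpfsDist), "", "go-ipfs", 0)

	// The archive go-ipfs_v0.7.0_<os>-<arch>.tar.gz contains a binary named "ipfs",
	// so the binary name is passed explicitly, as in the doc comment above.
	bin, err := migrations.FetchBinary(ctx, fetcher, "go-ipfs", "v0.7.0", "ipfs", ".")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("downloaded binary to", bin)
}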
repo/fsrepo/migrations/fetch_test.go (new file, 252 lines)
@ -0,0 +1,252 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func createTestServer() *httptest.Server {
|
||||
reqHandler := func(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
if strings.Contains(r.URL.Path, "not-here") {
|
||||
http.NotFound(w, r)
|
||||
} else if strings.HasSuffix(r.URL.Path, "versions") {
|
||||
fmt.Fprint(w, "v1.0.0\nv1.1.0\nv1.1.2\nv2.0.0-rc1\n2.0.0\nv2.0.1\n")
|
||||
} else if strings.HasSuffix(r.URL.Path, ".tar.gz") {
|
||||
createFakeArchive(r.URL.Path, false, w)
|
||||
} else if strings.HasSuffix(r.URL.Path, "zip") {
|
||||
createFakeArchive(r.URL.Path, true, w)
|
||||
} else {
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}
|
||||
return httptest.NewServer(http.HandlerFunc(reqHandler))
|
||||
}
|
||||
|
||||
func createFakeArchive(name string, archZip bool, w io.Writer) {
|
||||
fileName := strings.Split(path.Base(name), "_")[0]
|
||||
root := path.Base(path.Dir(path.Dir(name)))
|
||||
|
||||
// Simulate fetching go-ipfs, which has "ipfs" as the name in the archive.
|
||||
if fileName == "go-ipfs" {
|
||||
fileName = "ipfs"
|
||||
}
|
||||
fileName = ExeName(fileName)
|
||||
|
||||
var err error
|
||||
if archZip {
|
||||
err = writeZip(root, fileName, "FAKE DATA", w)
|
||||
} else {
|
||||
err = writeTarGzip(root, fileName, "FAKE DATA", w)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDistPath(t *testing.T) {
|
||||
os.Unsetenv(envIpfsDistPath)
|
||||
distPath := GetDistPathEnv("")
|
||||
if distPath != LatestIpfsDist {
|
||||
t.Error("did not set default dist path")
|
||||
}
|
||||
|
||||
testDist := "/unit/test/dist"
|
||||
err := os.Setenv(envIpfsDistPath, testDist)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer func() {
|
||||
os.Unsetenv(envIpfsDistPath)
|
||||
}()
|
||||
|
||||
distPath = GetDistPathEnv("")
|
||||
if distPath != testDist {
|
||||
t.Error("did not set dist path from environ")
|
||||
}
|
||||
distPath = GetDistPathEnv("ignored")
|
||||
if distPath != testDist {
|
||||
t.Error("did not set dist path from environ")
|
||||
}
|
||||
|
||||
testDist = "/unit/test/dist2"
|
||||
fetcher := NewHttpFetcher(testDist, "", "", 0)
|
||||
if fetcher.distPath != testDist {
|
||||
t.Error("did not set dist path")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHttpFetch(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := createTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
fetcher := NewHttpFetcher("", ts.URL, "", 0)
|
||||
|
||||
rc, err := fetcher.Fetch(ctx, "/versions")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
var out []string
|
||||
scan := bufio.NewScanner(rc)
|
||||
for scan.Scan() {
|
||||
out = append(out, scan.Text())
|
||||
}
|
||||
err = scan.Err()
|
||||
if err != nil {
|
||||
t.Fatal("could not read versions:", err)
|
||||
}
|
||||
|
||||
if len(out) < 6 {
|
||||
t.Fatal("do not get all expected data")
|
||||
}
|
||||
if out[0] != "v1.0.0" {
|
||||
t.Fatal("expected v1.0.0 as first line, got", out[0])
|
||||
}
|
||||
|
||||
// Check not found
|
||||
_, err = fetcher.Fetch(ctx, "/no_such_file")
|
||||
if err == nil || !strings.Contains(err.Error(), "404") {
|
||||
t.Fatal("expected error 404")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchBinary(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "fetchtest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := createTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
fetcher := NewHttpFetcher("", ts.URL, "", 0)
|
||||
|
||||
vers, err := DistVersions(ctx, fetcher, distFSRM, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("latest version of", distFSRM, "is", vers[len(vers)-1])
|
||||
|
||||
bin, err := FetchBinary(ctx, fetcher, distFSRM, vers[0], "", tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(bin)
|
||||
if os.IsNotExist(err) {
|
||||
t.Error("expected file to exist:", bin)
|
||||
}
|
||||
|
||||
t.Log("downloaded and unpacked", fi.Size(), "byte file:", fi.Name())
|
||||
|
||||
bin, err = FetchBinary(ctx, fetcher, "go-ipfs", "v0.3.5", "ipfs", tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fi, err = os.Stat(bin)
|
||||
if os.IsNotExist(err) {
|
||||
t.Error("expected file to exist:", bin)
|
||||
}
|
||||
|
||||
t.Log("downloaded and unpacked", fi.Size(), "byte file:", fi.Name())
|
||||
|
||||
// Check error is destination already exists and is not directory
|
||||
_, err = FetchBinary(ctx, fetcher, "go-ipfs", "v0.3.5", "ipfs", bin)
|
||||
if !os.IsExist(err) {
|
||||
t.Fatal("expected 'exists' error, got", err)
|
||||
}
|
||||
|
||||
_, err = FetchBinary(ctx, fetcher, "go-ipfs", "v0.3.5", "ipfs", tmpDir)
|
||||
if !os.IsExist(err) {
|
||||
t.Error("expected 'exists' error, got:", err)
|
||||
}
|
||||
|
||||
os.Remove(filepath.Join(tmpDir, ExeName("ipfs")))
|
||||
|
||||
// Check error creating temp download directory
|
||||
//
|
||||
// Windows doesn't have read-only directories (https://github.com/golang/go/issues/35042);
// this would need to be tested another way.
|
||||
if runtime.GOOS != "windows" {
|
||||
err = os.Chmod(tmpDir, 0555)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = os.Setenv("TMPDIR", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = FetchBinary(ctx, fetcher, "go-ipfs", "v0.3.5", "ipfs", tmpDir)
|
||||
if !os.IsPermission(err) {
|
||||
t.Error("expected 'permission' error, got:", err)
|
||||
}
|
||||
err = os.Setenv("TMPDIR", "/tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = os.Chmod(tmpDir, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check error if failure to fetch due to bad dist
|
||||
_, err = FetchBinary(ctx, fetcher, "not-here", "v0.3.5", "ipfs", tmpDir)
|
||||
if err == nil || !strings.Contains(err.Error(), "Not Found") {
|
||||
t.Error("expected 'Not Found' error, got:", err)
|
||||
}
|
||||
|
||||
// Check error if failure to unpack archive
|
||||
_, err = FetchBinary(ctx, fetcher, "go-ipfs", "v0.3.5", "not-such-bin", tmpDir)
|
||||
if err == nil || err.Error() != "no binary found in archive" {
|
||||
t.Error("expected 'no binary found in archive' error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiFetcher(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := createTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
badFetcher := NewHttpFetcher("", "bad-url", "", 0)
|
||||
fetcher := NewHttpFetcher("", ts.URL, "", 0)
|
||||
|
||||
mf := NewMultiFetcher(badFetcher, fetcher)
|
||||
|
||||
rc, err := mf.Fetch(ctx, "/versions")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
vers, err := ioutil.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatal("could not read versions:", err)
|
||||
}
|
||||
if len(vers) < 45 {
|
||||
fmt.Println("unexpected more data")
|
||||
}
|
||||
}
|
||||
repo/fsrepo/migrations/fetcher.go (new file, 83 lines)
@ -0,0 +1,83 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
// Current distribution to fetch migrations from
|
||||
CurrentIpfsDist = "/ipfs/QmVxxcTSuryJYdQJGcS8SyhzN7NBNLTqVPAxpu6gp2ZcrR"
|
||||
// Latest distribution path. Default for fetchers.
|
||||
LatestIpfsDist = "/ipns/dist.ipfs.io"
|
||||
|
||||
// Distribution environ variable
|
||||
envIpfsDistPath = "IPFS_DIST_PATH"
|
||||
)
|
||||
|
||||
type Fetcher interface {
|
||||
// Fetch attempts to fetch the file at the given ipfs path.
|
||||
// Returns io.ReadCloser on success, which caller must close.
|
||||
Fetch(ctx context.Context, filePath string) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// MultiFetcher holds multiple Fetchers and provides a Fetch that tries each
|
||||
// until one succeeds.
|
||||
type MultiFetcher struct {
|
||||
fetchers []Fetcher
|
||||
}
|
||||
|
||||
type limitReadCloser struct {
|
||||
io.Reader
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// NewMultiFetcher creates a MultiFetcher with the given Fetchers. The
|
||||
// Fetchers are tried in the order they are passed to this function.
|
||||
func NewMultiFetcher(f ...Fetcher) Fetcher {
|
||||
mf := &MultiFetcher{
|
||||
fetchers: make([]Fetcher, len(f)),
|
||||
}
|
||||
copy(mf.fetchers, f)
|
||||
return mf
|
||||
}
|
||||
|
||||
// Fetch attempts to fetch the file at each of its fetchers until one succeeds.
|
||||
// Returns io.ReadCloser on success, which caller must close.
|
||||
func (f *MultiFetcher) Fetch(ctx context.Context, ipfsPath string) (rc io.ReadCloser, err error) {
|
||||
for _, fetcher := range f.fetchers {
|
||||
rc, err = fetcher.Fetch(ctx, ipfsPath)
|
||||
if err == nil {
|
||||
// Transferred using this fetcher
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewLimitReadCloser returns a new io.ReadCloser with the reader wrapped in an
|
||||
// io.LimitedReader limited to reading the amount specified.
|
||||
func NewLimitReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser {
|
||||
return limitReadCloser{
|
||||
Reader: io.LimitReader(rc, limit),
|
||||
Closer: rc,
|
||||
}
|
||||
}
|
||||
|
||||
// GetDistPathEnv returns the IPFS path to the distribution site, using
|
||||
// the value of environ variable specified by envIpfsDistPath. If the environ
|
||||
// variable is not set, then returns the provided distPath, and if that is not set
|
||||
// then returns the IPNS path.
|
||||
//
|
||||
// To get the IPFS path of the latest distribution, if not overridden by the
|
||||
// environ variable: GetDistPathEnv(CurrentIpfsDist)
|
||||
func GetDistPathEnv(distPath string) string {
|
||||
if dist := os.Getenv(envIpfsDistPath); dist != "" {
|
||||
return dist
|
||||
}
|
||||
if distPath == "" {
|
||||
return LatestIpfsDist
|
||||
}
|
||||
return distPath
|
||||
}
|
||||
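NewMultiFetcher above tries its fetchers in order until one succeeds, which is how a primary gateway can be backed by a fallback. A small hypothetical sketch (the gateway URL is a placeholder; "" keeps the defaults defined in this file):

package main

import (
	"context"
	"fmt"
	"io/ioutil"

	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	// Primary gateway plus a fallback using the default gateway, dist path, and fetch limit.
	primary := migrations.NewHttpFetcher("", "https://dweb.link", "", 0)
	fallback := migrations.NewHttpFetcher("", "", "", 0)
	fetcher := migrations.NewMultiFetcher(primary, fallback)

	rc, err := fetcher.Fetch(context.Background(), "/go-ipfs/versions")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer rc.Close()

	data, _ := ioutil.ReadAll(rc)
	fmt.Print(string(data))
}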
repo/fsrepo/migrations/httpfetcher.go (new file, 94 lines)
@ -0,0 +1,94 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultGatewayURL = "https://ipfs.io"
|
||||
defaultFetchLimit = 1024 * 1024 * 512
|
||||
)
|
||||
|
||||
// HttpFetcher fetches files over HTTP
|
||||
type HttpFetcher struct {
|
||||
distPath string
|
||||
gateway string
|
||||
limit int64
|
||||
userAgent string
|
||||
}
|
||||
|
||||
var _ Fetcher = (*HttpFetcher)(nil)
|
||||
|
||||
// NewHttpFetcher creates a new HttpFetcher
|
||||
//
|
||||
// Specifying "" for distPath sets the default IPNS path.
|
||||
// Specifying "" for gateway sets the default.
|
||||
// Specifying 0 for fetchLimit sets the default, -1 means no limit.
|
||||
func NewHttpFetcher(distPath, gateway, userAgent string, fetchLimit int64) *HttpFetcher {
|
||||
f := &HttpFetcher{
|
||||
distPath: LatestIpfsDist,
|
||||
gateway: defaultGatewayURL,
|
||||
limit: defaultFetchLimit,
|
||||
}
|
||||
|
||||
if distPath != "" {
|
||||
if !strings.HasPrefix(distPath, "/") {
|
||||
distPath = "/" + distPath
|
||||
}
|
||||
f.distPath = distPath
|
||||
}
|
||||
|
||||
if gateway != "" {
|
||||
f.gateway = strings.TrimRight(gateway, "/")
|
||||
}
|
||||
|
||||
if fetchLimit != 0 {
|
||||
if fetchLimit == -1 {
|
||||
fetchLimit = 0
|
||||
}
|
||||
f.limit = fetchLimit
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// Fetch attempts to fetch the file at the given path, from the distribution
|
||||
// site configured for this HttpFetcher. Returns io.ReadCloser on success,
|
||||
// which caller must close.
|
||||
func (f *HttpFetcher) Fetch(ctx context.Context, filePath string) (io.ReadCloser, error) {
|
||||
gwURL := f.gateway + path.Join(f.distPath, filePath)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, gwURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http.NewRequest error: %s", err)
|
||||
}
|
||||
|
||||
if f.userAgent != "" {
|
||||
req.Header.Set("User-Agent", f.userAgent)
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http.DefaultClient.Do error: %s", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
defer resp.Body.Close()
|
||||
mes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading error body: %s", err)
|
||||
}
|
||||
return nil, fmt.Errorf("GET %s error: %s: %s", gwURL, resp.Status, string(mes))
|
||||
}
|
||||
|
||||
if f.limit != 0 {
|
||||
return NewLimitReadCloser(resp.Body, f.limit), nil
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
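NewHttpFetcher above fills in defaults for any zero-value argument: an empty distPath or gateway picks up the defaults, a fetchLimit of 0 keeps the roughly 512 MiB cap, and -1 disables the limit entirely. A brief hypothetical sketch showing the IPFS_DIST_PATH override together with an unlimited fetcher (the CID is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	// IPFS_DIST_PATH, when set, wins over whatever is passed to GetDistPathEnv.
	os.Setenv("IPFS_DIST_PATH", "/ipfs/QmSomePinnedDistCID") // illustrative CID
	distPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
	fmt.Println("dist path:", distPath)

	// -1 disables the download size limit (0 would keep the default cap).
	fetcher := migrations.NewHttpFetcher(distPath, "", "go-ipfs", -1)
	_ = fetcher
}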
repo/fsrepo/migrations/ipfsdir.go (new file, 102 lines)
@ -0,0 +1,102 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
const (
|
||||
envIpfsPath = "IPFS_PATH"
|
||||
defIpfsDir = ".ipfs"
|
||||
versionFile = "version"
|
||||
)
|
||||
|
||||
func init() {
|
||||
homedir.DisableCache = true
|
||||
}
|
||||
|
||||
// IpfsDir returns the path of the ipfs directory. If dir is specified, then
|
||||
// returns the expanded version of dir. If dir is "", then return the directory
|
||||
// set by IPFS_PATH, or if IPFS_PATH is not set, then return the default
|
||||
// location in the home directory.
|
||||
func IpfsDir(dir string) (string, error) {
|
||||
var err error
|
||||
if dir == "" {
|
||||
dir = os.Getenv(envIpfsPath)
|
||||
}
|
||||
if dir != "" {
|
||||
dir, err = homedir.Expand(dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if home == "" {
|
||||
return "", errors.New("could not determine IPFS_PATH, home dir not set")
|
||||
}
|
||||
|
||||
return filepath.Join(home, defIpfsDir), nil
|
||||
}
|
||||
|
||||
// CheckIpfsDir gets the ipfs directory and checks that the directory exists.
|
||||
func CheckIpfsDir(dir string) (string, error) {
|
||||
var err error
|
||||
dir, err = IpfsDir(dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, err = os.Stat(dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// RepoVersion returns the version of the repo in the ipfs directory. If the
|
||||
// ipfs directory is not specified then the default location is used.
|
||||
func RepoVersion(ipfsDir string) (int, error) {
|
||||
ipfsDir, err := CheckIpfsDir(ipfsDir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return repoVersion(ipfsDir)
|
||||
}
|
||||
|
||||
// WriteRepoVersion writes the specified repo version to the repo located in
|
||||
// ipfsDir. If ipfsDir is not specified, then the default location is used.
|
||||
func WriteRepoVersion(ipfsDir string, version int) error {
|
||||
ipfsDir, err := IpfsDir(ipfsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vFilePath := filepath.Join(ipfsDir, versionFile)
|
||||
return ioutil.WriteFile(vFilePath, []byte(fmt.Sprintf("%d\n", version)), 0644)
|
||||
}
|
||||
|
||||
func repoVersion(ipfsDir string) (int, error) {
|
||||
c, err := ioutil.ReadFile(filepath.Join(ipfsDir, versionFile))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
ver, err := strconv.Atoi(strings.TrimSpace(string(c)))
|
||||
if err != nil {
|
||||
return 0, errors.New("invalid data in repo version file")
|
||||
}
|
||||
return ver, nil
|
||||
}
|
||||
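RepoVersion and WriteRepoVersion above read and write the plain-text version file inside the repo directory, resolving "" to IPFS_PATH or ~/.ipfs. A hedged sketch of reading the version from the default repo (error handling shortened):

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	// "" resolves to $IPFS_PATH, or ~/.ipfs if that is unset.
	dir, err := migrations.CheckIpfsDir("")
	if err != nil {
		fmt.Println("no repo found:", err)
		return
	}

	ver, err := migrations.RepoVersion(dir)
	if err != nil {
		fmt.Println("cannot read repo version:", err)
		return
	}
	fmt.Println("repo", dir, "is at version", ver)

	// Writing is normally left to the migration tooling itself:
	// err = migrations.WriteRepoVersion(dir, ver)
}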
repo/fsrepo/migrations/ipfsdir_test.go (new file, 160 lines)
@ -0,0 +1,160 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
fakeHome string
|
||||
fakeIpfs string
|
||||
)
|
||||
|
||||
func TestRepoDir(t *testing.T) {
|
||||
var err error
|
||||
fakeHome, err = ioutil.TempDir("", "testhome")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(fakeHome)
|
||||
|
||||
os.Setenv("HOME", fakeHome)
|
||||
fakeIpfs = filepath.Join(fakeHome, ".ipfs")
|
||||
|
||||
t.Run("testIpfsDir", testIpfsDir)
|
||||
t.Run("testCheckIpfsDir", testCheckIpfsDir)
|
||||
t.Run("testRepoVersion", testRepoVersion)
|
||||
}
|
||||
|
||||
func testIpfsDir(t *testing.T) {
|
||||
_, err := CheckIpfsDir("")
|
||||
if err == nil {
|
||||
t.Fatal("expected error when no .ipfs directory to find")
|
||||
}
|
||||
|
||||
err = os.Mkdir(fakeIpfs, os.ModePerm)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
dir, err := IpfsDir("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dir != fakeIpfs {
|
||||
t.Fatal("wrong ipfs directory:", dir)
|
||||
}
|
||||
|
||||
os.Setenv(envIpfsPath, "~/.ipfs")
|
||||
dir, err = IpfsDir("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dir != fakeIpfs {
|
||||
t.Fatal("wrong ipfs directory:", dir)
|
||||
}
|
||||
|
||||
_, err = IpfsDir("~somesuer/foo")
|
||||
if err == nil {
|
||||
t.Fatal("expected error with user-specific home dir")
|
||||
}
|
||||
|
||||
err = os.Setenv(envIpfsPath, "~somesuer/foo")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = IpfsDir("~somesuer/foo")
|
||||
if err == nil {
|
||||
t.Fatal("expected error with user-specific home dir")
|
||||
}
|
||||
err = os.Unsetenv(envIpfsPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
dir, err = IpfsDir("~/.ipfs")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dir != fakeIpfs {
|
||||
t.Fatal("wrong ipfs directory:", dir)
|
||||
}
|
||||
|
||||
_, err = IpfsDir("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testCheckIpfsDir(t *testing.T) {
|
||||
_, err := CheckIpfsDir("~somesuer/foo")
|
||||
if err == nil {
|
||||
t.Fatal("expected error with user-specific home dir")
|
||||
}
|
||||
|
||||
_, err = CheckIpfsDir("~/no_such_dir")
|
||||
if err == nil {
|
||||
t.Fatal("expected error from nonexistent directory")
|
||||
}
|
||||
|
||||
dir, err := CheckIpfsDir("~/.ipfs")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dir != fakeIpfs {
|
||||
t.Fatal("wrong ipfs directory:", dir)
|
||||
}
|
||||
}
|
||||
|
||||
func testRepoVersion(t *testing.T) {
|
||||
badDir := "~somesuer/foo"
|
||||
_, err := RepoVersion(badDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error with user-specific home dir")
|
||||
}
|
||||
|
||||
_, err = RepoVersion(fakeIpfs)
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatal("expected not-exist error")
|
||||
}
|
||||
|
||||
testVer := 42
|
||||
err = WriteRepoVersion(fakeIpfs, testVer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var ver int
|
||||
ver, err = RepoVersion(fakeIpfs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ver != testVer {
|
||||
t.Fatalf("expected version %d, got %d", testVer, ver)
|
||||
}
|
||||
|
||||
err = WriteRepoVersion(badDir, testVer)
|
||||
if err == nil {
|
||||
t.Fatal("expected error with user-specific home dir")
|
||||
}
|
||||
|
||||
ipfsDir, err := IpfsDir(fakeIpfs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
vFilePath := filepath.Join(ipfsDir, versionFile)
|
||||
err = ioutil.WriteFile(vFilePath, []byte("bad-version-data\n"), 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = RepoVersion(fakeIpfs)
|
||||
if err == nil || err.Error() != "invalid data in repo version file" {
|
||||
t.Fatal("expected 'invalid data' error")
|
||||
}
|
||||
err = WriteRepoVersion(fakeIpfs, testVer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -1,55 +0,0 @@
|
||||
package mfsr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const VersionFile = "version"
|
||||
|
||||
type RepoPath string
|
||||
|
||||
func (rp RepoPath) VersionFile() string {
|
||||
return path.Join(string(rp), VersionFile)
|
||||
}
|
||||
|
||||
func (rp RepoPath) Version() (int, error) {
|
||||
if rp == "" {
|
||||
return 0, fmt.Errorf("invalid repo path \"%s\"", rp)
|
||||
}
|
||||
|
||||
fn := rp.VersionFile()
|
||||
if _, err := os.Stat(fn); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
c, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
s := strings.TrimSpace(string(c))
|
||||
return strconv.Atoi(s)
|
||||
}
|
||||
|
||||
func (rp RepoPath) CheckVersion(version int) error {
|
||||
v, err := rp.Version()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if v != version {
|
||||
return fmt.Errorf("versions differ (expected: %d, actual:%d)", version, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rp RepoPath) WriteVersion(version int) error {
|
||||
fn := rp.VersionFile()
|
||||
return ioutil.WriteFile(fn, []byte(fmt.Sprintf("%d\n", version)), 0644)
|
||||
}
|
||||
@ -1,43 +0,0 @@
|
||||
package mfsr
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-ipfs/thirdparty/assert"
|
||||
)
|
||||
|
||||
func testVersionFile(v string, t *testing.T) (rp RepoPath) {
|
||||
name, err := ioutil.TempDir("", v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rp = RepoPath(name)
|
||||
return rp
|
||||
}
|
||||
|
||||
func TestVersion(t *testing.T) {
|
||||
rp := RepoPath("")
|
||||
_, err := rp.Version()
|
||||
assert.Err(err, t, "Should throw an error when path is bad,")
|
||||
|
||||
rp = RepoPath("/path/to/nowhere")
|
||||
_, err = rp.Version()
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("Should throw an `IsNotExist` error when file doesn't exist: %v", err)
|
||||
}
|
||||
|
||||
fsrepoV := 5
|
||||
|
||||
rp = testVersionFile(strconv.Itoa(fsrepoV), t)
|
||||
_, err = rp.Version()
|
||||
assert.Err(err, t, "Bad VersionFile")
|
||||
|
||||
assert.Nil(rp.WriteVersion(fsrepoV), t, "Trouble writing version")
|
||||
|
||||
assert.Nil(rp.CheckVersion(fsrepoV), t, "Trouble checking the version")
|
||||
|
||||
assert.Err(rp.CheckVersion(1), t, "Should throw an error for the wrong version.")
|
||||
}
|
||||
@ -1,282 +1,211 @@
|
||||
package mfsr
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var DistPath = "https://ipfs.io/ipfs/QmYRLRDKobvg1AXTGeK5Xk6ntWTsjGiHbyNKhWfz7koGpa"
|
||||
|
||||
func init() {
|
||||
if dist := os.Getenv("IPFS_DIST_PATH"); dist != "" {
|
||||
DistPath = dist
|
||||
}
|
||||
}
|
||||
|
||||
const migrations = "fs-repo-migrations"
|
||||
|
||||
func migrationsBinName() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
return migrations + ".exe"
|
||||
default:
|
||||
return migrations
|
||||
}
|
||||
}
|
||||
|
||||
func RunMigration(newv int) error {
|
||||
migrateBin := migrationsBinName()
|
||||
|
||||
fmt.Println(" => Looking for suitable fs-repo-migrations binary.")
|
||||
|
||||
var err error
|
||||
migrateBin, err = exec.LookPath(migrateBin)
|
||||
if err == nil {
|
||||
// check to make sure migrations binary supports our target version
|
||||
err = verifyMigrationSupportsVersion(migrateBin, newv)
|
||||
}
|
||||
const (
|
||||
// Migrations subdirectory in distribution. Empty for root (no subdir).
|
||||
distMigsRoot = ""
|
||||
distFSRM = "fs-repo-migrations"
|
||||
)
|
||||
|
||||
// RunMigration finds, downloads, and runs the individual migrations needed to
|
||||
// migrate the repo from its current version to the target version.
|
||||
func RunMigration(ctx context.Context, fetcher Fetcher, targetVer int, ipfsDir string, allowDowngrade bool) error {
|
||||
ipfsDir, err := CheckIpfsDir(ipfsDir)
|
||||
if err != nil {
|
||||
fmt.Println(" => None found, downloading.")
|
||||
return err
|
||||
}
|
||||
fromVer, err := repoVersion(ipfsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get repo version: %s", err)
|
||||
}
|
||||
if fromVer == targetVer {
|
||||
// repo already at target version number
|
||||
return nil
|
||||
}
|
||||
if fromVer > targetVer && !allowDowngrade {
|
||||
return fmt.Errorf("downgrade not allowed from %d to %d", fromVer, targetVer)
|
||||
}
|
||||
|
||||
loc, err := GetMigrations()
|
||||
log.Print("Looking for suitable migration binaries.")
|
||||
|
||||
migrations, binPaths, err := findMigrations(ctx, fromVer, targetVer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Download migrations that were not found
|
||||
if len(binPaths) < len(migrations) {
|
||||
missing := make([]string, 0, len(migrations)-len(binPaths))
|
||||
for _, mig := range migrations {
|
||||
if _, ok := binPaths[mig]; !ok {
|
||||
missing = append(missing, mig)
|
||||
}
|
||||
}
|
||||
|
||||
log.Println("Need", len(missing), "migrations, downloading.")
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "migrations")
|
||||
if err != nil {
|
||||
fmt.Println(" => Failed to download fs-repo-migrations.")
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
err = verifyMigrationSupportsVersion(loc, newv)
|
||||
fetched, err := fetchMigrations(ctx, fetcher, missing, tmpDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no fs-repo-migration binary found for version %d: %s", newv, err)
|
||||
log.Print("Failed to download migrations.")
|
||||
return err
|
||||
}
|
||||
for i := range missing {
|
||||
binPaths[missing[i]] = fetched[i]
|
||||
}
|
||||
|
||||
migrateBin = loc
|
||||
}
|
||||
|
||||
cmd := exec.Command(migrateBin, "-to", fmt.Sprint(newv), "-y")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
fmt.Printf(" => Running: %s -to %d -y\n", migrateBin, newv)
|
||||
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
fmt.Printf(" => Failed: %s -to %d -y\n", migrateBin, newv)
|
||||
return fmt.Errorf("migration failed: %s", err)
|
||||
var revert bool
|
||||
if fromVer > targetVer {
|
||||
revert = true
|
||||
}
|
||||
|
||||
fmt.Printf(" => Success: fs-repo has been migrated to version %d.\n", newv)
|
||||
for _, migration := range migrations {
|
||||
log.Println("Running migration", migration, "...")
|
||||
err = runMigration(ctx, binPaths[migration], ipfsDir, revert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("migration %s failed: %s", migration, err)
|
||||
}
|
||||
}
|
||||
log.Printf("Success: fs-repo migrated to version %d.\n", targetVer)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetMigrations() (string, error) {
|
||||
latest, err := GetLatestVersion(DistPath, migrations)
|
||||
func NeedMigration(target int) (bool, error) {
|
||||
vnum, err := RepoVersion("")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to find latest fs-repo-migrations: %s", err)
|
||||
return false, fmt.Errorf("could not get repo version: %s", err)
|
||||
}
|
||||
|
||||
dir, err := ioutil.TempDir("", "go-ipfs-migrate")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create fs-repo-migrations tempdir: %s", err)
|
||||
}
|
||||
|
||||
out := filepath.Join(dir, migrationsBinName())
|
||||
|
||||
err = GetBinaryForVersion(migrations, migrations, DistPath, latest, out)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to download latest fs-repo-migrations: %s", err)
|
||||
}
|
||||
|
||||
err = os.Chmod(out, 0755)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
return vnum != target, nil
|
||||
}
|
||||
|
||||
func verifyMigrationSupportsVersion(fsrbin string, vn int) error {
|
||||
sn, err := migrationsVersion(fsrbin)
|
||||
if err != nil {
|
||||
return err
|
||||
func ExeName(name string) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return name + ".exe"
|
||||
}
|
||||
|
||||
if sn >= vn {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("migrations binary doesn't support version %d: %s", vn, fsrbin)
|
||||
return name
|
||||
}
|
||||
|
||||
func migrationsVersion(bin string) (int, error) {
|
||||
out, err := exec.Command(bin, "-v").CombinedOutput()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to check migrations version: %s", err)
|
||||
}
|
||||
|
||||
vs := strings.Trim(string(out), " \n\t")
|
||||
vn, err := strconv.Atoi(vs)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("migrations binary version check did not return a number: %s", err)
|
||||
}
|
||||
|
||||
return vn, nil
|
||||
func migrationName(from, to int) string {
|
||||
return fmt.Sprintf("fs-repo-%d-to-%d", from, to)
|
||||
}
|
||||
|
||||
func GetVersions(ipfspath, dist string) ([]string, error) {
|
||||
rc, err := httpFetch(ipfspath + "/" + dist + "/versions")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
var out []string
|
||||
scan := bufio.NewScanner(rc)
|
||||
for scan.Scan() {
|
||||
out = append(out, scan.Text())
|
||||
// findMigrations returns a list of migrations, ordered from first to last
|
||||
// migration to apply, and a map of locations of migration binaries of any
|
||||
// migrations that were found.
|
||||
func findMigrations(ctx context.Context, from, to int) ([]string, map[string]string, error) {
|
||||
step := 1
|
||||
count := to - from
|
||||
if from > to {
|
||||
step = -1
|
||||
count = from - to
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
migrations := make([]string, 0, count)
|
||||
binPaths := make(map[string]string, count)
|
||||
|
||||
func GetLatestVersion(ipfspath, dist string) (string, error) {
|
||||
vs, err := GetVersions(ipfspath, dist)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var latest string
|
||||
for i := len(vs) - 1; i >= 0; i-- {
|
||||
if !strings.Contains(vs[i], "-dev") {
|
||||
latest = vs[i]
|
||||
break
|
||||
for cur := from; cur != to; cur += step {
|
||||
if ctx.Err() != nil {
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
if latest == "" {
|
||||
return "", fmt.Errorf("couldn't find a non dev version in the list")
|
||||
}
|
||||
return vs[len(vs)-1], nil
|
||||
}
|
||||
|
||||
func httpGet(url string) (*http.Response, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http.NewRequest error: %s", err)
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", "go-ipfs")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http.DefaultClient.Do error: %s", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func httpFetch(url string) (io.ReadCloser, error) {
|
||||
resp, err := httpGet(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
mes, err := ioutil.ReadAll(resp.Body)
|
||||
var migName string
|
||||
if step == -1 {
|
||||
migName = migrationName(cur+step, cur)
|
||||
} else {
|
||||
migName = migrationName(cur, cur+step)
|
||||
}
|
||||
migrations = append(migrations, migName)
|
||||
bin, err := exec.LookPath(migName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading error body: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("GET %s error: %s: %s", url, resp.Status, string(mes))
|
||||
binPaths[migName] = bin
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
return migrations, binPaths, nil
|
||||
}
|
||||
|
||||
func GetBinaryForVersion(distname, binnom, root, vers, out string) error {
|
||||
dir, err := ioutil.TempDir("", "go-ipfs-auto-migrate")
|
||||
if err != nil {
|
||||
return err
|
||||
func runMigration(ctx context.Context, binPath, ipfsDir string, revert bool) error {
|
||||
pathArg := fmt.Sprintf("-path=%s", ipfsDir)
|
||||
var cmd *exec.Cmd
|
||||
if revert {
|
||||
log.Println(" => Running:", binPath, pathArg, "-verbose=true -revert")
|
||||
cmd = exec.CommandContext(ctx, binPath, pathArg, "-verbose=true", "-revert")
|
||||
} else {
|
||||
log.Println(" => Running:", binPath, pathArg, "-verbose=true")
|
||||
cmd = exec.CommandContext(ctx, binPath, pathArg, "-verbose=true")
|
||||
}
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
var archive string
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
archive = "zip"
|
||||
binnom += ".exe"
|
||||
default:
|
||||
archive = "tar.gz"
|
||||
}
|
||||
// fetchMigrations downloads the requested migrations, and returns a slice with
|
||||
// the paths of each binary, in the same order specified by needed.
|
||||
func fetchMigrations(ctx context.Context, fetcher Fetcher, needed []string, destDir string) ([]string, error) {
|
||||
osv, err := osWithVariant()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if osv == "linux-musl" {
|
||||
return fmt.Errorf("linux-musl not supported, you must build the binary from source for your platform")
|
||||
return nil, fmt.Errorf("linux-musl not supported, you must build the binary from source for your platform")
|
||||
}
|
||||
|
||||
finame := fmt.Sprintf("%s_%s_%s-%s.%s", distname, vers, osv, runtime.GOARCH, archive)
|
||||
distpath := fmt.Sprintf("%s/%s/%s/%s", root, distname, vers, finame)
|
||||
|
||||
data, err := httpFetch(distpath)
|
||||
if err != nil {
|
||||
return err
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(needed))
|
||||
bins := make([]string, len(needed))
|
||||
// Download and unpack all requested migrations concurrently.
|
||||
for i, name := range needed {
|
||||
log.Printf("Downloading migration: %s...", name)
|
||||
go func(i int, name string) {
|
||||
defer wg.Done()
|
||||
dist := path.Join(distMigsRoot, name)
|
||||
ver, err := LatestDistVersion(ctx, fetcher, dist, false)
|
||||
if err != nil {
|
||||
log.Printf("could not get latest version of migration %s: %s", name, err)
|
||||
return
|
||||
}
|
||||
loc, err := FetchBinary(ctx, fetcher, dist, ver, name, destDir)
|
||||
if err != nil {
|
||||
log.Printf("could not download %s: %s", name, err)
|
||||
return
|
||||
}
|
||||
log.Printf("Downloaded and unpacked migration: %s (%s)", loc, ver)
|
||||
bins[i] = loc
|
||||
}(i, name)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
arcpath := filepath.Join(dir, finame)
|
||||
fi, err := os.Create(arcpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(fi, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fi.Close()
|
||||
|
||||
return unpackArchive(distname, binnom, arcpath, out, archive)
|
||||
}
|
||||
|
||||
// osWithVariant returns the OS name with optional variant.
|
||||
// Currently returns either runtime.GOOS, or "linux-musl".
|
||||
func osWithVariant() (string, error) {
|
||||
if runtime.GOOS != "linux" {
|
||||
return runtime.GOOS, nil
|
||||
}
|
||||
|
||||
// ldd outputs the system's kind of libc.
|
||||
// - on standard ubuntu: ldd (Ubuntu GLIBC 2.23-0ubuntu5) 2.23
|
||||
// - on alpine: musl libc (x86_64)
|
||||
//
|
||||
// we use the combined stdout+stderr,
|
||||
// because ldd --version prints differently on different OSes.
|
||||
// - on standard ubuntu: stdout
|
||||
// - on alpine: stderr (it probably doesn't know the --version flag)
|
||||
//
|
||||
// we suppress non-zero exit codes (see last point about alpine).
|
||||
out, err := exec.Command("sh", "-c", "ldd --version || true").CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// now just see if we can find "musl" somewhere in the output
|
||||
scan := bufio.NewScanner(bytes.NewBuffer(out))
|
||||
for scan.Scan() {
|
||||
if strings.Contains(scan.Text(), "musl") {
|
||||
return "linux-musl", nil
|
||||
var fails []string
|
||||
for i := range bins {
|
||||
if bins[i] == "" {
|
||||
fails = append(fails, needed[i])
|
||||
}
|
||||
}
|
||||
if len(fails) != 0 {
|
||||
err = fmt.Errorf("failed to download migrations: %s", strings.Join(fails, " "))
|
||||
if ctx.Err() != nil {
|
||||
err = fmt.Errorf("%s, %s", ctx.Err(), err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return "linux", nil
|
||||
return bins, nil
|
||||
}
|
||||
|
||||
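The rewritten RunMigration above no longer fetches a single fs-repo-migrations binary; it walks the chain of fs-repo-N-to-N+1 tools between the current and target versions, using binaries already on PATH and downloading the missing ones from the distribution site. A hedged sketch of the caller-facing flow, using only the exported functions shown in this diff (the target version is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	ctx := context.Background()
	targetVer := 11 // illustrative target repo version

	need, err := migrations.NeedMigration(targetVer)
	if err != nil {
		fmt.Println("cannot check repo version:", err)
		return
	}
	if !need {
		fmt.Println("repo already at version", targetVer)
		return
	}

	fetcher := migrations.NewHttpFetcher(migrations.GetDistPathEnv(migrations.CurrentIpfsDist), "", "go-ipfs", 0)
	// "" uses the default repo location; false refuses to run migrations in reverse.
	if err := migrations.RunMigration(ctx, fetcher, targetVer, "", false); err != nil {
		fmt.Println("migration failed:", err)
	}
}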
repo/fsrepo/migrations/migrations_test.go (new file, 197 lines)
@ -0,0 +1,197 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindMigrations(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
migs, bins, err := findMigrations(ctx, 0, 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(migs) != 5 {
|
||||
t.Fatal("expected 5 migrations")
|
||||
}
|
||||
if len(bins) != 0 {
|
||||
t.Fatal("should not have found migrations")
|
||||
}
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
createFakeBin(i-1, i, tmpDir)
|
||||
}
|
||||
|
||||
origPath := os.Getenv("PATH")
|
||||
os.Setenv("PATH", tmpDir)
|
||||
defer os.Setenv("PATH", origPath)
|
||||
|
||||
migs, bins, err = findMigrations(ctx, 0, 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(migs) != 5 {
|
||||
t.Fatal("expected 5 migrations")
|
||||
}
|
||||
if len(bins) != len(migs) {
|
||||
t.Fatal("missing", len(migs)-len(bins), "migrations")
|
||||
}
|
||||
|
||||
os.Remove(bins[migs[2]])
|
||||
|
||||
migs, bins, err = findMigrations(ctx, 0, 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(bins) != len(migs)-1 {
|
||||
t.Fatal("should be missing one migration bin")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindMigrationsReverse(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
migs, bins, err := findMigrations(ctx, 5, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(migs) != 5 {
|
||||
t.Fatal("expected 5 migrations")
|
||||
}
|
||||
if len(bins) != 0 {
|
||||
t.Fatal("should not have found migrations")
|
||||
}
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
createFakeBin(i-1, i, tmpDir)
|
||||
}
|
||||
|
||||
origPath := os.Getenv("PATH")
|
||||
os.Setenv("PATH", tmpDir)
|
||||
defer os.Setenv("PATH", origPath)
|
||||
|
||||
migs, bins, err = findMigrations(ctx, 5, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(migs) != 5 {
|
||||
t.Fatal("expected 5 migrations")
|
||||
}
|
||||
if len(bins) != len(migs) {
|
||||
t.Fatal("missing", len(migs)-len(bins), "migrations:", migs)
|
||||
}
|
||||
|
||||
os.Remove(bins[migs[2]])
|
||||
|
||||
migs, bins, err = findMigrations(ctx, 5, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(bins) != len(migs)-1 {
|
||||
t.Fatal("should be missing one migration bin")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchMigrations(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := createTestServer()
|
||||
defer ts.Close()
|
||||
fetcher := NewHttpFetcher(CurrentIpfsDist, ts.URL, "", 0)
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
needed := []string{"fs-repo-1-to-2", "fs-repo-2-to-3"}
|
||||
fetched, err := fetchMigrations(ctx, fetcher, needed, tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, bin := range fetched {
|
||||
_, err = os.Stat(bin)
|
||||
if os.IsNotExist(err) {
|
||||
t.Error("expected file to exist:", bin)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunMigrations(t *testing.T) {
|
||||
var err error
|
||||
fakeHome, err = ioutil.TempDir("", "testhome")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(fakeHome)
|
||||
|
||||
os.Setenv("HOME", fakeHome)
|
||||
fakeIpfs := filepath.Join(fakeHome, ".ipfs")
|
||||
|
||||
err = os.Mkdir(fakeIpfs, os.ModePerm)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
testVer := 11
|
||||
err = WriteRepoVersion(fakeIpfs, testVer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ts := createTestServer()
|
||||
defer ts.Close()
|
||||
fetcher := NewHttpFetcher(CurrentIpfsDist, ts.URL, "", 0)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
targetVer := 9
|
||||
|
||||
err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, false)
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "downgrade not allowed") {
|
||||
t.Fatal("expected 'downgrade not alloed' error")
|
||||
}
|
||||
|
||||
err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, true)
|
||||
if err != nil {
|
||||
if !strings.HasPrefix(err.Error(), "migration fs-repo-10-to-11 failed") {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createFakeBin(from, to int, tmpDir string) {
|
||||
migPath := filepath.Join(tmpDir, ExeName(migrationName(from, to)))
|
||||
emptyFile, err := os.Create(migPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
emptyFile.Close()
|
||||
err = os.Chmod(migPath, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@ -1,62 +1,94 @@
|
||||
package mfsr
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func unpackArchive(dist, binnom, path, out, atype string) error {
|
||||
func unpackArchive(arcPath, atype, root, name, out string) error {
|
||||
var err error
|
||||
switch atype {
|
||||
case "zip":
|
||||
return unpackZip(dist, binnom, path, out)
|
||||
case "tar.gz":
|
||||
return unpackTgz(dist, binnom, path, out)
|
||||
err = unpackTgz(arcPath, root, name, out)
|
||||
case "zip":
|
||||
err = unpackZip(arcPath, root, name, out)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized archive type: %s", atype)
|
||||
err = fmt.Errorf("unrecognized archive type: %s", atype)
|
||||
}
|
||||
}
|
||||
|
||||
func unpackTgz(dist, binnom, path, out string) error {
|
||||
fi, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(arcPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func unpackTgz(arcPath, root, name, out string) error {
|
||||
fi, err := os.Open(arcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open archive file: %w", err)
|
||||
}
|
||||
defer fi.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(fi)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error opening gzip reader: %w", err)
|
||||
}
|
||||
|
||||
defer gzr.Close()
|
||||
|
||||
var bin io.Reader
|
||||
tarr := tar.NewReader(gzr)
|
||||
|
||||
loop:
|
||||
lookFor := root + "/" + name
|
||||
for {
|
||||
th, err := tarr.Next()
|
||||
switch err {
|
||||
default:
|
||||
return err
|
||||
case io.EOF:
|
||||
break loop
|
||||
case nil:
|
||||
// continue
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return fmt.Errorf("cannot read archive: %w", err)
|
||||
}
|
||||
|
||||
if th.Name == dist+"/"+binnom {
|
||||
if th.Name == lookFor {
|
||||
bin = tarr
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if bin == nil {
|
||||
return fmt.Errorf("no binary found in downloaded archive")
|
||||
return errors.New("no binary found in archive")
|
||||
}
|
||||
|
||||
return writeToPath(bin, out)
|
||||
}
|
||||
|
||||
func unpackZip(arcPath, root, name, out string) error {
|
||||
zipr, err := zip.OpenReader(arcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening zip reader: %w", err)
|
||||
}
|
||||
defer zipr.Close()
|
||||
|
||||
lookFor := root + "/" + name
|
||||
var bin io.ReadCloser
|
||||
for _, fis := range zipr.File {
|
||||
if fis.Name == lookFor {
|
||||
rc, err := fis.Open()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error extracting binary from archive: %w", err)
|
||||
}
|
||||
|
||||
bin = rc
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if bin == nil {
|
||||
return errors.New("no binary found in archive")
|
||||
}
|
||||
|
||||
return writeToPath(bin, out)
|
||||
@ -65,7 +97,7 @@ loop:
|
||||
func writeToPath(rc io.Reader, out string) error {
|
||||
binfi, err := os.Create(out)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening tmp bin path '%s': %s", out, err)
|
||||
return fmt.Errorf("error creating output file '%s': %w", out, err)
|
||||
}
|
||||
defer binfi.Close()
|
||||
|
||||
@ -73,26 +105,3 @@ func writeToPath(rc io.Reader, out string) error {
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func unpackZip(dist, binnom, path, out string) error {
|
||||
zipr, err := zip.OpenReader(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening zipreader: %s", err)
|
||||
}
|
||||
|
||||
defer zipr.Close()
|
||||
|
||||
var bin io.ReadCloser
|
||||
for _, fis := range zipr.File {
|
||||
if fis.Name == dist+"/"+binnom {
|
||||
rc, err := fis.Open()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error extracting binary from archive: %s", err)
|
||||
}
|
||||
|
||||
bin = rc
|
||||
}
|
||||
}
|
||||
|
||||
return writeToPath(bin, out)
|
||||
}
|
||||
|
||||
repo/fsrepo/migrations/unpack_test.go (new file, 231 lines)
@ -0,0 +1,231 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUnpackArchive(t *testing.T) {
|
||||
// Check unrecognized archive type
|
||||
err := unpackArchive("", "no-arch-type", "", "", "")
|
||||
if err == nil || err.Error() != "unrecognized archive type: no-arch-type" {
|
||||
t.Fatal("expected 'unrecognized archive type' error")
|
||||
}
|
||||
|
||||
// Test cannot open errors
|
||||
err = unpackArchive("no-archive", "tar.gz", "", "", "")
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "cannot open archive file") {
|
||||
t.Fatal("expected 'cannot open' error, got:", err)
|
||||
}
|
||||
err = unpackArchive("no-archive", "zip", "", "", "")
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "error opening zip reader") {
|
||||
t.Fatal("expected 'cannot open' error, got:", err)
|
||||
}
|
||||
}

func TestUnpackTgz(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "testunpacktgz")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpDir)

badTarGzip := filepath.Join(tmpDir, "bad.tar.gz")
err = ioutil.WriteFile(badTarGzip, []byte("bad-data\n"), 0644)
if err != nil {
panic(err)
}
err = unpackTgz(badTarGzip, "", "abc", "abc")
if err == nil || !strings.HasPrefix(err.Error(), "error opening gzip reader") {
t.Fatal("expected error opening gzip reader, got:", err)
}

testTarGzip := filepath.Join(tmpDir, "test.tar.gz")
testData := "some data"
err = writeTarGzipFile(testTarGzip, "testroot", "testfile", testData)
if err != nil {
panic(err)
}

out := filepath.Join(tmpDir, "out.txt")

// Test looking for file that is not in archive
err = unpackTgz(testTarGzip, "testroot", "abc", out)
if err == nil || err.Error() != "no binary found in archive" {
t.Fatal("expected 'no binary found in archive' error, got:", err)
}

// Test that unpack works.
err = unpackTgz(testTarGzip, "testroot", "testfile", out)
if err != nil {
t.Fatal(err)
}

fi, err := os.Stat(out)
if err != nil {
t.Fatal(err)
}
if fi.Size() != int64(len(testData)) {
t.Fatal("unpacked file size is", fi.Size(), "expected", len(testData))
}

}

func TestUnpackZip(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "testunpackzip")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpDir)

badZip := filepath.Join(tmpDir, "bad.zip")
err = ioutil.WriteFile(badZip, []byte("bad-data\n"), 0644)
if err != nil {
panic(err)
}
err = unpackZip(badZip, "", "abc", "abc")
if err == nil || !strings.HasPrefix(err.Error(), "error opening zip reader") {
t.Fatal("expected error opening zip reader, got:", err)
}

testZip := filepath.Join(tmpDir, "test.zip")
testData := "some data"
err = writeZipFile(testZip, "testroot", "testfile", testData)
if err != nil {
panic(err)
}

out := filepath.Join(tmpDir, "out.txt")

// Test looking for file that is not in archive
err = unpackZip(testZip, "testroot", "abc", out)
if err == nil || err.Error() != "no binary found in archive" {
t.Fatal("expected 'no binary found in archive' error, got:", err)
}

// Test that unpack works.
err = unpackZip(testZip, "testroot", "testfile", out)
if err != nil {
t.Fatal(err)
}

fi, err := os.Stat(out)
if err != nil {
t.Fatal(err)
}
if fi.Size() != int64(len(testData)) {
t.Fatal("unpacked file size is", fi.Size(), "expected", len(testData))
}
}

func writeTarGzipFile(archName, root, fileName, data string) error {
archFile, err := os.Create(archName)
if err != nil {
return err
}
defer archFile.Close()
w := bufio.NewWriter(archFile)

err = writeTarGzip(root, fileName, data, w)
if err != nil {
return err
}
// Flush buffered data to file
if err = w.Flush(); err != nil {
return err
}
// Close tar file
if err = archFile.Close(); err != nil {
return err
}
return nil
}

func writeTarGzip(root, fileName, data string, w io.Writer) error {
// gzip writer writes to buffer
gzw := gzip.NewWriter(w)
defer gzw.Close()
// tar writer writes to gzip
tw := tar.NewWriter(gzw)
defer tw.Close()

var err error
if fileName != "" {
hdr := &tar.Header{
Name: path.Join(root, fileName),
Mode: 0600,
Size: int64(len(data)),
}
// Write header
if err = tw.WriteHeader(hdr); err != nil {
return err
}
// Write file body
if _, err := tw.Write([]byte(data)); err != nil {
return err
}
}

if err = tw.Close(); err != nil {
return err
}
// Close gzip writer; finish writing gzip data to buffer
if err = gzw.Close(); err != nil {
return err
}
return nil
}

func writeZipFile(archName, root, fileName, data string) error {
archFile, err := os.Create(archName)
if err != nil {
return err
}
defer archFile.Close()
w := bufio.NewWriter(archFile)

err = writeZip(root, fileName, data, w)
if err != nil {
return err
}
// Flush buffered data to file
if err = w.Flush(); err != nil {
return err
}
// Close zip file
if err = archFile.Close(); err != nil {
return err
}
return nil
}

func writeZip(root, fileName, data string, w io.Writer) error {
zw := zip.NewWriter(w)
defer zw.Close()

// Write file name
f, err := zw.Create(path.Join(root, fileName))
if err != nil {
return err
}
// Write file data
_, err = f.Write([]byte(data))
if err != nil {
return err
}

// Close zip writer
if err = zw.Close(); err != nil {
return err
}
return nil
}
75
repo/fsrepo/migrations/versions.go
Normal file
@ -0,0 +1,75 @@
package migrations

import (
"bufio"
"context"
"errors"
"fmt"
"path"
"sort"
"strings"

"github.com/blang/semver/v4"
)

const distVersions = "versions"

// LatestDistVersion returns the latest version of the specified distribution
// that is available on the distribution site.
func LatestDistVersion(ctx context.Context, fetcher Fetcher, dist string, stableOnly bool) (string, error) {
vs, err := DistVersions(ctx, fetcher, dist, false)
if err != nil {
return "", err
}

for i := len(vs) - 1; i >= 0; i-- {
ver := vs[i]
if stableOnly && strings.Contains(ver, "-rc") {
continue
}
if strings.Contains(ver, "-dev") {
continue
}
return ver, nil
}
return "", errors.New("could not find a non dev version")
}

// DistVersions returns all versions of the specified distribution that are
// available on the distribution site. The list is in ascending order, unless
// sortDesc is true.
func DistVersions(ctx context.Context, fetcher Fetcher, dist string, sortDesc bool) ([]string, error) {
rc, err := fetcher.Fetch(ctx, path.Join(dist, distVersions))
if err != nil {
return nil, err
}
defer rc.Close()

prefix := "v"
var vers []semver.Version

scan := bufio.NewScanner(rc)
for scan.Scan() {
ver, err := semver.Make(strings.TrimLeft(scan.Text(), prefix))
if err != nil {
continue
}
vers = append(vers, ver)
}
if scan.Err() != nil {
return nil, fmt.Errorf("could not read versions: %s", scan.Err())
}

if sortDesc {
sort.Sort(sort.Reverse(semver.Versions(vers)))
} else {
sort.Sort(semver.Versions(vers))
}

out := make([]string, len(vers))
for i := range vers {
out[i] = prefix + vers[i].String()
}

return out, nil
}
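
For context, a minimal usage sketch tying these helpers to the package's HTTP fetcher. The empty constructor arguments are assumed to select the fetcher's built-in defaults, as in the tests below; the function name is illustrative, not part of the package:

// Sketch: list go-ipfs versions and report the latest stable one.
func printLatestStable(ctx context.Context) error {
fetcher := NewHttpFetcher("", "", "", 0) // "" and 0 assumed to mean built-in defaults
vers, err := DistVersions(ctx, fetcher, "go-ipfs", false)
if err != nil {
return err
}
fmt.Println("available:", vers)

latest, err := LatestDistVersion(ctx, fetcher, "go-ipfs", true)
if err != nil {
return err
}
fmt.Println("latest stable:", latest)
return nil
}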
51
repo/fsrepo/migrations/versions_test.go
Normal file
@ -0,0 +1,51 @@
package migrations

import (
"context"
"testing"

"github.com/blang/semver/v4"
)

const testDist = "go-ipfs"

func TestDistVersions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

ts := createTestServer()
defer ts.Close()
fetcher := NewHttpFetcher("", ts.URL, "", 0)

vers, err := DistVersions(ctx, fetcher, testDist, true)
if err != nil {
t.Fatal(err)
}
if len(vers) == 0 {
t.Fatal("no versions of", testDist)
}
t.Log("There are", len(vers), "versions of", testDist)
t.Log("Latest 5 are:", vers[:5])
}

func TestLatestDistVersion(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

ts := createTestServer()
defer ts.Close()
fetcher := NewHttpFetcher("", ts.URL, "", 0)

latest, err := LatestDistVersion(ctx, fetcher, testDist, false)
if err != nil {
t.Fatal(err)
}
if len(latest) < 6 {
t.Fatal("latest version string too short", latest)
}
_, err = semver.New(latest[1:])
if err != nil {
t.Fatal("latest version has invalid format:", latest)
}
t.Log("Latest version of", testDist, "is", latest)
}