mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-24 11:57:44 +08:00
Some checks failed
CodeQL / codeql (push) Has been cancelled
Docker Check / lint (push) Has been cancelled
Docker Check / build (push) Has been cancelled
Gateway Conformance / gateway-conformance (push) Has been cancelled
Gateway Conformance / gateway-conformance-libp2p-experiment (push) Has been cancelled
Go Build / go-build (push) Has been cancelled
Go Check / go-check (push) Has been cancelled
Go Lint / go-lint (push) Has been cancelled
Go Test / unit-tests (push) Has been cancelled
Go Test / cli-tests (push) Has been cancelled
Go Test / example-tests (push) Has been cancelled
Interop / interop-prep (push) Has been cancelled
Sharness / sharness-test (push) Has been cancelled
Spell Check / spellcheck (push) Has been cancelled
Interop / helia-interop (push) Has been cancelled
Interop / ipfs-webui (push) Has been cancelled
* fix http header when compress enabled for get command Closes #2376 * fix(rpc): set Content-Type for ipfs get based on output format - set application/x-tar when outputting tar (default and --archive) - set application/gzip when compression is enabled (--compress) - update go-ipfs-cmds with Tar encoding type and RFC 6713 compliant MIME types (application/gzip instead of application/x-gzip) * test(rpc): add Content-Type header tests for ipfs get * feat(rpc): add Content-Type headers for binary responses set proper Content-Type headers for RPC endpoints that return binary data: - `dag export`: application/vnd.ipld.car - `block get`: application/vnd.ipld.raw - `diag profile`: application/zip - `get`: application/x-tar or application/gzip (already worked, migrated to new API) uses the new OctetStream encoding type and SetContentType() method from go-ipfs-cmds to specify custom MIME types for binary responses. refs: https://github.com/ipfs/kubo/issues/2376 * feat(rpc): add `ipfs name get` command for IPNS record retrieval add dedicated command to retrieve raw signed IPNS records from the routing system. returns protobuf-encoded IPNS record with Content-Type `application/vnd.ipfs.ipns-record`. this provides a more convenient alternative to `ipfs routing get /ipns/<name>` which returns JSON with base64-encoded data. the raw output can be piped directly to `ipfs name inspect`: ipfs name get <name> | ipfs name inspect spec: https://specs.ipfs.tech/ipns/ipns-record/ * feat(rpc): add `ipfs name put` command for IPNS record storage adds `ipfs name put` to complement `ipfs name get`, allowing users to store IPNS records obtained from external sources without needing the private key. useful for backup, restore, and debugging workflows. the command validates records by default (signature, sequence number). use `--force` to bypass validation for testing how routing handles malformed or outdated records. 
also reorganizes test/cli files: - rename http_rpc_* -> rpc_* to match existing convention - merge name_get_put_test.go into name_test.go - add file header comments documenting test purposes * chore(deps): update go-ipfs-cmds to latest master includes SetContentType() for dynamic Content-Type headers --------- Co-authored-by: Marcin Rataj <lidel@lidel.org>
381 lines
9.4 KiB
Go
381 lines
9.4 KiB
Go
package commands
|
|
|
|
import (
|
|
gotar "archive/tar"
|
|
"bufio"
|
|
"compress/gzip"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
gopath "path"
|
|
"path/filepath"
|
|
"strings"
|
|
|
|
"github.com/ipfs/kubo/core/commands/cmdenv"
|
|
"github.com/ipfs/kubo/core/commands/cmdutils"
|
|
"github.com/ipfs/kubo/core/commands/e"
|
|
|
|
"github.com/cheggaaa/pb"
|
|
"github.com/ipfs/boxo/files"
|
|
"github.com/ipfs/boxo/tar"
|
|
cmds "github.com/ipfs/go-ipfs-cmds"
|
|
)
|
|
|
|
// ErrInvalidCompressionLevel is returned by getCompressOptions when the
// --compression-level option is outside the accepted 1-9 range.
var ErrInvalidCompressionLevel = errors.New("compression level must be between 1 and 9")

// Option names accepted by the `ipfs get` command.
const (
	outputOptionName           = "output"            // -o: destination path on disk
	archiveOptionName          = "archive"           // -a: emit a TAR archive instead of unpacked files
	compressOptionName         = "compress"          // -C: gzip-compress the output
	compressionLevelOptionName = "compression-level" // -l: gzip level, 1-9
)
|
|
|
|
var GetCmd = &cmds.Command{
|
|
Helptext: cmds.HelpText{
|
|
Tagline: "Download IPFS objects.",
|
|
ShortDescription: `
|
|
Stores to disk the data contained an IPFS or IPNS object(s) at the given path.
|
|
|
|
By default, the output will be stored at './<ipfs-path>', but an alternate
|
|
path can be specified with '--output=<path>' or '-o=<path>'.
|
|
|
|
To output a TAR archive instead of unpacked files, use '--archive' or '-a'.
|
|
|
|
To compress the output with GZIP compression, use '--compress' or '-C'. You
|
|
may also specify the level of compression by specifying '-l=<1-9>'.
|
|
`,
|
|
HTTP: &cmds.HTTPHelpText{
|
|
ResponseContentType: "application/x-tar, or application/gzip when compress=true",
|
|
},
|
|
},
|
|
|
|
Arguments: []cmds.Argument{
|
|
cmds.StringArg("ipfs-path", true, false, "The path to the IPFS object(s) to be outputted.").EnableStdin(),
|
|
},
|
|
Options: []cmds.Option{
|
|
cmds.StringOption(outputOptionName, "o", "The path where the output should be stored."),
|
|
cmds.BoolOption(archiveOptionName, "a", "Output a TAR archive."),
|
|
cmds.BoolOption(compressOptionName, "C", "Compress the output with GZIP compression."),
|
|
cmds.IntOption(compressionLevelOptionName, "l", "The level of compression (1-9)."),
|
|
cmds.BoolOption(progressOptionName, "p", "Stream progress data.").WithDefault(true),
|
|
},
|
|
PreRun: func(req *cmds.Request, env cmds.Environment) error {
|
|
_, err := getCompressOptions(req)
|
|
return err
|
|
},
|
|
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
|
ctx := req.Context
|
|
cmplvl, err := getCompressOptions(req)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
api, err := cmdenv.GetApi(env, req)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
p, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
file, err := api.Unixfs().Get(ctx, p)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
size, err := file.Size()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
res.SetLength(uint64(size))
|
|
|
|
archive, _ := req.Options[archiveOptionName].(bool)
|
|
reader, err := fileArchive(file, p.String(), archive, cmplvl)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
go func() {
|
|
// We cannot defer a close in the response writer (like we should)
|
|
// Because the cmd framework outsmart us and doesn't call response
|
|
// if the context is over.
|
|
<-ctx.Done()
|
|
reader.Close()
|
|
}()
|
|
|
|
// Set Content-Type based on output format.
|
|
// When compression is enabled, output is gzip (or tar.gz for directories).
|
|
// Otherwise, tar is used as the transport format.
|
|
res.SetEncodingType(cmds.OctetStream)
|
|
if cmplvl != gzip.NoCompression {
|
|
res.SetContentType("application/gzip")
|
|
} else {
|
|
res.SetContentType("application/x-tar")
|
|
}
|
|
|
|
return res.Emit(reader)
|
|
},
|
|
PostRun: cmds.PostRunMap{
|
|
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
|
|
req := res.Request()
|
|
|
|
v, err := res.Next()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
outReader, ok := v.(io.Reader)
|
|
if !ok {
|
|
return e.New(e.TypeErr(outReader, v))
|
|
}
|
|
|
|
outPath := getOutPath(req)
|
|
|
|
cmplvl, err := getCompressOptions(req)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
archive, _ := req.Options[archiveOptionName].(bool)
|
|
progress, _ := req.Options[progressOptionName].(bool)
|
|
|
|
gw := getWriter{
|
|
Out: os.Stdout,
|
|
Err: os.Stderr,
|
|
Archive: archive,
|
|
Compression: cmplvl,
|
|
Size: int64(res.Length()),
|
|
Progress: progress,
|
|
}
|
|
|
|
return gw.Write(outReader, outPath)
|
|
},
|
|
},
|
|
}
|
|
|
|
type clearlineReader struct {
|
|
io.Reader
|
|
out io.Writer
|
|
}
|
|
|
|
func (r *clearlineReader) Read(p []byte) (n int, err error) {
|
|
n, err = r.Reader.Read(p)
|
|
if err == io.EOF {
|
|
// callback
|
|
fmt.Fprintf(r.out, "\033[2K\r") // clear progress bar line on EOF
|
|
}
|
|
return
|
|
}
|
|
|
|
func progressBarForReader(out io.Writer, r io.Reader, l int64) (*pb.ProgressBar, io.Reader) {
|
|
bar := makeProgressBar(out, l)
|
|
barR := bar.NewProxyReader(r)
|
|
return bar, &clearlineReader{barR, out}
|
|
}
|
|
|
|
// makeProgressBar constructs a byte-unit progress bar of total size l that
// renders to out. The bar's width-measuring callback removes itself after
// the first rendered line.
func makeProgressBar(out io.Writer, l int64) *pb.ProgressBar {
	// setup bar reader
	// TODO: get total length of files
	bar := pb.New64(l).SetUnits(pb.U_BYTES)
	bar.Output = out

	// the progress bar lib doesn't give us a way to get the width of the output,
	// so as a hack we just use a callback to measure the output, then get rid of it
	bar.Callback = func(line string) {
		terminalWidth := len(line)
		bar.Callback = nil
		log.Infof("terminal width: %v\n", terminalWidth)
	}
	return bar
}
|
|
|
|
func getOutPath(req *cmds.Request) string {
|
|
outPath, _ := req.Options[outputOptionName].(string)
|
|
if outPath == "" {
|
|
trimmed := strings.TrimRight(req.Arguments[0], "/")
|
|
_, outPath = filepath.Split(trimmed)
|
|
outPath = filepath.Clean(outPath)
|
|
}
|
|
return outPath
|
|
}
|
|
|
|
// getWriter persists the fetched stream to disk, either as a single
// (possibly gzip-compressed) archive file or extracted into a directory
// tree, optionally rendering a progress bar.
type getWriter struct {
	Out io.Writer // for output to user
	Err io.Writer // for progress bar output

	Archive     bool  // write a TAR archive file instead of extracting
	Compression int   // gzip level; gzip.NoCompression disables compression
	Size        int64 // expected total bytes, used to size the progress bar
	Progress    bool  // whether to render a progress bar on Err
}
|
|
|
|
func (gw *getWriter) Write(r io.Reader, fpath string) error {
|
|
if gw.Archive || gw.Compression != gzip.NoCompression {
|
|
return gw.writeArchive(r, fpath)
|
|
}
|
|
return gw.writeExtracted(r, fpath)
|
|
}
|
|
|
|
// writeArchive saves the raw stream r into a single file at fpath, appending
// ".tar" and/or ".gz" extensions to match the content when they are missing,
// and optionally rendering a progress bar to gw.Err while copying.
func (gw *getWriter) writeArchive(r io.Reader, fpath string) error {
	// adjust file name if tar
	if gw.Archive {
		if !strings.HasSuffix(fpath, ".tar") && !strings.HasSuffix(fpath, ".tar.gz") {
			fpath += ".tar"
		}
	}

	// adjust file name if gz
	if gw.Compression != gzip.NoCompression {
		if !strings.HasSuffix(fpath, ".gz") {
			fpath += ".gz"
		}
	}

	// create file
	file, err := os.Create(fpath)
	if err != nil {
		return err
	}
	defer file.Close()

	fmt.Fprintf(gw.Out, "Saving archive to %s\n", fpath)
	if gw.Progress {
		// wrap r so reads advance the bar; clear the bar line on EOF
		var bar *pb.ProgressBar
		bar, r = progressBarForReader(gw.Err, r, gw.Size)
		bar.Start()
		defer bar.Finish()
	}

	_, err = io.Copy(file, r)
	return err
}
|
|
|
|
// writeExtracted unpacks the tar stream r into files and directories rooted
// at fpath, optionally rendering a progress bar driven by the extractor's
// progress callback.
func (gw *getWriter) writeExtracted(r io.Reader, fpath string) error {
	fmt.Fprintf(gw.Out, "Saving file(s) to %s\n", fpath)
	var progressCb func(int64) int64
	if gw.Progress {
		bar := makeProgressBar(gw.Err, gw.Size)
		bar.Start()
		defer bar.Finish()
		// defers run LIFO: fill the bar to its full size just before Finish
		defer bar.Set64(gw.Size)
		progressCb = bar.Add64
	}

	extractor := &tar.Extractor{Path: fpath, Progress: progressCb}
	return extractor.Extract(r)
}
|
|
|
|
func getCompressOptions(req *cmds.Request) (int, error) {
|
|
cmprs, _ := req.Options[compressOptionName].(bool)
|
|
cmplvl, cmplvlFound := req.Options[compressionLevelOptionName].(int)
|
|
switch {
|
|
case !cmprs:
|
|
return gzip.NoCompression, nil
|
|
case cmprs && !cmplvlFound:
|
|
return gzip.DefaultCompression, nil
|
|
case cmprs && (cmplvl < 1 || cmplvl > 9):
|
|
return gzip.NoCompression, ErrInvalidCompressionLevel
|
|
}
|
|
return cmplvl, nil
|
|
}
|
|
|
|
// DefaultBufSize is the write-buffer size, in bytes, used when streaming get
// output — for now, 1MiB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576
|
|
|
|
type identityWriteCloser struct {
|
|
w io.Writer
|
|
}
|
|
|
|
func (i *identityWriteCloser) Write(p []byte) (int, error) {
|
|
return i.w.Write(p)
|
|
}
|
|
|
|
func (i *identityWriteCloser) Close() error {
|
|
return nil
|
|
}
|
|
|
|
// fileArchive turns the UnixFS node f into a byte stream for the response:
// a tar stream by default and when archive is true, or the bare file
// contents when only compression is requested. When compression is not
// gzip.NoCompression the stream is gzip-compressed at that level. The
// returned ReadCloser is the read end of a pipe fed by a background
// goroutine; write/flush errors are propagated to the reader via
// CloseWithError.
func fileArchive(f files.Node, name string, archive bool, compression int) (io.ReadCloser, error) {
	cleaned := gopath.Clean(name)
	_, filename := gopath.Split(cleaned)

	// need to connect a writer to a reader
	piper, pipew := io.Pipe()
	// checkErrAndClosePipe reports whether err is non-nil; if so, it fails
	// the pipe so the reader sees the error.
	checkErrAndClosePipe := func(err error) bool {
		if err != nil {
			_ = pipew.CloseWithError(err)
			return true
		}
		return false
	}

	// use a buffered writer to parallelize task
	bufw := bufio.NewWriterSize(pipew, DefaultBufSize)

	// compression determines whether to use gzip compression.
	maybeGzw, err := newMaybeGzWriter(bufw, compression)
	if checkErrAndClosePipe(err) {
		return nil, err
	}

	// closeGzwAndPipe finalizes the gzip layer, flushes the buffer, and
	// closes the pipe cleanly — in that order, so all bytes reach the reader.
	closeGzwAndPipe := func() {
		if err := maybeGzw.Close(); checkErrAndClosePipe(err) {
			return
		}
		if err := bufw.Flush(); checkErrAndClosePipe(err) {
			return
		}
		pipew.Close() // everything seems to be ok.
	}

	if !archive && compression != gzip.NoCompression {
		// the case when the node is a file
		r := files.ToFile(f)
		if r == nil {
			return nil, errors.New("file is not regular")
		}

		go func() {
			if _, err := io.Copy(maybeGzw, r); checkErrAndClosePipe(err) {
				return
			}
			closeGzwAndPipe() // everything seems to be ok
		}()
	} else {
		// the case for 1. archive, and 2. not archived and not compressed, in
		// which tar is used anyway as a transport format

		// construct the tar writer
		w, err := files.NewTarWriter(maybeGzw)
		if checkErrAndClosePipe(err) {
			return nil, err
		}

		// if not creating an archive set the format to PAX in order to preserve nanoseconds
		if !archive {
			w.SetFormat(gotar.FormatPAX)
		}

		go func() {
			// write all the nodes recursively
			if err := w.WriteFile(f, filename); checkErrAndClosePipe(err) {
				return
			}
			w.Close()         // close tar writer
			closeGzwAndPipe() // everything seems to be ok
		}()
	}

	return piper, nil
}
|
|
|
|
func newMaybeGzWriter(w io.Writer, compression int) (io.WriteCloser, error) {
|
|
if compression != gzip.NoCompression {
|
|
return gzip.NewWriterLevel(w, compression)
|
|
}
|
|
return &identityWriteCloser{w}, nil
|
|
}
|