mirror of https://github.com/ipfs/kubo.git (synced 2026-02-21 10:27:46 +08:00)
refactor(config): migration 17-to-18 to unify Provider/Reprovider into Provide.DHT (#10951)
* refactor: consolidate Provider/Reprovider into unified Provide config
  - merge Provider and Reprovider configs into single Provide section
  - add fs-repo-17-to-18 migration for config consolidation
  - improve migration ergonomics with common package utilities
  - convert deprecated "flat" strategy to "all" during migration
  - improve Provide docs

* docs: add total_provide_count metric guidance
  - document how to monitor provide success rates via prometheus metrics
  - add performance comparison section to changelog
  - explain how to evaluate sweep vs legacy provider effectiveness

* fix: add OpenTelemetry meter provider for metrics
  - set up meter provider with Prometheus exporter in daemon
  - enables metrics from external libs like go-libp2p-kad-dht
  - fixes missing total_provide_count_total when SweepEnabled=true
  - update docs to reflect actual metric names

---------

Co-authored-by: gammazero <11790789+gammazero@users.noreply.github.com>
Co-authored-by: guillaumemichel <guillaume@michel.id>
Co-authored-by: Daniel Norman <1992255+2color@users.noreply.github.com>
Co-authored-by: Hector Sanjuan <code@hector.link>
This commit is contained in:
parent 006f9dc704
commit 71e883440e
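For orientation, a rough sketch of how the old config keys map onto the new unified Provide section. This is an illustration only; the real logic lives in the fs-repo-17-to-18 migration, which is not part of this diff, and the new key names are the ones defined in config/provide.go further down.

    package main

    import "fmt"

    // Hypothetical mapping for illustration only; the actual migration code is
    // in the fs-repo-17-to-18 migration and is not shown in this changeset.
    var provideKeyMapping = map[string]string{
        "Provider.Enabled":         "Provide.Enabled",
        "Provider.WorkerCount":     "Provide.DHT.MaxWorkers",
        "Reprovider.Strategy":      "Provide.Strategy", // a legacy "flat" value is rewritten to "all"
        "Reprovider.Interval":      "Provide.DHT.Interval",
        "Reprovider.Sweep.Enabled": "Provide.DHT.SweepEnabled",
    }

    func main() {
        for oldKey, newKey := range provideKeyMapping {
            fmt.Printf("%s -> %s\n", oldKey, newKey)
        }
    }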
@@ -47,7 +47,7 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
     c.Experimental.FilestoreEnabled = true
     // only provide things we pin. Allows to test
     // provide operations.
-    c.Reprovider.Strategy = config.NewOptionalString("roots")
+    c.Provide.Strategy = config.NewOptionalString("roots")
     n.WriteConfig(c)
     n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))
@@ -43,6 +43,9 @@ import (
     manet "github.com/multiformats/go-multiaddr/net"
     prometheus "github.com/prometheus/client_golang/prometheus"
     promauto "github.com/prometheus/client_golang/prometheus/promauto"
+    "go.opentelemetry.io/otel"
+    promexporter "go.opentelemetry.io/otel/exporters/prometheus"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
 )

 const (
@@ -211,6 +214,21 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
         log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
     }

+    // Set up OpenTelemetry meter provider to enable metrics from external libraries
+    // like go-libp2p-kad-dht. Without this, metrics registered via otel.Meter()
+    // (such as total_provide_count from sweep provider) won't be exposed at the
+    // /debug/metrics/prometheus endpoint.
+    if exporter, err := promexporter.New(
+        promexporter.WithRegisterer(prometheus.DefaultRegisterer),
+    ); err != nil {
+        log.Errorf("Creating prometheus exporter for OpenTelemetry failed: %s (some metrics will be missing from /debug/metrics/prometheus)\n", err.Error())
+    } else {
+        meterProvider := sdkmetric.NewMeterProvider(
+            sdkmetric.WithReader(exporter),
+        )
+        otel.SetMeterProvider(meterProvider)
+    }
+
     // let the user know we're going.
     fmt.Printf("Initializing daemon...\n")

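A background sketch, not part of this commit, of why the wiring above matters: once otel.SetMeterProvider is installed with a Prometheus-backed reader, any instrument a dependency creates through otel.Meter() lands in prometheus.DefaultRegisterer and is therefore served by Kubo's existing /debug/metrics/prometheus handler. The instrument name below is only illustrative; a counter exported this way gets Prometheus' _total suffix, which is how total_provide_count_total from the commit message appears.

    package main

    import (
        "context"

        "github.com/prometheus/client_golang/prometheus"
        "go.opentelemetry.io/otel"
        promexporter "go.opentelemetry.io/otel/exporters/prometheus"
        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
        // Same wiring as the daemon change above: bridge OTel metrics into the
        // default Prometheus registry.
        exporter, err := promexporter.New(promexporter.WithRegisterer(prometheus.DefaultRegisterer))
        if err != nil {
            panic(err)
        }
        otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)))

        // What a library like go-libp2p-kad-dht effectively does internally:
        // it never touches Prometheus directly, it only uses the global meter.
        meter := otel.Meter("example")
        counter, err := meter.Int64Counter("total_provide_count")
        if err != nil {
            panic(err)
        }
        counter.Add(context.Background(), 1) // now gatherable from prometheus.DefaultGatherer
    }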
@@ -486,25 +504,33 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
         // This should never happen, but better safe than sorry
         log.Fatal("Private network does not work with Routing.Type=auto. Update your config to Routing.Type=dht (or none, and do manual peering)")
     }
-    if cfg.Provider.Strategy.WithDefault("") != "" && cfg.Reprovider.Strategy.IsDefault() {
-        log.Fatal("Invalid config. Remove unused Provider.Strategy and set Reprovider.Strategy instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy")
+    // Check for deprecated Provider/Reprovider configuration after migration
+    // This should never happen for regular users, but is useful error for people who have Docker orchestration
+    // that blindly sets config keys (overriding automatic Kubo migration).
+    //nolint:staticcheck // intentionally checking deprecated fields
+    if cfg.Provider.Enabled != config.Default || !cfg.Provider.Strategy.IsDefault() || !cfg.Provider.WorkerCount.IsDefault() {
+        log.Fatal("Deprecated configuration detected. Manually migrate 'Provider' fields to 'Provide' and remove 'Provider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
     }
-    // Check for deprecated "flat" strategy
-    if cfg.Reprovider.Strategy.WithDefault("") == "flat" {
-        log.Error("Reprovider.Strategy='flat' is deprecated and will be removed in the next release. Please update your config to use 'all' instead.")
+    //nolint:staticcheck // intentionally checking deprecated fields
+    if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
+        log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
+    }
+    // Check for deprecated "flat" strategy (should have been migrated to "all")
+    if cfg.Provide.Strategy.WithDefault("") == "flat" {
+        log.Fatal("Provide.Strategy='flat' is no longer supported. Use 'all' instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy")
     }
     if cfg.Experimental.StrategicProviding {
-        log.Error("Experimental.StrategicProviding was removed. Remove it from your config and set Provider.Enabled=false to remove this message. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
-        cfg.Experimental.StrategicProviding = false
-        cfg.Provider.Enabled = config.False
+        log.Fatal("Experimental.StrategicProviding was removed. Remove it from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
+    }
+    // Check for invalid MaxWorkers=0 with SweepEnabled
+    if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) &&
+        cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers) == 0 {
+        log.Fatal("Invalid configuration: Provide.DHT.MaxWorkers cannot be 0 when Provide.DHT.SweepEnabled=true. Set Provide.DHT.MaxWorkers to a positive value (e.g., 16) to control resource usage. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers")
     }
     if routingOption == routingOptionDelegatedKwd {
         // Delegated routing is read-only mode - content providing must be disabled
-        if cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
-            log.Fatal("Routing.Type=delegated does not support content providing. Set Provider.Enabled=false in your config.")
-        }
-        if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0 {
-            log.Fatal("Routing.Type=delegated does not support content providing. Set Reprovider.Interval='0' in your config.")
+        if cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+            log.Fatal("Routing.Type=delegated does not support content providing. Set Provide.Enabled=false in your config.")
         }
     }

@@ -659,7 +685,7 @@ take effect.

     if !offline {
         // Warn users when provide systems are disabled
-        if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
+        if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
             fmt.Print(`

 ⚠️ Provide and Reprovide systems are disabled due to 'Provide.Enabled=false'
@@ -667,12 +693,12 @@ take effect.
 ⚠️ If this is not intentional, call 'ipfs config profile apply announce-on' or set Provide.Enabled=true'

 `)
-        } else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
+        } else if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
             fmt.Print(`

-⚠️ Provide and Reprovide systems are disabled due to 'Reprovider.Interval=0'
-⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
-⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
+⚠️ Providing to the DHT is disabled due to 'Provide.DHT.Interval=0'
+⚠️ Local CIDs will not be provided to Amino DHT, making them impossible to retrieve without manual peering
+⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Provide.DHT.Interval=22h'

 `)
         }
@@ -35,8 +35,9 @@ type Config struct {
     Migration Migration
     AutoConf  AutoConf

-    Provider   Provider
-    Reprovider Reprovider
+    Provide    Provide    // Merged Provider and Reprovider configuration
+    Provider   Provider   // Deprecated: use Provide. Will be removed in a future release.
+    Reprovider Reprovider // Deprecated: use Provide. Will be removed in a future release.
     HTTPRetrieval HTTPRetrieval
     Experimental  Experiments
     Plugins       Plugins
@@ -134,14 +134,24 @@ func TestCheckKey(t *testing.T) {
         t.Fatal("Foo.Bar isn't a valid key in the config")
     }

-    err = CheckKey("Reprovider.Strategy")
+    err = CheckKey("Provide.Strategy")
     if err != nil {
-        t.Fatalf("%s: %s", err, "Reprovider.Strategy is a valid key in the config")
+        t.Fatalf("%s: %s", err, "Provide.Strategy is a valid key in the config")
     }

-    err = CheckKey("Provider.Foo")
+    err = CheckKey("Provide.DHT.MaxWorkers")
+    if err != nil {
+        t.Fatalf("%s: %s", err, "Provide.DHT.MaxWorkers is a valid key in the config")
+    }
+
+    err = CheckKey("Provide.DHT.Interval")
+    if err != nil {
+        t.Fatalf("%s: %s", err, "Provide.DHT.Interval is a valid key in the config")
+    }
+
+    err = CheckKey("Provide.Foo")
     if err == nil {
-        t.Fatal("Provider.Foo isn't a valid key in the config")
+        t.Fatal("Provide.Foo isn't a valid key in the config")
     }

     err = CheckKey("Gateway.PublicGateways.Foo.Paths")
@@ -60,10 +60,6 @@ func InitWithIdentity(identity Identity) (*Config, error) {
             NoFetch:     false,
             HTTPHeaders: map[string][]string{},
         },
-        Reprovider: Reprovider{
-            Interval: nil,
-            Strategy: nil,
-        },
         Pinning: Pinning{
             RemoteServices: map[string]RemotePinningService{},
         },
@@ -275,7 +275,7 @@ fetching may be degraded.
         },
     },
     "announce-off": {
-        Description: `Disables Provide and Reprovide systems (announcing to Amino DHT).
+        Description: `Disables Provide system (announcing to Amino DHT).

 USE WITH CAUTION:
 The main use case for this is setups with manual Peering.Peers config.
@@ -284,16 +284,16 @@ fetching may be degraded.
 one hosting it, and other peers are not already connected to it.
 `,
         Transform: func(c *Config) error {
-            c.Provider.Enabled = False
-            c.Reprovider.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
+            c.Provide.Enabled = False
+            c.Provide.DHT.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
             return nil
         },
     },
     "announce-on": {
-        Description: `Re-enables Provide and Reprovide systems (reverts announce-off profile).`,
+        Description: `Re-enables Provide system (reverts announce-off profile).`,
         Transform: func(c *Config) error {
-            c.Provider.Enabled = True
-            c.Reprovider.Interval = NewOptionalDuration(DefaultReproviderInterval) // have to apply explicit default because nil would be ignored
+            c.Provide.Enabled = True
+            c.Provide.DHT.Interval = NewOptionalDuration(DefaultProvideDHTInterval) // have to apply explicit default because nil would be ignored
             return nil
         },
     },
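A small sketch, not taken from this diff, of what the profile Transform above does to an in-memory config. It uses the exported Profiles map from the kubo config package; applying "announce-off" turns providing off and zeroes the DHT reprovide interval, and "announce-on" reverses it. This is roughly what 'ipfs config profile apply announce-off' ends up doing to these keys.

    package main

    import (
        "fmt"

        "github.com/ipfs/kubo/config"
    )

    func main() {
        var cfg config.Config
        if err := config.Profiles["announce-off"].Transform(&cfg); err != nil {
            panic(err)
        }
        // After the transform, Provide.Enabled is an explicit false flag and
        // Provide.DHT.Interval is an explicit 0 (periodic reprovide disabled).
        fmt.Println(cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled))                  // false
        fmt.Println(cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)) // 0s
    }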
config/provide.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package config

import (
    "strings"
    "time"
)

const (
    DefaultProvideEnabled  = true
    DefaultProvideStrategy = "all"

    // DHT provider defaults
    DefaultProvideDHTInterval                 = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
    DefaultProvideDHTMaxWorkers               = 16              // Unified default for both sweep and legacy providers
    DefaultProvideDHTSweepEnabled             = false
    DefaultProvideDHTDedicatedPeriodicWorkers = 2
    DefaultProvideDHTDedicatedBurstWorkers    = 1
    DefaultProvideDHTMaxProvideConnsPerWorker = 16
    DefaultProvideDHTKeyStoreBatchSize        = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
    DefaultProvideDHTOfflineDelay             = 2 * time.Hour
)

type ProvideStrategy int

const (
    ProvideStrategyAll ProvideStrategy = 1 << iota
    ProvideStrategyPinned
    ProvideStrategyRoots
    ProvideStrategyMFS
)

// Provide configures both immediate CID announcements (provide operations) for new content
// and periodic re-announcements of existing CIDs (reprovide operations).
// This section combines the functionality previously split between Provider and Reprovider.
type Provide struct {
    // Enabled controls whether both provide and reprovide systems are enabled.
    // When disabled, the node will not announce any content to the routing system.
    Enabled Flag `json:",omitempty"`

    // Strategy determines which CIDs are announced to the routing system.
    // Default: DefaultProvideStrategy
    Strategy *OptionalString `json:",omitempty"`

    // DHT configures DHT-specific provide and reprovide settings.
    DHT ProvideDHT
}

// ProvideDHT configures DHT provider settings for both immediate announcements
// and periodic reprovides.
type ProvideDHT struct {
    // Interval sets the time between rounds of reproviding local content
    // to the routing system. Set to "0" to disable content reproviding.
    // Default: DefaultProvideDHTInterval
    Interval *OptionalDuration `json:",omitempty"`

    // MaxWorkers sets the maximum number of concurrent workers for provide operations.
    // When SweepEnabled is false: controls NEW CID announcements only.
    // When SweepEnabled is true: controls total worker pool for all operations.
    // Default: DefaultProvideDHTMaxWorkers
    MaxWorkers *OptionalInteger `json:",omitempty"`

    // SweepEnabled activates the sweeping reprovider system which spreads
    // reprovide operations over time. This will become the default in a future release.
    // Default: DefaultProvideDHTSweepEnabled
    SweepEnabled Flag `json:",omitempty"`

    // DedicatedPeriodicWorkers sets workers dedicated to periodic reprovides (sweep mode only).
    // Default: DefaultProvideDHTDedicatedPeriodicWorkers
    DedicatedPeriodicWorkers *OptionalInteger `json:",omitempty"`

    // DedicatedBurstWorkers sets workers dedicated to burst provides (sweep mode only).
    // Default: DefaultProvideDHTDedicatedBurstWorkers
    DedicatedBurstWorkers *OptionalInteger `json:",omitempty"`

    // MaxProvideConnsPerWorker sets concurrent connections per worker for sending provider records (sweep mode only).
    // Default: DefaultProvideDHTMaxProvideConnsPerWorker
    MaxProvideConnsPerWorker *OptionalInteger `json:",omitempty"`

    // KeyStoreBatchSize sets the batch size for keystore operations during reprovide refresh (sweep mode only).
    // Default: DefaultProvideDHTKeyStoreBatchSize
    KeyStoreBatchSize *OptionalInteger `json:",omitempty"`

    // OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
    // Default: DefaultProvideDHTOfflineDelay
    OfflineDelay *OptionalDuration `json:",omitempty"`
}

func ParseProvideStrategy(s string) ProvideStrategy {
    var strategy ProvideStrategy
    for _, part := range strings.Split(s, "+") {
        switch part {
        case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
            return ProvideStrategyAll
        case "pinned":
            strategy |= ProvideStrategyPinned
        case "roots":
            strategy |= ProvideStrategyRoots
        case "mfs":
            strategy |= ProvideStrategyMFS
        }
    }
    return strategy
}
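A quick usage sketch, not one of the files in this changeset, showing how the strategy bitmask above behaves; the checks mirror how the flags are tested elsewhere in this diff (for example strategyFlag&config.ProvideStrategyMFS):

    package main

    import (
        "fmt"

        "github.com/ipfs/kubo/config"
    )

    func main() {
        flags := config.ParseProvideStrategy("pinned+mfs")
        fmt.Println(flags&config.ProvideStrategyPinned != 0) // true
        fmt.Println(flags&config.ProvideStrategyMFS != 0)    // true
        fmt.Println(flags&config.ProvideStrategyRoots != 0)  // false

        // The deprecated "flat" value and the empty string both short-circuit to "all".
        fmt.Println(config.ParseProvideStrategy("flat") == config.ProvideStrategyAll) // true
    }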
config/provide_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package config

import "testing"

func TestParseProvideStrategy(t *testing.T) {
    tests := []struct {
        input  string
        expect ProvideStrategy
    }{
        {"all", ProvideStrategyAll},
        {"pinned", ProvideStrategyPinned},
        {"mfs", ProvideStrategyMFS},
        {"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS},
        {"invalid", 0},
        {"all+invalid", ProvideStrategyAll},
        {"", ProvideStrategyAll},
        {"flat", ProvideStrategyAll}, // deprecated, maps to "all"
        {"flat+all", ProvideStrategyAll},
    }

    for _, tt := range tests {
        result := ParseProvideStrategy(tt.input)
        if result != tt.expect {
            t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
        }
    }
}
@@ -1,14 +1,16 @@
 package config

-const (
-    DefaultProviderEnabled     = true
-    DefaultProviderWorkerCount = 16
-)
-
 // Provider configuration describes how NEW CIDs are announced the moment they are created.
-// For periodical reprovide configuration, see Reprovider.*
+// For periodical reprovide configuration, see Provide.*
+//
+// Deprecated: use Provide instead. This will be removed in a future release.
 type Provider struct {
+    // Deprecated: use Provide.Enabled instead. This will be removed in a future release.
     Enabled Flag `json:",omitempty"`
-    Strategy *OptionalString `json:",omitempty"` // Unused, you are likely looking for Reprovider.Strategy instead
-    WorkerCount *OptionalInteger `json:",omitempty"` // Number of concurrent provides allowed, 0 means unlimited
+
+    // Deprecated: unused, you are likely looking for Provide.Strategy instead. This will be removed in a future release.
+    Strategy *OptionalString `json:",omitempty"`
+
+    // Deprecated: use Provide.DHT.MaxWorkers instead. This will be removed in a future release.
+    WorkerCount *OptionalInteger `json:",omitempty"`
 }
@@ -1,69 +1,13 @@
 package config

-import (
-    "strings"
-    "time"
-)
-
-const (
-    DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
-    DefaultReproviderStrategy = "all"
-
-    DefaultReproviderSweepEnabled                  = false
-    DefaultReproviderSweepMaxWorkers               = 4
-    DefaultReproviderSweepDedicatedPeriodicWorkers = 2
-    DefaultReproviderSweepDedicatedBurstWorkers    = 1
-    DefaultReproviderSweepMaxProvideConnsPerWorker = 16
-    DefaultReproviderSweepKeyStoreBatchSize        = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
-    DefaultReproviderSweepOfflineDelay             = 2 * time.Hour
-)
-
-type ReproviderStrategy int
-
-const (
-    ReproviderStrategyAll ReproviderStrategy = 1 << iota
-    ReproviderStrategyPinned
-    ReproviderStrategyRoots
-    ReproviderStrategyMFS
-)
-
 // Reprovider configuration describes how CID from local datastore are periodically re-announced to routing systems.
-// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provider.*
+// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provide.*
+//
+// Deprecated: use Provide instead. This will be removed in a future release.
 type Reprovider struct {
-    Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
-    Strategy *OptionalString   `json:",omitempty"` // Which keys to announce
-
-    Sweep Sweep
-}
-
-// Sweep configuration describes how the Sweeping Reprovider is configured if enabled.
-type Sweep struct {
-    Enabled Flag `json:",omitempty"`
-
-    MaxWorkers               *OptionalInteger // Max number of concurrent workers performing a provide operation.
-    DedicatedPeriodicWorkers *OptionalInteger // Number of workers dedicated to periodic reprovides.
-    DedicatedBurstWorkers    *OptionalInteger // Number of workers dedicated to initial provides or burst reproviding keyspace regions after a period of inactivity.
-    MaxProvideConnsPerWorker *OptionalInteger // Number of connections that a worker is able to open to send provider records during a (re)provide operation.
-
-    KeyStoreGCInterval *OptionalDuration // Interval for garbage collection in KeyStore.
-    KeyStoreBatchSize  *OptionalInteger  // Number of multihashes to keep in memory when gc'ing the KeyStore.
-
-    OfflineDelay *OptionalDuration // Delay after which the provides changes state from Disconnected to Offline.
-}
-
-func ParseReproviderStrategy(s string) ReproviderStrategy {
-    var strategy ReproviderStrategy
-    for _, part := range strings.Split(s, "+") {
-        switch part {
-        case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
-            return ReproviderStrategyAll
-        case "pinned":
-            strategy |= ReproviderStrategyPinned
-        case "roots":
-            strategy |= ReproviderStrategyRoots
-        case "mfs":
-            strategy |= ReproviderStrategyMFS
-        }
-    }
-    return strategy
+    // Deprecated: use Provide.DHT.Interval instead. This will be removed in a future release.
+    Interval *OptionalDuration `json:",omitempty"`
+
+    // Deprecated: use Provide.Strategy instead. This will be removed in a future release.
+    Strategy *OptionalString `json:",omitempty"`
 }
@@ -1,27 +0,0 @@ (file removed)
-package config
-
-import "testing"
-
-func TestParseReproviderStrategy(t *testing.T) {
-    tests := []struct {
-        input  string
-        expect ReproviderStrategy
-    }{
-        {"all", ReproviderStrategyAll},
-        {"pinned", ReproviderStrategyPinned},
-        {"mfs", ReproviderStrategyMFS},
-        {"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS},
-        {"invalid", 0},
-        {"all+invalid", ReproviderStrategyAll},
-        {"", ReproviderStrategyAll},
-        {"flat", ReproviderStrategyAll}, // deprecated, maps to "all"
-        {"flat+all", ReproviderStrategyAll},
-    }
-
-    for _, tt := range tests {
-        result := ParseReproviderStrategy(tt.input)
-        if result != tt.expect {
-            t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
-        }
-    }
-}
@@ -82,7 +82,7 @@ to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-d

 If the daemon is not running, it will just add locally to the repo at $IPFS_PATH.
 If the daemon is started later, it will be advertised after a few
-seconds when the reprovider runs.
+seconds when the provide system runs.

 BASIC EXAMPLES:

@@ -45,12 +45,12 @@ var provideClearCmd = &cmds.Command{
     Helptext: cmds.HelpText{
         Tagline: "Clear all CIDs from the provide queue.",
         ShortDescription: `
-Clear all CIDs from the reprovide queue.
+Clear all CIDs pending to be provided for the first time.

 Note: Kubo will automatically clear the queue when it detects a change of
-Reprovider.Strategy upon a restart. For more information about reprovider
+Provide.Strategy upon a restart. For more information about provide
 strategies, see:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy
+https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
 `,
     },
     Options: []cmds.Option{
@@ -100,8 +100,8 @@ var provideStatCmd = &cmds.Command{
         Tagline: "Returns statistics about the node's provider system.",
         ShortDescription: `
 Returns statistics about the content the node is reproviding every
-Reprovider.Interval according to Reprovider.Strategy:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider
+Provide.DHT.Interval according to Provide.Strategy:
+https://github.com/ipfs/kubo/blob/master/docs/config.md#provide

 This interface is not stable and may change from release to release.

@@ -121,7 +121,7 @@ This interface is not stable and may change from release to release.

         provideSys, ok := nd.Provider.(provider.System)
         if !ok {
-            return errors.New("stats not available with experimental sweeping provider (Reprovider.Sweep.Enabled=true)")
+            return errors.New("stats not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
         }

         stats, err := provideSys.Stat()
@@ -166,8 +166,8 @@ var provideRefRoutingCmd = &cmds.Command{
         if err != nil {
             return err
         }
-        if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
-            return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
+        if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+            return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
         }

         if len(nd.PeerHost.Network().Conns()) == 0 {
@@ -270,15 +270,15 @@ Trigger reprovider to announce our data to network.
         if err != nil {
             return err
         }
-        if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
-            return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
+        if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+            return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
         }
-        if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
-            return errors.New("invalid configuration: Reprovider.Interval is set to '0'")
+        if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
+            return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
         }
         provideSys, ok := nd.Provider.(*node.LegacyProvider)
         if !ok {
-            return errors.New("manual reprovide not available with experimental sweeping provider (Reprovider.Sweep.Enabled=true)")
+            return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
         }

         err = provideSys.Reprovide(req.Context)
@@ -105,7 +105,7 @@ type IpfsNode struct {
     Exchange exchange.Interface // the block exchange + strategy
     Bitswap  *bitswap.Bitswap `optional:"true"` // The Bitswap instance
     Namesys  namesys.NameSystem // the name system, resolves paths to hashes
-    ProvidingStrategy config.ReproviderStrategy `optional:"true"`
+    ProvidingStrategy config.ProvideStrategy `optional:"true"`
     ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
     IpnsRepub *ipnsrp.Republisher `optional:"true"`
     ResourceManager network.ResourceManager `optional:"true"`
@@ -70,7 +70,7 @@ type CoreAPI struct {
     unixFSPathResolver pathresolver.Resolver

     provider          node.DHTProvider
-    providingStrategy config.ReproviderStrategy
+    providingStrategy config.ProvideStrategy

     pubSub *pubsub.PubSub

@@ -72,7 +72,7 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
     c.AutoTLS.Enabled = config.False // disable so no /ws listener is added
     // For provider tests, avoid that content gets
    // auto-provided without calling "provide" (unless pinned).
-    c.Reprovider.Strategy = config.NewOptionalString("roots")
+    c.Provide.Strategy = config.NewOptionalString("roots")

     ds := syncds.MutexWrap(datastore.NewMapDatastore())
     r := &repo.Mock{
@@ -120,7 +120,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
     // nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only
     // handles roots). This wrapping ensures all blocks of pinned content get provided.
     if settings.Pin && !settings.OnlyHash &&
-        (api.providingStrategy&config.ReproviderStrategyPinned) != 0 {
+        (api.providingStrategy&config.ProvideStrategyPinned) != 0 {
         dserv = &providingDagService{dserv, api.provider}
     }

@@ -52,7 +52,7 @@ func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGSe
     // Parse strategy at function creation time (not inside the returned function)
     // This happens before the provider is created, which is why we pass the strategy
     // string and parse it here, rather than using fx-provided ProvidingStrategy.
-    strategyFlag := config.ParseReproviderStrategy(strategy)
+    strategyFlag := config.ParseProvideStrategy(strategy)

     return func(bstore blockstore.Blockstore,
         ds format.DAGService,
@@ -72,8 +72,8 @@ func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGSe
         ctx := context.TODO()

         var opts []dspinner.Option
-        roots := (strategyFlag & config.ReproviderStrategyRoots) != 0
-        pinned := (strategyFlag & config.ReproviderStrategyPinned) != 0
+        roots := (strategyFlag & config.ProvideStrategyRoots) != 0
+        pinned := (strategyFlag & config.ProvideStrategyPinned) != 0

         // Important: Only one of WithPinnedProvider or WithRootsProvider should be active.
         // Having both would cause duplicate root advertisements since "pinned" includes all
@@ -236,8 +236,8 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo
     // strategy - it ensures all MFS content gets announced as it's added or
     // modified. For non-mfs strategies, we set provider to nil to avoid
     // unnecessary providing.
-    strategyFlag := config.ParseReproviderStrategy(strategy)
-    if strategyFlag&config.ReproviderStrategyMFS == 0 {
+    strategyFlag := config.ParseProvideStrategy(strategy)
+    if strategyFlag&config.ProvideStrategyMFS == 0 {
         prov = nil
     }

@@ -254,7 +254,7 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option {
         cacheOpts,
         cfg.Datastore.HashOnRead,
         cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough),
-        cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
+        cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy),
     )),
     finalBstore,
 )
@@ -347,9 +347,9 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
     isBitswapServerEnabled := cfg.Bitswap.ServerEnabled.WithDefault(config.DefaultBitswapServerEnabled)
     isHTTPRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled)

-    // Right now Provider and Reprovider systems are tied together - disabling Reprovider by setting interval to 0 disables Provider
-    // and vice versa: Provider.Enabled=false will disable both Provider of new CIDs and the Reprovider of old ones.
-    isProviderEnabled := cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) && cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0
+    // The Provide system handles both new CID announcements and periodic re-announcements.
+    // Disabling is controlled by Provide.Enabled=false or setting Interval to 0.
+    isProviderEnabled := cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) && cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) != 0

     return fx.Options(
         fx.Provide(BitswapOptions(cfg)),
@@ -442,7 +442,7 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
     uio.HAMTShardingSize = int(shardSingThresholdInt)
     uio.DefaultShardWidth = int(shardMaxFanout)

-    providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
+    providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)

     return fx.Options(
         bcfgOpts,
@@ -24,7 +24,6 @@ import (
     "github.com/libp2p/go-libp2p-kad-dht/fullrt"
     dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
     dhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider"
-    "github.com/libp2p/go-libp2p-kad-dht/provider/buffered"
     ddhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider/dual"
     "github.com/libp2p/go-libp2p-kad-dht/provider/keystore"
     routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
@@ -85,7 +84,7 @@ type DHTProvider interface {
     // The keys are not deleted from the keystore, so they will continue to be
     // reprovided as scheduled.
     Clear() int
-    // RefreshSchedule scans the Keystore for any keys that are not currently
+    // RefreshSchedule scans the KeyStore for any keys that are not currently
     // scheduled for reproviding. If such keys are found, it schedules their
     // associated keyspace region to be reprovided.
     //
@@ -107,6 +106,9 @@
     _ DHTProvider = &LegacyProvider{}
 )

+// NoopProvider is a no-operation provider implementation that does nothing.
+// It is used when providing is disabled or when no DHT is available.
+// All methods return successfully without performing any actual operations.
 type NoopProvider struct{}

 func (r *NoopProvider) StartProviding(bool, ...mh.Multihash) error { return nil }
@@ -114,9 +116,14 @@ func (r *NoopProvider) ProvideOnce(...mh.Multihash) error { return nil
 func (r *NoopProvider) Clear() int { return 0 }
 func (r *NoopProvider) RefreshSchedule() error { return nil }

-// LegacyProvider is a wrapper around the boxo/provider.System. This DHT
-// provide system manages reprovides by bursts where it sequentially reprovides
-// all keys.
+// LegacyProvider is a wrapper around the boxo/provider.System that implements
+// the DHTProvider interface. This provider manages reprovides using a burst
+// strategy where it sequentially reprovides all keys at once during each
+// reprovide interval, rather than spreading the load over time.
+//
+// This is the legacy provider implementation that can cause resource spikes
+// during reprovide operations. For more efficient providing, consider using
+// the SweepingProvider which spreads the load over the reprovide interval.
 type LegacyProvider struct {
     provider.System
 }
@@ -297,7 +304,7 @@ type addrsFilter interface {
 }

 func SweepingProviderOpt(cfg *config.Config) fx.Option {
-    reprovideInterval := cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval)
+    reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)
     type providerInput struct {
         fx.In
         DHT routing.Routing `name:"dhtc"`
@@ -305,21 +312,14 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
     }
     sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) {
         ds := in.Repo.Datastore()
-        ks, err := keystore.NewResettableKeystore(ds,
+        keyStore, err := keystore.NewResettableKeystore(ds,
             keystore.WithPrefixBits(16),
             keystore.WithDatastorePath("/provider/keystore"),
-            keystore.WithBatchSize(int(cfg.Reprovider.Sweep.KeyStoreBatchSize.WithDefault(config.DefaultReproviderSweepKeyStoreBatchSize))),
+            keystore.WithBatchSize(int(cfg.Provide.DHT.KeyStoreBatchSize.WithDefault(config.DefaultProvideDHTKeyStoreBatchSize))),
         )
         if err != nil {
             return &NoopProvider{}, nil, err
         }

-        bufferedProviderOpts := []buffered.Option{
-            buffered.WithBatchSize(1 << 10),
-            buffered.WithDsName("bprov"),
-            buffered.WithIdleWriteTime(time.Minute),
-        }
-
         var impl dhtImpl
         switch inDht := in.DHT.(type) {
         case *dht.IpfsDHT:
@@ -329,22 +329,23 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
         case *dual.DHT:
             if inDht != nil {
                 prov, err := ddhtprovider.New(inDht,
-                    ddhtprovider.WithKeystore(ks),
+                    ddhtprovider.WithKeystore(keyStore),

                     ddhtprovider.WithReprovideInterval(reprovideInterval),
                     ddhtprovider.WithMaxReprovideDelay(time.Hour),
-                    ddhtprovider.WithOfflineDelay(cfg.Reprovider.Sweep.OfflineDelay.WithDefault(config.DefaultReproviderSweepOfflineDelay)),
+                    ddhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
                     ddhtprovider.WithConnectivityCheckOnlineInterval(1*time.Minute),

-                    ddhtprovider.WithMaxWorkers(int(cfg.Reprovider.Sweep.MaxWorkers.WithDefault(config.DefaultReproviderSweepMaxWorkers))),
-                    ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Reprovider.Sweep.DedicatedPeriodicWorkers.WithDefault(config.DefaultReproviderSweepDedicatedPeriodicWorkers))),
-                    ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Reprovider.Sweep.DedicatedBurstWorkers.WithDefault(config.DefaultReproviderSweepDedicatedBurstWorkers))),
-                    ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Reprovider.Sweep.MaxProvideConnsPerWorker.WithDefault(config.DefaultReproviderSweepMaxProvideConnsPerWorker))),
+                    ddhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
+                    ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
+                    ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
+                    ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
                 )
                 if err != nil {
                     return nil, nil, err
                 }
-                return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
+                _ = prov
+                return prov, keyStore, nil
             }
         case *fullrt.FullRT:
             if inDht != nil {
@@ -352,7 +353,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
             }
         }
         if impl == nil {
-            return &NoopProvider{}, nil, nil
+            return &NoopProvider{}, nil, errors.New("provider: no valid DHT available for providing")
         }

         var selfAddrsFunc func() []ma.Multiaddr
@@ -362,7 +363,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
             selfAddrsFunc = func() []ma.Multiaddr { return impl.Host().Addrs() }
         }
         opts := []dhtprovider.Option{
-            dhtprovider.WithKeystore(ks),
+            dhtprovider.WithKeystore(keyStore),
             dhtprovider.WithPeerID(impl.Host().ID()),
             dhtprovider.WithRouter(impl),
             dhtprovider.WithMessageSender(impl.MessageSender()),
@@ -374,40 +375,37 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
             dhtprovider.WithReplicationFactor(amino.DefaultBucketSize),
             dhtprovider.WithReprovideInterval(reprovideInterval),
             dhtprovider.WithMaxReprovideDelay(time.Hour),
-            dhtprovider.WithOfflineDelay(cfg.Reprovider.Sweep.OfflineDelay.WithDefault(config.DefaultReproviderSweepOfflineDelay)),
+            dhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
             dhtprovider.WithConnectivityCheckOnlineInterval(1 * time.Minute),

-            dhtprovider.WithMaxWorkers(int(cfg.Reprovider.Sweep.MaxWorkers.WithDefault(config.DefaultReproviderSweepMaxWorkers))),
-            dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Reprovider.Sweep.DedicatedPeriodicWorkers.WithDefault(config.DefaultReproviderSweepDedicatedPeriodicWorkers))),
-            dhtprovider.WithDedicatedBurstWorkers(int(cfg.Reprovider.Sweep.DedicatedBurstWorkers.WithDefault(config.DefaultReproviderSweepDedicatedBurstWorkers))),
-            dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Reprovider.Sweep.MaxProvideConnsPerWorker.WithDefault(config.DefaultReproviderSweepMaxProvideConnsPerWorker))),
+            dhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
+            dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
+            dhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
+            dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
         }

         prov, err := dhtprovider.New(opts...)
-        if err != nil {
-            return &NoopProvider{}, nil, err
-        }
-        return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
+        return prov, keyStore, err
     })

     type keystoreInput struct {
         fx.In
         Provider    DHTProvider
-        Keystore    *keystore.ResettableKeystore
+        KeyStore    *keystore.ResettableKeystore
         KeyProvider provider.KeyChanFunc
     }
-    initKeystore := fx.Invoke(func(lc fx.Lifecycle, in keystoreInput) {
+    initKeyStore := fx.Invoke(func(lc fx.Lifecycle, in keystoreInput) {
         var (
             cancel context.CancelFunc
             done   = make(chan struct{})
         )

-        syncKeystore := func(ctx context.Context) error {
+        syncKeyStore := func(ctx context.Context) error {
             kcf, err := in.KeyProvider(ctx)
             if err != nil {
                 return err
             }
-            if err := in.Keystore.ResetCids(ctx, kcf); err != nil {
+            if err := in.KeyStore.ResetCids(ctx, kcf); err != nil {
                 return err
             }
             if err := in.Provider.RefreshSchedule(); err != nil {
@@ -418,15 +416,12 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {

         lc.Append(fx.Hook{
             OnStart: func(ctx context.Context) error {
-                if in.Provider == nil || in.Keystore == nil {
-                    return nil
-                }
                 // Set the KeyProvider as a garbage collection function for the
-                // keystore. Periodically purge the Keystore from all its keys and
+                // keystore. Periodically purge the KeyStore from all its keys and
                 // replace them with the keys that needs to be reprovided, coming from
                 // the KeyChanFunc. So far, this is the less worse way to remove CIDs
                 // that shouldn't be reprovided from the provider's state.
-                if err := syncKeystore(ctx); err != nil {
+                if err := syncKeyStore(ctx); err != nil {
                     return err
                 }

@@ -443,7 +438,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
                     case <-gcCtx.Done():
                         return
                     case <-ticker.C:
-                        if err := syncKeystore(gcCtx); err != nil {
+                        if err := syncKeyStore(gcCtx); err != nil {
                             logger.Errorw("provider keystore sync", "err", err)
                         }
                     }
@@ -452,11 +447,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
                 return nil
             },
             OnStop: func(ctx context.Context) error {
-                if in.Provider == nil || in.Keystore == nil {
-                    return nil
-                }
                 if cancel != nil {
-                    // Cancel Keystore garbage collection loop
                     cancel()
                 }
                 select {
@ -464,43 +455,45 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
// KeyStore state isn't be persisted across restarts.
|
||||||
// Keystore state isn't be persisted across restarts.
|
if err := in.KeyStore.Empty(ctx); err != nil {
|
||||||
return in.Keystore.Empty(ctx)
|
return err
|
||||||
|
}
|
||||||
|
return in.KeyStore.Close()
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
return fx.Options(
|
return fx.Options(
|
||||||
sweepingReprovider,
|
sweepingReprovider,
|
||||||
initKeystore,
|
initKeyStore,
|
||||||
)
|
)
|
||||||
}
|
}
|

 // ONLINE/OFFLINE

-// OnlineProviders groups units managing provider routing records online
+// OnlineProviders groups units managing provide routing records online
 func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
 	if !provide {
 		return OfflineProviders()
 	}

-	providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
+	providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)

-	strategyFlag := config.ParseReproviderStrategy(providerStrategy)
+	strategyFlag := config.ParseProvideStrategy(providerStrategy)
 	if strategyFlag == 0 {
-		return fx.Error(fmt.Errorf("unknown reprovider strategy %q", providerStrategy))
+		return fx.Error(fmt.Errorf("provider: unknown strategy %q", providerStrategy))
 	}

 	opts := []fx.Option{
 		fx.Provide(setReproviderKeyProvider(providerStrategy)),
 	}
-	if cfg.Reprovider.Sweep.Enabled.WithDefault(config.DefaultReproviderSweepEnabled) {
+	if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
 		opts = append(opts, SweepingProviderOpt(cfg))
 	} else {
-		reprovideInterval := cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval)
+		reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)
 		acceleratedDHTClient := cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient)
-		provideWorkerCount := int(cfg.Provider.WorkerCount.WithDefault(config.DefaultProviderWorkerCount))
+		provideWorkerCount := int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))

 		opts = append(opts, LegacyProviderOpt(reprovideInterval, providerStrategy, acceleratedDHTClient, provideWorkerCount))
 	}
@@ -508,7 +501,7 @@ func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
 	return fx.Options(opts...)
 }

-// OfflineProviders groups units managing provider routing records offline
+// OfflineProviders groups units managing provide routing records offline
 func OfflineProviders() fx.Option {
 	return fx.Provide(func() DHTProvider {
 		return &NoopProvider{}
@@ -519,11 +512,11 @@ func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFun
 	return func(ctx context.Context) (<-chan cid.Cid, error) {
 		err := mfsRoot.FlushMemFree(ctx)
 		if err != nil {
-			return nil, fmt.Errorf("error flushing mfs, cannot provide MFS: %w", err)
+			return nil, fmt.Errorf("provider: error flushing MFS, cannot provide MFS: %w", err)
 		}
 		rootNode, err := mfsRoot.GetDirectory().GetNode()
 		if err != nil {
-			return nil, fmt.Errorf("error loading mfs root, cannot provide MFS: %w", err)
+			return nil, fmt.Errorf("provider: error loading MFS root, cannot provide MFS: %w", err)
 		}

 		kcf := provider.NewDAGProvider(rootNode.Cid(), fetcher)
@@ -543,7 +536,7 @@ type provStrategyIn struct {

 type provStrategyOut struct {
 	fx.Out
-	ProvidingStrategy    config.ReproviderStrategy
+	ProvidingStrategy    config.ProvideStrategy
 	ProvidingKeyChanFunc provider.KeyChanFunc
 }

@@ -553,18 +546,18 @@ type provStrategyOut struct {
 // - "pinned": All pinned content (roots + children)
 // - "mfs": Only MFS content
 // - "all": all blocks
-func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc {
+func createKeyProvider(strategyFlag config.ProvideStrategy, in provStrategyIn) provider.KeyChanFunc {
 	switch strategyFlag {
-	case config.ReproviderStrategyRoots:
+	case config.ProvideStrategyRoots:
 		return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher))
-	case config.ReproviderStrategyPinned:
+	case config.ProvideStrategyPinned:
 		return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher))
-	case config.ReproviderStrategyPinned | config.ReproviderStrategyMFS:
+	case config.ProvideStrategyPinned | config.ProvideStrategyMFS:
 		return provider.NewPrioritizedProvider(
 			provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)),
 			mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher),
 		)
-	case config.ReproviderStrategyMFS:
+	case config.ProvideStrategyMFS:
 		return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
 	default: // "all", "", "flat" (compat)
 		return in.Blockstore.AllKeysChan
@@ -616,7 +609,7 @@ func handleStrategyChange(strategy string, provider DHTProvider, ds datastore.Da
 		return
 	}

-	logger.Infow("Reprovider.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
+	logger.Infow("Provide.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
 	provider.Clear()

 	if err := persistStrategy(ctx, strategy, ds); err != nil {
@@ -625,7 +618,7 @@ func handleStrategyChange(strategy string, provider DHTProvider, ds datastore.Da
 }

 func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut {
-	strategyFlag := config.ParseReproviderStrategy(strategy)
+	strategyFlag := config.ParseProvideStrategy(strategy)

 	return func(in provStrategyIn) provStrategyOut {
 		// Create the appropriate key provider based on strategy
@@ -41,8 +41,8 @@ func BaseBlockstoreCtor(
 	// Important: Provide calls from blockstore are intentionally BLOCKING.
 	// The Provider implementation (not the blockstore) should handle concurrency/queuing.
 	// This avoids spawning unbounded goroutines for concurrent block additions.
-	strategyFlag := config.ParseReproviderStrategy(providingStrategy)
-	if strategyFlag&config.ReproviderStrategyAll != 0 {
+	strategyFlag := config.ParseProvideStrategy(providingStrategy)
+	if strategyFlag&config.ProvideStrategyAll != 0 {
 		opts = append(opts, blockstore.Provider(prov))
 	}

@@ -10,24 +10,49 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.

 - [Overview](#overview)
 - [🔦 Highlights](#-highlights)
+  - [🚀 Repository migration: simplified provide configuration](#-repository-migration-simplified-provide-configuration)
   - [🧹 Experimental Sweeping DHT Provider](#-experimental-sweeping-dht-provider)
+  - [📊 Exposed DHT metrics](#-exposed-dht-metrics)
+  - [🚨 Improved gateway error pages with diagnostic tools](#-improved-gateway-error-pages-with-diagnostic-tools)
+  - [🛠️ Identity CID size enforcement and `ipfs files write` fixes](#-identity-cid-size-enforcement-and-ipfs-files-write-fixes)
 - [📦️ Important dependency updates](#-important-dependency-updates)
 - [📝 Changelog](#-changelog)
 - [👨👩👧👦 Contributors](#-contributors)

 ### Overview

+Kubo 0.38.0 simplifies content announcement configuration, introduces an experimental sweeping DHT provider for efficient large-scale operations, and includes various performance improvements.
+
 ### 🔦 Highlights

+#### 🚀 Repository migration: simplified provide configuration
+
+This release migrates the repository from version 17 to version 18, simplifying how you configure content announcements.
+
+The old `Provider` and `Reprovider` sections are now combined into a single [`Provide`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provide) section. Your existing settings are automatically migrated - no manual changes needed.
+
+**Migration happens automatically** when you run `ipfs daemon --migrate`. For manual migration: `ipfs repo migrate --to=18`.
+
+Read more about the new system below.
+
 #### 🧹 Experimental Sweeping DHT Provider

-An experimental alternative to both the default DHT provider and the resource-intensive [accelerated DHT client](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient) is now available. When enabled via [`Reprovider.Sweep.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovidersweep), this sweeping DHT provider explores keyspace regions instead of providing keys one-by-one.

-This aims to help both large and small storage providers efficiently advertise hundreds of thousands of CIDs by batching operations and spreading work evenly over time.

-**Note:** While this feature is experimental, some commands like `ipfs stats provide` and manual reprovide (`ipfs routing provide`) are not available. Run `ipfs provide --help` for alternative commands.

-For configuration options and more details, see [`Reprovider.Sweep`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovidersweep) in the config documentation.

+A new experimental DHT provider is available as an alternative to both the default provider and the resource-intensive [accelerated DHT client](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). Enable it via [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled).
+
+> [!NOTE]
+> This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. Run `ipfs provide --help` for alternatives.
+
+**How it works:** Instead of providing keys one-by-one, the sweep provider systematically explores DHT keyspace regions in batches.
+
+**Benefits for large-scale operations:** Handles hundreds of thousands of CIDs with reduced memory and network connections, spreads operations evenly to eliminate resource spikes, maintains state across restarts through persistent keystore, and provides better metrics visibility.
+
+**Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system.
+
+For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedht). For metrics documentation, see [Provide metrics](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide).
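As a quick sanity check (illustrative commands, assuming the default API address), you can opt in and watch the sweep provider's counter grow:

```console
$ ipfs config --json Provide.DHT.SweepEnabled true
$ ipfs daemon --migrate &
$ curl -s http://127.0.0.1:5001/debug/metrics/prometheus | grep total_provide_count_total
```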
+#### 📊 Exposed DHT metrics
+
+Kubo now exposes DHT metrics from go-libp2p-kad-dht, including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details.
+
 #### 🚨 Improved gateway error pages with diagnostic tools

docs/config.md (495 lines changed)

@@ -125,6 +125,18 @@ config file at runtime.
   - [`Pinning.RemoteServices: Policies.MFS.Enabled`](#pinningremoteservices-policiesmfsenabled)
   - [`Pinning.RemoteServices: Policies.MFS.PinName`](#pinningremoteservices-policiesmfspinname)
   - [`Pinning.RemoteServices: Policies.MFS.RepinInterval`](#pinningremoteservices-policiesmfsrepininterval)
+- [`Provide`](#provide)
+  - [`Provide.Enabled`](#provideenabled)
+  - [`Provide.Strategy`](#providestrategy)
+  - [`Provide.DHT`](#providedht)
+    - [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers)
+    - [`Provide.DHT.Interval`](#providedhtinterval)
+    - [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)
+    - [`Provide.DHT.DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers)
+    - [`Provide.DHT.DedicatedBurstWorkers`](#providedhtdedicatedburstworkers)
+    - [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker)
+    - [`Provide.DHT.KeyStoreBatchSize`](#providedhtkeystorebatchsize)
+    - [`Provide.DHT.OfflineDelay`](#providedhtofflinedelay)
 - [`Provider`](#provider)
   - [`Provider.Enabled`](#providerenabled)
   - [`Provider.Strategy`](#providerstrategy)
@@ -139,8 +151,7 @@ config file at runtime.
 - [`Peering.Peers`](#peeringpeers)
 - [`Reprovider`](#reprovider)
   - [`Reprovider.Interval`](#reproviderinterval)
-  - [`Reprovider.Strategy`](#reproviderstrategy)
-  - [`Reprovider.Sweep`](#reprovidersweep)
+  - [`Reprovider.Strategy`](#providestrategy)
 - [`Routing`](#routing)
   - [`Routing.Type`](#routingtype)
   - [`Routing.AcceleratedDHTClient`](#routingaccelerateddhtclient)
@@ -1368,7 +1379,7 @@ Below is a list of the most common gateway setups.
     }
   }'
   ```
-  - **Performance:** consider running with `Routing.AcceleratedDHTClient=true` and either `Provider.Enabled=false` (avoid providing newly retrieved blocks) or `Provider.WorkerCount=0` (provide as fast as possible, at the cost of increased load)
+  - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. Separately, gateway operators should decide if the gateway node should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`.
  - **Backward-compatible:** this feature enables automatic redirects from content paths to subdomains:

    `http://dweb.link/ipfs/{cid}` → `http://{cid}.ipfs.dweb.link`
@@ -1393,7 +1404,7 @@ Below is a list of the most common gateway setups.
     }
   }'
   ```
-  - **Performance:** when running an open, recursive gateway consider running with `Routing.AcceleratedDHTClient=true` and either `Provider.Enabled=false` (avoid providing newly retrieved blocks) or `Provider.WorkerCount=0` (provide as fast as possible, at the cost of increased load)
+  - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. When running an open, recursive gateway, decide if the gateway should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`.

 * Public [DNSLink](https://dnslink.io/) gateway resolving every hostname passed in `Host` header.
   ```console
@@ -1833,36 +1844,113 @@ Default: `"5m"`

 Type: `duration`

-## `Provider`
+## `Provide`

-Configuration applied to the initial one-time announcement of fresh CIDs
-created with `ipfs add`, `ipfs files`, `ipfs dag import`, `ipfs block|dag put`
-commands.
+Configures CID announcements to the routing system, including both immediate
+announcements for new content (provide) and periodic re-announcements
+(reprovide) on systems that require it, like Amino DHT. While designed to support
+multiple routing systems in the future, the current default configuration only supports providing to the Amino DHT.

-For periodical DHT reprovide settings, see [`Reprovide.*`](#reprovider).
+### `Provide.Enabled`

-### `Provider.Enabled`
+Controls whether Kubo provide and reprovide systems are enabled.

-Controls whether Kubo provider and reprovide systems are enabled.

 > [!CAUTION]
-> Disabling this, will disable BOTH `Provider` system for new CIDs
-> and the periodical reprovide ([`Reprovider.Interval`](#reprovider)) of old CIDs.
+> Disabling this will prevent other nodes from discovering your content.
+> Your node will stop announcing data to the routing system, making it
+> inaccessible unless peers connect to you directly.

 Default: `true`

 Type: `flag`
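For example, a node that should never announce anything to the routing system (say, a throwaway test node) could be configured like this (illustrative):

```console
$ ipfs config --json Provide.Enabled false
```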
-### `Provider.Strategy`
+### `Provide.Strategy`

-Legacy, not used at the moment, see [`Reprovider.Strategy`](#reproviderstrategy) instead.

-### `Provider.WorkerCount`

-Sets the maximum number of _concurrent_ DHT provide operations (announcement of new CIDs).

-[`Reprovider`](#reprovider) operations do **not** count against this limit.
-A value of `0` allows an unlimited number of provide workers.

+Tells the provide system what should be announced. Valid strategies are:
+
+- `"all"` - announce all CIDs of stored blocks
+- `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks)
+  - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins
+- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`)
+  - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks.
+    It makes sense only for use cases where the entire DAG is fetched in full,
+    and a graceful resume does not have to be guaranteed: the lack of child
+    announcements means an interrupted retrieval won't be able to find
+    providers for the missing block in the middle of a file, unless the peer
+    happens to already be connected to a provider and asks for child CID over
+    bitswap.
+- `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`)
+  - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced.
+- `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies.
+  - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache.
+  - Order: first `pinned` and then the locally available part of `mfs`.
+
+**Strategy changes automatically clear the provide queue.** When you change `Provide.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`.
+
+**Memory requirements:**
+
+- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million CIDs for reproviding to the Amino DHT.
+- This is due to the use of a buffered provider, which loads all CIDs into memory to avoid holding a lock on the entire pinset during the reprovide cycle.
+
+Default: `"all"`
+
+Type: `optionalString` (unset for the default)
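As a sketch of a typical change, a node that keeps its data pinned and in MFS could switch strategies and reset the queue explicitly:

```console
$ ipfs config Provide.Strategy pinned+mfs
$ ipfs provide clear
```

(The restart after changing the strategy clears the queue automatically, as noted above; `ipfs provide clear` is only needed if you want to do it by hand.)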
+### `Provide.DHT`
+
+Configuration for providing data to Amino DHT peers.
+
+#### Monitoring Provide Operations
+
+You can monitor the effectiveness of your provide configuration through metrics exposed at the Prometheus endpoint: `{Addresses.API}/debug/metrics/prometheus` (default: `http://127.0.0.1:5001/debug/metrics/prometheus`).
+
+Different metrics are available depending on whether you use legacy mode (`SweepEnabled=false`) or sweep mode (`SweepEnabled=true`). See [Provide metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide) for details.
+
+To enable detailed debug logging for both providers, set:
+
+```sh
+GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug
+```
+
+- `provider=debug` enables generic logging (legacy provider and any non-dht operations)
+- `dht/provider=debug` enables logging for the sweep provider
+
+#### `Provide.DHT.Interval`
+
+Sets how often to re-announce content to the DHT. Provider records on Amino DHT
+expire after [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43),
+also known as Provider Record Expiration Interval.
+
+An interval of about half the expiration window ensures provider records
+are refreshed well before they expire. This keeps your content continuously
+discoverable, accounting for network churn, without overwhelming the network with too frequent announcements.
+
+- If unset, it uses the implicit safe default.
+- If set to the value `"0"` it will disable content reproviding to DHT.
+
+> [!CAUTION]
+> Disabling this will prevent other nodes from discovering your content via the DHT.
+> Your node will stop announcing data to the DHT, making it
+> inaccessible unless peers connect to you directly. Since provider
+> records expire after `amino.DefaultProvideValidity`, your content will become undiscoverable
+> after this period.
+
+Default: `22h`
+
+Type: `optionalDuration` (unset for the default)
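For example (illustrative values), the interval can be shortened for faster turnover, or set to `"0"` to disable DHT reprovides entirely:

```console
$ ipfs config Provide.DHT.Interval 11h
$ ipfs config Provide.DHT.Interval 0
```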
+#### `Provide.DHT.MaxWorkers`
+
+Sets the maximum number of _concurrent_ DHT provide operations.
+
+**When `Provide.DHT.SweepEnabled` is false (legacy mode):**
+- Controls NEW CID announcements only
+- Reprovide operations do **not** count against this limit
+- A value of `0` allows unlimited provide workers
+
+**When `Provide.DHT.SweepEnabled` is true:**
+- Controls the total worker pool for both provide and reprovide operations
+- Workers are split between periodic reprovides and burst provides
+- Use a positive value to control resource usage
+- See [`DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers) and [`DedicatedBurstWorkers`](#providedhtdedicatedburstworkers) for task allocation
+
 If the [accelerated DHT client](#routingaccelerateddhtclient) is enabled, each
 provide operation opens ~20 connections in parallel. With the standard DHT
@@ -1873,8 +1961,9 @@ connections this setting can generate.

 > [!CAUTION]
 > For nodes without strict connection limits that need to provide large volumes
-> of content immediately, we recommend enabling the `Routing.AcceleratedDHTClient` and
-> setting `Provider.WorkerCount` to `0` (unlimited).
+> of content, we recommend first trying `Provide.DHT.SweepEnabled=true` for efficient
+> announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`.
+> As a last resort, consider enabling `Routing.AcceleratedDHTClient=true` but be aware that it is very resource hungry.
 >
 > At the same time, mind that raising this value too high may lead to increased load.
 > Proceed with caution, ensure proper hardware and networking are in place.
@@ -1883,6 +1972,154 @@ Default: `16`

 Type: `optionalInteger` (non-negative; `0` means unlimited number of workers)
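If you need to raise or lower the pool, it is a single knob (illustrative value):

```console
$ ipfs config --json Provide.DHT.MaxWorkers 32
```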
+#### `Provide.DHT.SweepEnabled`
+
+Whether Provide Sweep is enabled. If not enabled, the legacy
+[`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is used for
+both provides and reprovides.
+
+Provide Sweep is a resource efficient technique for advertising content to
+the Amino DHT swarm. The Provide Sweep module tracks the keys that should be periodically reprovided in
+the `KeyStore`. It splits the keys into DHT keyspace regions by proximity (XOR
+distance), and schedules when reprovides should happen in order to spread the
+reprovide operation over time to avoid a spike in resource utilization. It
+basically sweeps the keyspace _from left to right_ over the
+[`Provide.DHT.Interval`](#providedhtinterval) time period, and reprovides keys
+matching the visited keyspace region.
+
+Provide Sweep aims at replacing the inefficient legacy `boxo/provider`
+module, and is currently opt-in. You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above).
+
+Whenever new keys should be advertised to the Amino DHT, `kubo` calls
+`StartProviding()`, triggering an initial `provide` operation for the given
+keys. The keys will be added to the `KeyStore` tracking which keys should be
+reprovided and when they should be reprovided. Calling `StopProviding()`
+removes the keys from the `KeyStore`. However, it is currently tricky for
+`kubo` to detect when a key should stop being advertised. Hence, `kubo` will
+periodically refresh the `KeyStore` at each [`Provide.DHT.Interval`](#providedhtinterval)
+by providing it a channel of all the keys it is expected to contain according
+to the [`Provide.Strategy`](#providestrategy). During this operation,
+all keys in the `KeyStore` are purged, and only the given ones remain scheduled.
+
+> [!NOTE]
+> This feature is opt-in for now, but will become the default in a future release.
+> Eventually, this configuration flag will be removed once the feature is stable.
+
+Default: `false`
+
+Type: `flag`
+
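A minimal opt-in sketch: enable the flag, then restart the daemon with sweep-provider debug logs turned on (log target names as documented in [Monitoring Provide Operations](#monitoring-provide-operations)) to confirm regions are being reprovided:

```console
$ ipfs config --json Provide.DHT.SweepEnabled true
$ GOLOG_LOG_LEVEL=error,dht/provider=debug ipfs daemon
```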
+#### `Provide.DHT.DedicatedPeriodicWorkers`
+
+Number of workers dedicated to periodic keyspace region reprovides. Only applies when `Provide.DHT.SweepEnabled` is true.
+
+Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
+number of workers will be dedicated to the periodic region reprovide only. The sum of
+`DedicatedPeriodicWorkers` and `DedicatedBurstWorkers` should not exceed `MaxWorkers`.
+Any remaining workers (MaxWorkers - DedicatedPeriodicWorkers - DedicatedBurstWorkers)
+form a shared pool that can be used for either type of work as needed.
+
+Default: `2`
+
+Type: `optionalInteger` (`0` means there are no dedicated workers, but the
+operation can be performed by free non-dedicated workers)
+
+#### `Provide.DHT.DedicatedBurstWorkers`
+
+Number of workers dedicated to burst provides. Only applies when `Provide.DHT.SweepEnabled` is true.
+
+Burst provides are triggered by:
+- Manual provide commands (`ipfs routing provide`)
+- New content matching your `Provide.Strategy` (blocks from `ipfs add`, bitswap, or trustless gateway requests)
+- Catch-up reprovides after being disconnected/offline for a while
+
+Having dedicated burst workers ensures that bulk operations (like adding many CIDs
+or reconnecting to the network) don't delay regular periodic reprovides, and vice versa.
+
+Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
+number of workers will be dedicated to burst provides only. In addition to
+these, if there are available workers in the pool, they can also be used for
+burst provides.
+
+Default: `1`
+
+Type: `optionalInteger` (`0` means there are no dedicated workers, but the
+operation can be performed by free non-dedicated workers)
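As a worked example of the allocation described above, with `MaxWorkers=4`, `DedicatedPeriodicWorkers=2` and `DedicatedBurstWorkers=1`, one worker (4 - 2 - 1 = 1) remains in the shared pool and can serve either task. The corresponding settings (illustrative values):

```console
$ ipfs config --json Provide.DHT.MaxWorkers 4
$ ipfs config --json Provide.DHT.DedicatedPeriodicWorkers 2
$ ipfs config --json Provide.DHT.DedicatedBurstWorkers 1
```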
+#### `Provide.DHT.MaxProvideConnsPerWorker`
+
+Maximum number of connections that a single worker can use to send provider
+records over the network.
+
+When reproviding CIDs corresponding to a keyspace region, the reprovider must
+send a provider record to the 20 closest peers to the CID (in XOR distance) for
+each CID belonging to this keyspace region.
+
+The reprovider opens a connection to a peer from that region and sends it all its
+allocated provider records. Once done, it opens a connection to the next peer
+from that keyspace region until all provider records are assigned.
+
+This option defines how many such connections can be open concurrently by a
+single worker.
+
+Default: `16`
+
+Type: `optionalInteger` (non-negative)
+
+#### `Provide.DHT.KeyStoreBatchSize`
+
+During the garbage collection, all keys stored in the KeyStore are removed, and
+the keys are streamed from a channel to fill the KeyStore again with up-to-date
+keys. Since a high number of CIDs to reprovide can easily fill up the memory,
+keys are read and written in batches to optimize for memory usage.
+
+This option defines how many multihashes should be contained within a batch. A
+multihash is usually represented by 34 bytes.
+
+Default: `16384` (~544 KiB per batch)
+
+Type: `optionalInteger` (non-negative)
+
+#### `Provide.DHT.OfflineDelay`
+
+The `SweepingProvider` has 3 states: `ONLINE`, `DISCONNECTED` and `OFFLINE`. It
+starts `OFFLINE`, and as the node bootstraps, it changes its state to `ONLINE`.
+
+When the provider loses connection to all DHT peers, it switches to the
+`DISCONNECTED` state. In this state, new provides will be added to the provide
+queue, and provided as soon as the node comes back online.
+
+After a node has been `DISCONNECTED` for `OfflineDelay`, it goes to `OFFLINE`
+state. When `OFFLINE`, the provider drops the provide queue, and returns errors
+to new provide requests. However, when `OFFLINE` the provider still adds the
+keys to its state, so keys will eventually be provided in the
+[`Provide.DHT.Interval`](#providedhtinterval) after the provider comes back
+`ONLINE`.
+
+Default: `2h`
+
+Type: `optionalDuration`
+
+## `Provider`
+
+### `Provider.Enabled`
+
+**REMOVED**
+
+Replaced with [`Provide.Enabled`](#provideenabled).
+
+### `Provider.Strategy`
+
+**REMOVED**
+
+This field was unused. Use [`Provide.Strategy`](#providestrategy) instead.
+
+### `Provider.WorkerCount`
+
+**REMOVED**
+
+Replaced with [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers).
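After the 17-to-18 migration you can verify that the new section replaced the removed keys, e.g. by inspecting the config (illustrative):

```console
$ ipfs config show | grep -A 3 '"Provide"'
```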
 ## `Pubsub`

 **DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
@@ -2050,212 +2287,15 @@ Type: `array[peering]`

 ### `Reprovider.Interval`

-Sets the time between rounds of reproviding local content to the routing
-system.
+**REMOVED**

-- If unset, it uses the implicit safe default.
-- If set to the value `"0"` it will disable content reproviding.
+Replaced with [`Provide.DHT.Interval`](#providedhtinterval).

-Note: disabling content reproviding will result in other nodes on the network
-not being able to discover that you have the objects that you have. If you want
-to have this disabled and keep the network aware of what you have, you must
-manually announce your content periodically or run your own routing system
-and convince users to add it to [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).

-> [!CAUTION]
-> To maintain backward-compatibility, setting `Reprovider.Interval=0` will also disable Provider system (equivalent of `Provider.Enabled=false`)

-Default: `22h` (`DefaultReproviderInterval`)

-Type: `optionalDuration` (unset for the default)

 ### `Reprovider.Strategy`

-Tells reprovider what should be announced. Valid strategies are:
+**REMOVED**

-- `"all"` - announce all CIDs of stored blocks
-- `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks)
-  - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins
-- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`)
-  - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks.
-    It makes sense only for use cases where the entire DAG is fetched in full,
-    and a graceful resume does not have to be guaranteed: the lack of child
-    announcements means an interrupted retrieval won't be able to find
-    providers for the missing block in the middle of a file, unless the peer
-    happens to already be connected to a provider and ask for child CID over
-    bitswap.
-- `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`)
-  - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced.
-- `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies.
-  - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache.
-  - Order: first `pinned` and then the locally available part of `mfs`.

-**Strategy changes automatically clear the provide queue.** When you change `Reprovider.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`.

-**Memory requirements:**

-- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT.
-- This is due to the use of a buffered provider, which avoids holding a lock on the entire pinset during the reprovide cycle.

-Default: `"all"`

-Type: `optionalString` (unset for the default)

-### Reprovider.Sweep

-Reprovider Sweep is a resource efficient technique for advertising content to
-the Amino DHT swarm.

-The Reprovider module tracks the keys that should be periodically reprovided in
-the `KeyStore`. It splits the keys into DHT keyspace regions by proximity (XOR
-distance), and schedules when reprovides should happen in order to spread the
-reprovide operation over time to avoid a spike in resource utilization. It
-basically sweeps the keyspace _from left to right_ over the
-[`Reprovider.Interval`](#reproviderinterval) time period, and reprovides keys
-matching to the visited keyspace region.

-Reprovider Sweep aims at replacing the inefficient legacy `boxo/provider`
-module, and is currently opt-in.

-Whenever new keys should be advertised to the Amino DHT, `kubo` calls
-`StartProviding()`, triggering an initial `provide` operation for the given
-keys. The keys will be added to the `KeyStore` tracking which keys should be
-reprovided and when they should be reprovided. Calling `StopProviding()`
-removes the keys from the `KeyStore`. However, it is currently tricky for
-`kubo` to detect when a key should stop being advertised. Hence, `kubo` will
-periodically refresh the `KeyStore` at each [`Reprovider.Interval`](#reproviderinterval)
-by providing it a channel of all the keys it is expected to contain according
-to the [`Reprovider.Strategy`](#reproviderstrategy). During this operation,
-all keys in the `Keystore` are purged, and only the given ones remain scheduled.

-#### Reprovider.Sweep.Enabled

-Whether Reprovider Sweep is enabled. If not enabled, the
-[`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is used for
-both provides and reprovides.

-Default: `false`

-Type: `flag`

-#### Reprovider.Sweep.MaxWorkers

-The maximum number of workers used by the `SweepingReprovider` to provide and
-reprovide CIDs to the DHT swarm.

-A worker performs Kademlia `GetClosestPeers` operations (max 1 at a time) to
-explore a region of the DHT keyspace, and then sends provider records to the
-nodes from that keyspace region. `GetClosestPeers` is capped to `10` concurrent
-connections [`amino` DHT
-defaults](https://github.com/libp2p/go-libp2p-kad-dht/blob/master/amino/defaults.go).
-The number of simultaneous connections used to send provider records is defined
-by
-[`Reprovider.Sweep.MaxProvideConnsPerWorker`](#reprovidersweepmaxprovideconnsperworker).

-The workers are split between two tasks categories:

-1. Periodic reprovides (see
-   [`Reprovider.Sweep.DedicatedPeriodicWorkers`](#reprovidersweepdedicatedperiodicworkers))
-2. Burst provides (see
-   [`Reprovider.Sweep.DedicatedBurstWorkers`](#reprovidersweepdedicatedburstworkers))

-[`Reprovider.Sweep.DedicatedPeriodicWorkers`](#reprovidersweepdedicatedperiodicworkers)
-workers are allocated to the periodic reprovides only,
-[`Reprovider.Sweep.DedicatedBurstWorkers`](#reprovidersweepdedicatedburstworkers)
-workers are allocated to burst provides only, and the rest of
-[`Reprovider.Sweep.MaxWorkers`](#reprovidersweepmaxworkers) can be used for
-either task (first come, first served).

-Default: `4`

-Type: `optionalInteger` (non-negative)

-#### Reprovider.Sweep.DedicatedPeriodicWorkers

-Number of workers dedicated to periodic keyspace region reprovides.

-Among the [`Reprovider.Sweep.MaxWorkers`](#reprovidersweepmaxworkers), this
-number of workers will be dedicated to the periodic region reprovide only. In
-addition to these, if there are available workers in the pool, they can also be
-used for periodic reprovides.

-Default: `2`

-Type: `optionalInteger` (`0` means there are no dedicated workers, but the
-operation can be performed by free non-dedicated workers)

-#### Reprovider.Sweep.DedicatedBurstWorkers

-Number of workers dedicated to burst provides.

-Burst provides are triggered when a new keys must be advertised to the DHT
-immediately, or when a node comes back online and must catch up the reprovides
-that should have happened while it was offline.

-Among the [`Reprovider.Sweep.MaxWorkers`](#reprovidersweepmaxworkers), this
-number of workers will be dedicated to burst provides only. In addition to
-these, if there are available workers in the pool, they can also be used for
-burst provides.

-Default: `1`

-Type: `optionalInteger` (`0` means there are no dedicated workers, but the
-operation can be performed by free non-dedicated workers)

-#### Reprovider.Sweep.MaxProvideConnsPerWorker

-Maximum number of connections that a single worker can use to send provider
-records over the network.

-When reproviding CIDs corresponding to a keyspace region, the reprovider must
-send a provider record to the 20 closest peers to the CID (in XOR distance) for
-each CID belonging to this keyspace region.

-The reprovider opens a connection to a peer from that region, send it all its
-allocated provider records. Once done, it opens a connection to the next peer
-from that keyspace region until all provider records are assigned.

-This option defines how many such connections can be open concurrently by a
-single worker.

-Default: `16`

-Type: `optionalInteger` (non-negative)

-#### Reprovider.Sweep.KeyStoreBatchSize

-During the garbage collection, all keys stored in the KeyStore are removed, and
-the keys are streamed from a channel to fill the KeyStore again with up-to-date
-keys. Since a high number of CIDs to reprovide can easily fill up the memory,
-keys are read and written in batches to optimize for memory usage.

-This option defines how many multihashes should be contained within a batch. A
-multihash is usually represented by 34 bytes.

-Default: `16384` (~544 KiB per batch)

-Type: `optionalInteger` (non-negative)

-#### Reprovider.Sweep.OfflineDelay

-The `SweepingProvider` has 3 states: `ONLINE`, `DISCONNECTED` and `OFFLINE`. It
-starts `OFFLINE`, and as the node bootstraps, it changes its state to `ONLINE`.

-When the provider loses connection to all DHT peers, it switches to the
-`DISCONNECTED` state. In this state, new provides will be added to the provide
-queue, and provided as soon as the node comes back online.

-After a node has been `DISCONNECTED` for `OfflineDelay`, it goes to `OFFLINE`
-state. When `OFFLINE`, the provider drops the provide queue, and returns errors
-to new provide requests. However, when `OFFLINE` the provide still adds the
-keys to its state, so keys will eventually be provided in the
-[`Reprovider.Interval`](#reproviderinterval) after the provider comes back
-`ONLINE`.

-Default: `2h`

-Type: `optionalDuration`

+Replaced with [`Provide.Strategy`](#providestrategy).
 ## `Routing`

 Contains options for content, peer, and IPNS routing mechanisms.
@@ -2334,6 +2374,9 @@ When it is enabled:
 - Client DHT operations (reads and writes) should complete much faster
 - The provider will now use a keyspace sweeping mode allowing to keep alive
   CID sets that are multiple orders of magnitude larger.
+  - **Note:** For improved provide/reprovide operations specifically, consider using
+    [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled) instead, which offers similar
+    benefits with lower resource consumption.
 - The standard Bucket-Routing-Table DHT will still run for the DHT server (if
   the DHT server is enabled). This means the classical routing table will
   still be used to answer other nodes.
@@ -2346,7 +2389,7 @@ When it is enabled:
 - The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds
 - Users who previously had a lot of content but were unable to advertise it on the network will see an increase in
   egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data
-  entering your node that you don't want to advertise, then consider using [Reprovider Strategies](#reproviderstrategy)
+  entering your node that you don't want to advertise, then consider using [Provide Strategies](#providestrategy)
   to reduce the number of CIDs that you are reproviding. Similarly, if you are running a node that deals mostly with
   short-lived temporary data (e.g. you use a separate node for ingesting data then for storing and serving it) then
   you may benefit from using [Strategic Providing](experimental-features.md#strategic-providing) to prevent advertising
@@ -3618,7 +3661,7 @@ Reduces daemon overhead on the system by disabling optional swarm services.

 ### `announce-off` profile

-Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).
+Disables [Provide](#provide) system (and announcing to Amino DHT).

 > [!CAUTION]
 > The main use case for this is setups with manual Peering.Peers config.
@@ -3628,7 +3671,7 @@ Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).

 ### `announce-on` profile

-(Re-)enables [Reprovider](#reprovider) system (reverts [`announce-off` profile](#announce-off-profile)).
+(Re-)enables [Provide](#provide) system (reverts [`announce-off` profile](#announce-off-profile)).

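Both profiles are applied with the usual profile command, for example:

```console
$ ipfs config profile apply announce-off
$ ipfs config profile apply announce-on
```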
 ### `legacy-cid-v0` profile

@@ -539,7 +539,7 @@ ipfs config --json Swarm.RelayClient.Enabled true

 `Experimental.StrategicProviding` was removed in Kubo v0.35.

-Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providerenabled) and [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy).
+Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provideenabled) and [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy).

 ## GraphSync

docs/metrics.md (new file, 118 lines)

@@ -0,0 +1,118 @@
## Kubo metrics

By default, a Prometheus endpoint is exposed by Kubo at `http://127.0.0.1:5001/debug/metrics/prometheus`.

It includes default [Prometheus Go client metrics](https://prometheus.io/docs/guides/go-application/) + Kubo-specific metrics listed below.

### Table of Contents

- [DHT RPC](#dht-rpc)
  - [Inbound RPC metrics](#inbound-rpc-metrics)
  - [Outbound RPC metrics](#outbound-rpc-metrics)
- [Provide](#provide)
  - [Legacy Provider](#legacy-provider)
  - [DHT Provider](#dht-provider)
- [Gateway (`boxo/gateway`)](#gateway-boxogateway)
  - [HTTP metrics](#http-metrics)
  - [Blockstore cache metrics](#blockstore-cache-metrics)
  - [Backend metrics](#backend-metrics)
- [Generic HTTP Servers](#generic-http-servers)
  - [Core HTTP metrics](#core-http-metrics-ipfs_http_)
  - [HTTP Server metrics](#http-server-metrics-http_server_)
- [OpenTelemetry Metadata](#opentelemetry-metadata)

> [!WARNING]
> This documentation is incomplete. For an up-to-date list of metrics available at daemon startup, see [test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile](https://github.com/ipfs/kubo/blob/master/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile).
>
> Additional metrics may appear during runtime as some components (like boxo/gateway) register metrics only after their first event occurs (e.g., HTTP request/response).

## DHT RPC

Metrics from `go-libp2p-kad-dht` for DHT RPC operations:

### Inbound RPC metrics

- `rpc_inbound_messages_total` - Counter: total messages received per RPC
- `rpc_inbound_message_errors_total` - Counter: total errors for received messages
- `rpc_inbound_bytes_[bucket|sum|count]` - Histogram: distribution of received bytes per RPC
- `rpc_inbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for inbound RPCs

### Outbound RPC metrics

- `rpc_outbound_messages_total` - Counter: total messages sent per RPC
- `rpc_outbound_message_errors_total` - Counter: total errors for sent messages
- `rpc_outbound_requests_total` - Counter: total requests sent
- `rpc_outbound_request_errors_total` - Counter: total errors for sent requests
- `rpc_outbound_bytes_[bucket|sum|count]` - Histogram: distribution of sent bytes per RPC
- `rpc_outbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for outbound RPCs

## Provide

### Legacy Provider

Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`:

- `provider_reprovider_provide_count` - Counter: total successful provide operations since node startup
- `provider_reprovider_reprovide_count` - Counter: total reprovide sweep operations since node startup

### DHT Provider

Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`:

- `total_provide_count_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)

> [!NOTE]
> These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`.

|
## Gateway (`boxo/gateway`)
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> These metrics are limited to [IPFS Gateway](https://specs.ipfs.tech/http-gateways/) endpoints. For general HTTP metrics across all endpoints, consider using a reverse proxy.
|
||||||
|
|
||||||
|
Gateway metrics appear after the first HTTP request is processed:
|
||||||
|
|
||||||
|
### HTTP metrics
|
||||||
|
|
||||||
|
- `ipfs_http_gw_responses_total{code}` - Counter: total HTTP responses by status code
|
||||||
|
- `ipfs_http_gw_retrieval_timeouts_total{code,truncated}` - Counter: requests that timed out during content retrieval
|
||||||
|
- `ipfs_http_gw_concurrent_requests` - Gauge: number of requests currently being processed
|
||||||
|
|
||||||
|
### Blockstore cache metrics
|
||||||
|
|
||||||
|
- `ipfs_http_blockstore_cache_hit` - Counter: global block cache hits
|
||||||
|
- `ipfs_http_blockstore_cache_requests` - Counter: global block cache requests
|
||||||
|
|
||||||
|
### Backend metrics
|
||||||
|
|
||||||
|
- `ipfs_gw_backend_api_call_duration_seconds_[bucket|sum|count]{backend_method}` - Histogram: time spent in IPFSBackend API calls
|
||||||
|
|
||||||
|
## Generic HTTP Servers
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> The metrics below are not very useful and exist mostly for historical reasons. If you need non-gateway HTTP metrics, it's better to put a reverse proxy in front of Kubo and use its metrics.
|
||||||
|
|
||||||
|
### Core HTTP metrics (`ipfs_http_*`)
|
||||||
|
|
||||||
|
Prometheus metrics for the HTTP API exposed at port 5001:
|
||||||
|
|
||||||
|
- `ipfs_http_requests_total{method,code,handler}` - Counter: total HTTP requests (Legacy - new metrics are provided by boxo/gateway for gateway traffic)
|
||||||
|
- `ipfs_http_request_duration_seconds[_sum|_count]{handler}` - Summary: request processing duration
|
||||||
|
- `ipfs_http_request_size_bytes[_sum|_count]{handler}` - Summary: request body sizes
|
||||||
|
- `ipfs_http_response_size_bytes[_sum|_count]{handler}` - Summary: response body sizes
|
||||||
|
|
||||||
|
### HTTP Server metrics (`http_server_*`)
|
||||||
|
|
||||||
|
Additional HTTP instrumentation for all handlers (Gateway, API commands, etc.):
|
||||||
|
|
||||||
|
- `http_server_request_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of request body sizes
|
||||||
|
- `http_server_request_duration_seconds_[bucket|count|sum]` - Histogram: distribution of request processing times
|
||||||
|
- `http_server_response_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of response body sizes
|
||||||
|
|
||||||
|
These metrics are automatically added to Gateway handlers, Hostname Gateway, Libp2p Gateway, and API command handlers.
|
||||||
|
|
||||||
|
## OpenTelemetry Metadata
|
||||||
|
|
||||||
|
Kubo uses Prometheus for metrics collection for historical reasons, but OpenTelemetry metrics are automatically exposed through the same Prometheus endpoint. These metadata metrics provide context about the instrumentation:
|
||||||
|
|
||||||
|
- `otel_scope_info` - Information about instrumentation libraries producing metrics
|
||||||
|
- `target_info` - Service metadata including version and instance information
|
||||||
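To make the provider-metric guidance above concrete, here is a minimal sketch (not part of this commit) that polls the endpoint documented in docs/metrics.md and prints the provide-related counters. It assumes a local daemon on the default API port and that the metric names match the ones listed above for the current Kubo version.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Default Kubo API address; adjust if Addresses.API was changed.
	resp, err := http.Get("http://127.0.0.1:5001/debug/metrics/prometheus")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		// Legacy provider counters and the sweep provider counter.
		if strings.HasPrefix(line, "provider_reprovider_") ||
			strings.HasPrefix(line, "total_provide_count") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}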
2  go.mod
@ -80,7 +80,9 @@ require (
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
	go.opentelemetry.io/contrib/propagators/autoprop v0.46.1
	go.opentelemetry.io/otel v1.38.0
+	go.opentelemetry.io/otel/exporters/prometheus v0.56.0
	go.opentelemetry.io/otel/sdk v1.38.0
+	go.opentelemetry.io/otel/sdk/metric v1.38.0
	go.opentelemetry.io/otel/trace v1.38.0
	go.uber.org/dig v1.19.0
	go.uber.org/fx v1.24.0
2  go.sum
@ -950,6 +950,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4D
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
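The two dependencies added above are what lets the daemon bridge OpenTelemetry metrics into the existing Prometheus endpoint. The snippet below is only a sketch of the general wiring pattern (Prometheus exporter used as a reader for an SDK meter provider), not the exact code added to the daemon; the registerer passed in is assumed to be the process-wide Prometheus registry.

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"go.opentelemetry.io/otel"
	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// setupMeterProvider installs a global OTel meter provider whose metrics are
// exported through the given Prometheus registerer, so instruments created by
// external libraries (e.g. go-libp2p-kad-dht) appear on /debug/metrics/prometheus.
func setupMeterProvider(reg prometheus.Registerer) error {
	exporter, err := otelprom.New(otelprom.WithRegisterer(reg))
	if err != nil {
		return err
	}
	otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)))
	return nil
}

func main() {
	if err := setupMeterProvider(prometheus.DefaultRegisterer); err != nil {
		panic(err)
	}
}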
@ -407,7 +407,7 @@ func (p *telemetryPlugin) collectBasicInfo() {
	}
	p.event.UptimeBucket = uptimeBucket

-	p.event.ReproviderStrategy = p.config.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
+	p.event.ReproviderStrategy = p.config.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
}

func (p *telemetryPlugin) collectRoutingInfo() {
97  repo/fsrepo/migrations/common/base.go  Normal file
@ -0,0 +1,97 @@

package common

import (
	"fmt"
	"io"
	"path/filepath"
)

// BaseMigration provides common functionality for migrations
type BaseMigration struct {
	FromVersion string
	ToVersion   string
	Description string
	Convert     func(in io.ReadSeeker, out io.Writer) error
}

// Versions returns the version string for this migration
func (m *BaseMigration) Versions() string {
	return fmt.Sprintf("%s-to-%s", m.FromVersion, m.ToVersion)
}

// configBackupSuffix returns the backup suffix for the config file
// e.g. ".16-to-17.bak" results in "config.16-to-17.bak"
func (m *BaseMigration) configBackupSuffix() string {
	return fmt.Sprintf(".%s-to-%s.bak", m.FromVersion, m.ToVersion)
}

// Reversible returns true as we keep backups
func (m *BaseMigration) Reversible() bool {
	return true
}

// Apply performs the migration
func (m *BaseMigration) Apply(opts Options) error {
	if opts.Verbose {
		fmt.Printf("applying %s repo migration\n", m.Versions())
		if m.Description != "" {
			fmt.Printf("> %s\n", m.Description)
		}
	}

	// Check version
	if err := CheckVersion(opts.Path, m.FromVersion); err != nil {
		return err
	}

	configPath := filepath.Join(opts.Path, "config")

	// Perform migration with backup
	if err := WithBackup(configPath, m.configBackupSuffix(), m.Convert); err != nil {
		return err
	}

	// Update version
	if err := WriteVersion(opts.Path, m.ToVersion); err != nil {
		if opts.Verbose {
			fmt.Printf("failed to update version file to %s\n", m.ToVersion)
		}
		return err
	}

	if opts.Verbose {
		fmt.Println("updated version file")
		fmt.Printf("Migration %s succeeded\n", m.Versions())
	}

	return nil
}

// Revert reverts the migration
func (m *BaseMigration) Revert(opts Options) error {
	if opts.Verbose {
		fmt.Println("reverting migration")
	}

	// Check we're at the expected version
	if err := CheckVersion(opts.Path, m.ToVersion); err != nil {
		return err
	}

	// Restore backup
	configPath := filepath.Join(opts.Path, "config")
	if err := RevertBackup(configPath, m.configBackupSuffix()); err != nil {
		return err
	}

	// Revert version
	if err := WriteVersion(opts.Path, m.FromVersion); err != nil {
		return err
	}

	if opts.Verbose {
		fmt.Printf("lowered version number to %s\n", m.FromVersion)
	}

	return nil
}
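As a usage sketch (the version numbers and config field below are hypothetical, not a migration that exists in this commit), a migration built on BaseMigration only has to declare its versions and supply a Convert function; Apply, Revert, backup handling, and version bookkeeping all come from the embedded type, as the real 16-to-17 and 17-to-18 migrations do later in this diff.

package mg99

import (
	"io"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

// Migration is a hypothetical 99-to-100 migration used purely for illustration.
var Migration = &common.BaseMigration{
	FromVersion: "99",
	ToVersion:   "100",
	Description: "Example migration: rename Foo.Bar to Foo.Baz",
	Convert:     convert,
}

func convert(in io.ReadSeeker, out io.Writer) error {
	confMap, err := common.ReadConfig(in)
	if err != nil {
		return err
	}
	// Rename a hypothetical config field; MoveField creates the target path as needed.
	if _, ok := common.GetField(confMap, "Foo.Bar"); ok {
		if err := common.MoveField(confMap, "Foo.Bar", "Foo.Baz"); err != nil {
			return err
		}
	}
	return common.WriteConfig(out, confMap)
}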
353  repo/fsrepo/migrations/common/config_helpers.go  Normal file
@ -0,0 +1,353 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"maps"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetField retrieves a field from a nested config structure using a dot-separated path
|
||||||
|
// Example: GetField(config, "DNS.Resolvers") returns config["DNS"]["Resolvers"]
|
||||||
|
func GetField(config map[string]any, path string) (any, bool) {
|
||||||
|
parts := strings.Split(path, ".")
|
||||||
|
current := config
|
||||||
|
|
||||||
|
for i, part := range parts {
|
||||||
|
// Last part - return the value
|
||||||
|
if i == len(parts)-1 {
|
||||||
|
val, exists := current[part]
|
||||||
|
return val, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// Navigate deeper
|
||||||
|
next, exists := current[part]
|
||||||
|
if !exists {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure it's a map
|
||||||
|
nextMap, ok := next.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
current = nextMap
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetField sets a field in a nested config structure using a dot-separated path
|
||||||
|
// It creates intermediate maps as needed
|
||||||
|
func SetField(config map[string]any, path string, value any) {
|
||||||
|
parts := strings.Split(path, ".")
|
||||||
|
current := config
|
||||||
|
|
||||||
|
for i, part := range parts {
|
||||||
|
// Last part - set the value
|
||||||
|
if i == len(parts)-1 {
|
||||||
|
current[part] = value
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Navigate or create intermediate maps
|
||||||
|
next, exists := current[part]
|
||||||
|
if !exists {
|
||||||
|
// Create new intermediate map
|
||||||
|
newMap := make(map[string]any)
|
||||||
|
current[part] = newMap
|
||||||
|
current = newMap
|
||||||
|
} else {
|
||||||
|
// Ensure it's a map
|
||||||
|
nextMap, ok := next.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
// Can't navigate further, replace with new map
|
||||||
|
newMap := make(map[string]any)
|
||||||
|
current[part] = newMap
|
||||||
|
current = newMap
|
||||||
|
} else {
|
||||||
|
current = nextMap
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteField removes a field from a nested config structure
|
||||||
|
func DeleteField(config map[string]any, path string) bool {
|
||||||
|
parts := strings.Split(path, ".")
|
||||||
|
|
||||||
|
// Handle simple case
|
||||||
|
if len(parts) == 1 {
|
||||||
|
_, exists := config[parts[0]]
|
||||||
|
delete(config, parts[0])
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// Navigate to parent
|
||||||
|
parentPath := strings.Join(parts[:len(parts)-1], ".")
|
||||||
|
parent, exists := GetField(config, parentPath)
|
||||||
|
if !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
parentMap, ok := parent.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := parts[len(parts)-1]
|
||||||
|
_, exists = parentMap[fieldName]
|
||||||
|
delete(parentMap, fieldName)
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveField moves a field from one location to another
|
||||||
|
func MoveField(config map[string]any, from, to string) error {
|
||||||
|
value, exists := GetField(config, from)
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("source field %s does not exist", from)
|
||||||
|
}
|
||||||
|
|
||||||
|
SetField(config, to, value)
|
||||||
|
DeleteField(config, from)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameField renames a field within the same parent
|
||||||
|
func RenameField(config map[string]any, path, oldName, newName string) error {
|
||||||
|
var parent map[string]any
|
||||||
|
if path == "" {
|
||||||
|
parent = config
|
||||||
|
} else {
|
||||||
|
p, exists := GetField(config, path)
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("parent path %s does not exist", path)
|
||||||
|
}
|
||||||
|
var ok bool
|
||||||
|
parent, ok = p.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("parent path %s is not a map", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
value, exists := parent[oldName]
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("field %s does not exist", oldName)
|
||||||
|
}
|
||||||
|
|
||||||
|
parent[newName] = value
|
||||||
|
delete(parent, oldName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefault sets a field value only if it doesn't already exist
|
||||||
|
func SetDefault(config map[string]any, path string, value any) {
|
||||||
|
if _, exists := GetField(config, path); !exists {
|
||||||
|
SetField(config, path, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TransformField applies a transformation function to a field value
|
||||||
|
func TransformField(config map[string]any, path string, transformer func(any) any) error {
|
||||||
|
value, exists := GetField(config, path)
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("field %s does not exist", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
newValue := transformer(value)
|
||||||
|
SetField(config, path, newValue)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureFieldIs checks if a field equals expected value, sets it if missing
|
||||||
|
func EnsureFieldIs(config map[string]any, path string, expected any) {
|
||||||
|
current, exists := GetField(config, path)
|
||||||
|
if !exists || current != expected {
|
||||||
|
SetField(config, path, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeInto merges multiple source fields into a destination map
|
||||||
|
func MergeInto(config map[string]any, destination string, sources ...string) {
|
||||||
|
var destMap map[string]any
|
||||||
|
|
||||||
|
// Get existing destination if it exists
|
||||||
|
if existing, exists := GetField(config, destination); exists {
|
||||||
|
if m, ok := existing.(map[string]any); ok {
|
||||||
|
destMap = m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge each source
|
||||||
|
for _, source := range sources {
|
||||||
|
if value, exists := GetField(config, source); exists {
|
||||||
|
if sourceMap, ok := value.(map[string]any); ok {
|
||||||
|
if destMap == nil {
|
||||||
|
destMap = make(map[string]any)
|
||||||
|
}
|
||||||
|
maps.Copy(destMap, sourceMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if destMap != nil {
|
||||||
|
SetField(config, destination, destMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyField copies a field value to a new location (keeps original)
|
||||||
|
func CopyField(config map[string]any, from, to string) error {
|
||||||
|
value, exists := GetField(config, from)
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("source field %s does not exist", from)
|
||||||
|
}
|
||||||
|
|
||||||
|
SetField(config, to, value)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertInterfaceSlice converts []interface{} to []string
|
||||||
|
func ConvertInterfaceSlice(slice []interface{}) []string {
|
||||||
|
result := make([]string, 0, len(slice))
|
||||||
|
for _, item := range slice {
|
||||||
|
if str, ok := item.(string); ok {
|
||||||
|
result = append(result, str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrCreateSection gets or creates a map section in config
|
||||||
|
func GetOrCreateSection(config map[string]any, path string) map[string]any {
|
||||||
|
existing, exists := GetField(config, path)
|
||||||
|
if exists {
|
||||||
|
if section, ok := existing.(map[string]any); ok {
|
||||||
|
return section
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new section
|
||||||
|
section := make(map[string]any)
|
||||||
|
SetField(config, path, section)
|
||||||
|
return section
|
||||||
|
}
|
||||||
|
|
||||||
|
// SafeCastMap safely casts to map[string]any with fallback to empty map
|
||||||
|
func SafeCastMap(value any) map[string]any {
|
||||||
|
if m, ok := value.(map[string]any); ok {
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
return make(map[string]any)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SafeCastSlice safely casts to []interface{} with fallback to empty slice
|
||||||
|
func SafeCastSlice(value any) []interface{} {
|
||||||
|
if s, ok := value.([]interface{}); ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return []interface{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceDefaultsWithAuto replaces default values with "auto" in a map
|
||||||
|
func ReplaceDefaultsWithAuto(values map[string]any, defaults map[string]string) map[string]string {
|
||||||
|
result := make(map[string]string)
|
||||||
|
for k, v := range values {
|
||||||
|
if vStr, ok := v.(string); ok {
|
||||||
|
if replacement, isDefault := defaults[vStr]; isDefault {
|
||||||
|
result[k] = replacement
|
||||||
|
} else {
|
||||||
|
result[k] = vStr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureSliceContains ensures a slice field contains a value
|
||||||
|
func EnsureSliceContains(config map[string]any, path string, value string) {
|
||||||
|
existing, exists := GetField(config, path)
|
||||||
|
if !exists {
|
||||||
|
SetField(config, path, []string{value})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if slice, ok := existing.([]interface{}); ok {
|
||||||
|
// Check if value already exists
|
||||||
|
for _, item := range slice {
|
||||||
|
if str, ok := item.(string); ok && str == value {
|
||||||
|
return // Already contains value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Add value
|
||||||
|
SetField(config, path, append(slice, value))
|
||||||
|
} else if strSlice, ok := existing.([]string); ok {
|
||||||
|
if !slices.Contains(strSlice, value) {
|
||||||
|
SetField(config, path, append(strSlice, value))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Replace with new slice containing value
|
||||||
|
SetField(config, path, []string{value})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceInSlice replaces old values with new in a slice field
|
||||||
|
func ReplaceInSlice(config map[string]any, path string, oldValue, newValue string) {
|
||||||
|
existing, exists := GetField(config, path)
|
||||||
|
if !exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if slice, ok := existing.([]interface{}); ok {
|
||||||
|
result := make([]string, 0, len(slice))
|
||||||
|
for _, item := range slice {
|
||||||
|
if str, ok := item.(string); ok {
|
||||||
|
if str == oldValue {
|
||||||
|
result = append(result, newValue)
|
||||||
|
} else {
|
||||||
|
result = append(result, str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
SetField(config, path, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMapSection gets a map section with error handling
|
||||||
|
func GetMapSection(config map[string]any, path string) (map[string]any, error) {
|
||||||
|
value, exists := GetField(config, path)
|
||||||
|
if !exists {
|
||||||
|
return nil, fmt.Errorf("section %s does not exist", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
section, ok := value.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("section %s is not a map", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return section, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloneStringMap clones a map[string]any to map[string]string
|
||||||
|
func CloneStringMap(m map[string]any) map[string]string {
|
||||||
|
result := make(map[string]string, len(m))
|
||||||
|
for k, v := range m {
|
||||||
|
if str, ok := v.(string); ok {
|
||||||
|
result[k] = str
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmptySlice checks if a value is an empty slice
|
||||||
|
func IsEmptySlice(value any) bool {
|
||||||
|
if value == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if slice, ok := value.([]interface{}); ok {
|
||||||
|
return len(slice) == 0
|
||||||
|
}
|
||||||
|
if slice, ok := value.([]string); ok {
|
||||||
|
return len(slice) == 0
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
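A small, self-contained sketch of how the dot-path helpers in config_helpers.go behave; the config keys used here are only illustrative, the helpers work on any nested path of the raw JSON config map.

package main

import (
	"fmt"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

func main() {
	confMap := map[string]any{"Routing": map[string]any{}}

	// SetField creates intermediate maps as needed.
	common.SetField(confMap, "Provide.DHT.SweepEnabled", true)

	// SetDefault only writes when the field is absent.
	common.SetDefault(confMap, "Provide.Strategy", "all")

	// EnsureSliceContains appends the value unless it is already present.
	common.EnsureSliceContains(confMap, "Routing.DelegatedRouters", "auto")

	if v, ok := common.GetField(confMap, "Provide.DHT.SweepEnabled"); ok {
		fmt.Println(v) // true
	}
}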
16  repo/fsrepo/migrations/common/migration.go  Normal file
@ -0,0 +1,16 @@

// Package common contains common types and interfaces for file system repository migrations
package common

// Options contains migration options for embedded migrations
type Options struct {
	Path    string
	Verbose bool
}

// Migration is the interface that all migrations must implement
type Migration interface {
	Versions() string
	Apply(opts Options) error
	Revert(opts Options) error
	Reversible() bool
}
290  repo/fsrepo/migrations/common/testing_helpers.go  Normal file
@ -0,0 +1,290 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"maps"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCase represents a single migration test case
|
||||||
|
type TestCase struct {
|
||||||
|
Name string
|
||||||
|
InputConfig map[string]any
|
||||||
|
Assertions []ConfigAssertion
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigAssertion represents an assertion about the migrated config
|
||||||
|
type ConfigAssertion struct {
|
||||||
|
Path string
|
||||||
|
Expected any
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunMigrationTest runs a migration test with the given test case
|
||||||
|
func RunMigrationTest(t *testing.T, migration Migration, tc TestCase) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Convert input to JSON
|
||||||
|
inputJSON, err := json.MarshalIndent(tc.InputConfig, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to marshal input config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the migration's convert function
|
||||||
|
var output bytes.Buffer
|
||||||
|
if baseMig, ok := migration.(*BaseMigration); ok {
|
||||||
|
err = baseMig.Convert(bytes.NewReader(inputJSON), &output)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("migration failed: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t.Skip("migration is not a BaseMigration")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse output
|
||||||
|
var result map[string]any
|
||||||
|
err = json.Unmarshal(output.Bytes(), &result)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal output: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run assertions
|
||||||
|
for _, assertion := range tc.Assertions {
|
||||||
|
AssertConfigField(t, result, assertion.Path, assertion.Expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertConfigField asserts that a field in the config has the expected value
|
||||||
|
func AssertConfigField(t *testing.T, config map[string]any, path string, expected any) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
actual, exists := GetField(config, path)
|
||||||
|
if expected == nil {
|
||||||
|
if exists {
|
||||||
|
t.Errorf("expected field %s to not exist, but it has value: %v", path, actual)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
t.Errorf("expected field %s to exist with value %v, but it doesn't exist", path, expected)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle different types of comparisons
|
||||||
|
switch exp := expected.(type) {
|
||||||
|
case []string:
|
||||||
|
actualSlice, ok := actual.([]interface{})
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("field %s: expected []string, got %T", path, actual)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(exp) != len(actualSlice) {
|
||||||
|
t.Errorf("field %s: expected slice of length %d, got %d", path, len(exp), len(actualSlice))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i, expVal := range exp {
|
||||||
|
if actualSlice[i] != expVal {
|
||||||
|
t.Errorf("field %s[%d]: expected %v, got %v", path, i, expVal, actualSlice[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case map[string]string:
|
||||||
|
actualMap, ok := actual.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("field %s: expected map, got %T", path, actual)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for k, v := range exp {
|
||||||
|
if actualMap[k] != v {
|
||||||
|
t.Errorf("field %s[%s]: expected %v, got %v", path, k, v, actualMap[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if actual != expected {
|
||||||
|
t.Errorf("field %s: expected %v, got %v", path, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateTestConfig creates a basic test config with the given fields
|
||||||
|
func GenerateTestConfig(fields map[string]any) map[string]any {
|
||||||
|
// Start with a minimal valid config
|
||||||
|
config := map[string]any{
|
||||||
|
"Identity": map[string]any{
|
||||||
|
"PeerID": "QmTest",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge in the provided fields
|
||||||
|
maps.Copy(config, fields)
|
||||||
|
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateTestRepo creates a temporary test repository with the given version and config
|
||||||
|
func CreateTestRepo(t *testing.T, version int, config map[string]any) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
tempDir := t.TempDir()
|
||||||
|
|
||||||
|
// Write version file
|
||||||
|
versionPath := filepath.Join(tempDir, "version")
|
||||||
|
err := os.WriteFile(versionPath, []byte(fmt.Sprintf("%d", version)), 0644)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to write version file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write config file
|
||||||
|
configPath := filepath.Join(tempDir, "config")
|
||||||
|
configData, err := json.MarshalIndent(config, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to marshal config: %v", err)
|
||||||
|
}
|
||||||
|
err = os.WriteFile(configPath, configData, 0644)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to write config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tempDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertMigrationSuccess runs a full migration and checks that it succeeds
|
||||||
|
func AssertMigrationSuccess(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) map[string]any {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Create test repo
|
||||||
|
repoPath := CreateTestRepo(t, fromVersion, inputConfig)
|
||||||
|
|
||||||
|
// Run migration
|
||||||
|
opts := Options{
|
||||||
|
Path: repoPath,
|
||||||
|
Verbose: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := migration.Apply(opts)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("migration failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check version was updated
|
||||||
|
versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read version file: %v", err)
|
||||||
|
}
|
||||||
|
actualVersion := string(versionBytes)
|
||||||
|
if actualVersion != fmt.Sprintf("%d", toVersion) {
|
||||||
|
t.Errorf("expected version %d, got %s", toVersion, actualVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read and return the migrated config
|
||||||
|
configBytes, err := os.ReadFile(filepath.Join(repoPath, "config"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]any
|
||||||
|
err = json.Unmarshal(configBytes, &result)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertMigrationReversible checks that a migration can be reverted
|
||||||
|
func AssertMigrationReversible(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Create test repo at target version
|
||||||
|
repoPath := CreateTestRepo(t, toVersion, inputConfig)
|
||||||
|
|
||||||
|
// Create backup file (simulating a previous migration)
|
||||||
|
backupPath := filepath.Join(repoPath, fmt.Sprintf("config.%d-to-%d.bak", fromVersion, toVersion))
|
||||||
|
originalConfig, err := json.MarshalIndent(inputConfig, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to marshal original config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(backupPath, originalConfig, 0644); err != nil {
|
||||||
|
t.Fatalf("failed to write backup file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run revert
|
||||||
|
if err := migration.Revert(Options{Path: repoPath}); err != nil {
|
||||||
|
t.Fatalf("revert failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify version was reverted
|
||||||
|
versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read version file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if actualVersion := string(versionBytes); actualVersion != fmt.Sprintf("%d", fromVersion) {
|
||||||
|
t.Errorf("expected version %d after revert, got %s", fromVersion, actualVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify config was reverted
|
||||||
|
configBytes, err := os.ReadFile(filepath.Join(repoPath, "config"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read reverted config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var revertedConfig map[string]any
|
||||||
|
if err := json.Unmarshal(configBytes, &revertedConfig); err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal reverted config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare reverted config with original
|
||||||
|
compareConfigs(t, inputConfig, revertedConfig, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// compareConfigs recursively compares two config maps and reports differences
|
||||||
|
func compareConfigs(t *testing.T, expected, actual map[string]any, path string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Build current path helper
|
||||||
|
buildPath := func(key string) string {
|
||||||
|
if path == "" {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
return path + "." + key
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check all expected fields exist and match
|
||||||
|
for key, expectedValue := range expected {
|
||||||
|
currentPath := buildPath(key)
|
||||||
|
|
||||||
|
actualValue, exists := actual[key]
|
||||||
|
if !exists {
|
||||||
|
t.Errorf("reverted config missing field %s", currentPath)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch exp := expectedValue.(type) {
|
||||||
|
case map[string]any:
|
||||||
|
act, ok := actualValue.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("field %s: expected map, got %T", currentPath, actualValue)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
compareConfigs(t, exp, act, currentPath)
|
||||||
|
default:
|
||||||
|
if !reflect.DeepEqual(expectedValue, actualValue) {
|
||||||
|
t.Errorf("field %s: expected %v, got %v after revert",
|
||||||
|
currentPath, expectedValue, actualValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for unexpected fields using maps.Keys (Go 1.23+)
|
||||||
|
for key := range actual {
|
||||||
|
if _, exists := expected[key]; !exists {
|
||||||
|
t.Errorf("reverted config has unexpected field %s", buildPath(key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
107  repo/fsrepo/migrations/common/utils.go  Normal file
@ -0,0 +1,107 @@

package common

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile"
)

// CheckVersion verifies the repo is at the expected version
func CheckVersion(repoPath string, expectedVersion string) error {
	versionPath := filepath.Join(repoPath, "version")
	versionBytes, err := os.ReadFile(versionPath)
	if err != nil {
		return fmt.Errorf("could not read version file: %w", err)
	}
	version := strings.TrimSpace(string(versionBytes))
	if version != expectedVersion {
		return fmt.Errorf("expected version %s, got %s", expectedVersion, version)
	}
	return nil
}

// WriteVersion writes the version to the repo
func WriteVersion(repoPath string, version string) error {
	versionPath := filepath.Join(repoPath, "version")
	return os.WriteFile(versionPath, []byte(version), 0644)
}

// Must panics if the error is not nil. Use only for errors that cannot be handled gracefully.
func Must(err error) {
	if err != nil {
		panic(fmt.Errorf("error can't be dealt with transactionally: %w", err))
	}
}

// WithBackup performs a config file operation with automatic backup and rollback on error
func WithBackup(configPath string, backupSuffix string, fn func(in io.ReadSeeker, out io.Writer) error) error {
	in, err := os.Open(configPath)
	if err != nil {
		return err
	}
	defer in.Close()

	// Create backup
	backup, err := atomicfile.New(configPath+backupSuffix, 0600)
	if err != nil {
		return err
	}

	// Copy to backup
	if _, err := backup.ReadFrom(in); err != nil {
		Must(backup.Abort())
		return err
	}

	// Reset input for reading
	if _, err := in.Seek(0, io.SeekStart); err != nil {
		Must(backup.Abort())
		return err
	}

	// Create output file
	out, err := atomicfile.New(configPath, 0600)
	if err != nil {
		Must(backup.Abort())
		return err
	}

	// Run the conversion function
	if err := fn(in, out); err != nil {
		Must(out.Abort())
		Must(backup.Abort())
		return err
	}

	// Close everything on success
	Must(out.Close())
	Must(backup.Close())

	return nil
}

// RevertBackup restores a backup file
func RevertBackup(configPath string, backupSuffix string) error {
	return os.Rename(configPath+backupSuffix, configPath)
}

// ReadConfig reads and unmarshals a JSON config file into a map
func ReadConfig(r io.Reader) (map[string]any, error) {
	confMap := make(map[string]any)
	if err := json.NewDecoder(r).Decode(&confMap); err != nil {
		return nil, err
	}
	return confMap, nil
}

// WriteConfig marshals and writes a config map as indented JSON
func WriteConfig(w io.Writer, config map[string]any) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	return enc.Encode(config)
}
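WithBackup is the primitive that BaseMigration.Apply builds on; a standalone sketch of calling it directly looks like the following (the repo path and backup suffix are made up for illustration).

package main

import (
	"io"
	"log"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

func main() {
	// Back up the config, run the transform, and roll back automatically on error.
	err := common.WithBackup("/tmp/ipfs-repo/config", ".example.bak", func(in io.ReadSeeker, out io.Writer) error {
		confMap, err := common.ReadConfig(in)
		if err != nil {
			return err
		}
		// Example transform: make sure an AutoConf section exists.
		common.SetDefault(confMap, "AutoConf", map[string]any{})
		return common.WriteConfig(out, confMap)
	})
	if err != nil {
		log.Fatal(err)
	}
}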
@ -6,25 +6,30 @@ import (
	"log"
	"os"

+	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
	mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration"
+	mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration"
)

-// EmbeddedMigration represents an embedded migration that can be run directly
-type EmbeddedMigration interface {
-	Versions() string
-	Apply(opts mg16.Options) error
-	Revert(opts mg16.Options) error
-	Reversible() bool
-}
-
-// embeddedMigrations contains all embedded migrations
-var embeddedMigrations = map[string]EmbeddedMigration{
-	"fs-repo-16-to-17": &mg16.Migration{},
+// embeddedMigrations contains all embedded migrations
+// Using a slice to maintain order and allow for future range-based operations
+var embeddedMigrations = []common.Migration{
+	mg16.Migration,
+	mg17.Migration,
+}
+
+// migrationsByName provides quick lookup by name
+var migrationsByName = make(map[string]common.Migration)
+
+func init() {
+	for _, m := range embeddedMigrations {
+		migrationsByName["fs-repo-"+m.Versions()] = m
+	}
}

// RunEmbeddedMigration runs an embedded migration if available
func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir string, revert bool) error {
-	migration, exists := embeddedMigrations[migrationName]
+	migration, exists := migrationsByName[migrationName]
	if !exists {
		return fmt.Errorf("embedded migration %s not found", migrationName)
	}
@ -36,7 +41,7 @@ func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir str
	logger := log.New(os.Stdout, "", 0)
	logger.Printf("Running embedded migration %s...", migrationName)

-	opts := mg16.Options{
+	opts := common.Options{
		Path:    ipfsDir,
		Verbose: true,
	}
@ -58,7 +63,7 @@ func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir str

// HasEmbeddedMigration checks if a migration is available as embedded
func HasEmbeddedMigration(migrationName string) bool {
-	_, exists := embeddedMigrations[migrationName]
+	_, exists := migrationsByName[migrationName]
	return exists
}
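For reference, a hedged sketch of how a caller could drive the registry above; the import path is assumed from the repository layout and the repo path is purely illustrative.

package main

import (
	"context"
	"log"

	// Assumed import path for the package modified in the diff above.
	migrations "github.com/ipfs/kubo/repo/fsrepo/migrations"
)

func main() {
	name := "fs-repo-17-to-18"
	if !migrations.HasEmbeddedMigration(name) {
		log.Fatalf("no embedded migration named %s", name)
	}
	// false applies the migration forward; true would revert it from the config backup.
	if err := migrations.RunEmbeddedMigration(context.Background(), name, "/home/user/.ipfs", false); err != nil {
		log.Fatal(err)
	}
}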
@ -28,6 +28,7 @@ import (
	"fmt"
	"os"

+	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
	mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration"
)

@ -43,17 +44,16 @@ func main() {
		os.Exit(1)
	}

-	m := mg16.Migration{}
-	opts := mg16.Options{
+	opts := common.Options{
		Path:    *path,
		Verbose: *verbose,
	}

	var err error
	if *revert {
-		err = m.Revert(opts)
+		err = mg16.Migration.Revert(opts)
	} else {
-		err = m.Apply(opts)
+		err = mg16.Migration.Apply(opts)
	}

	if err != nil {
|||||||
@ -7,27 +7,13 @@
package mg16

import (
-	"encoding/json"
-	"fmt"
	"io"
-	"os"
-	"path/filepath"
-	"reflect"
	"slices"
-	"strings"

	"github.com/ipfs/kubo/config"
-	"github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile"
+	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

-// Options contains migration options for embedded migrations
-type Options struct {
-	Path    string
-	Verbose bool
-}
-
-const backupSuffix = ".16-to-17.bak"
-
// DefaultBootstrapAddresses are the hardcoded bootstrap addresses from Kubo 0.36
// for IPFS. they are nodes run by the IPFS team. docs on these later.
// As with all p2p networks, bootstrap is an important security concern.
@ -42,148 +28,23 @@ var DefaultBootstrapAddresses = []string{
|
|||||||
"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
|
"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
|
||||||
}
|
}
|
||||||
|
|
||||||
// Migration implements the migration described above.
|
// Migration is the main exported migration for 16-to-17
|
||||||
type Migration struct{}
|
var Migration = &common.BaseMigration{
|
||||||
|
FromVersion: "16",
|
||||||
// Versions returns the current version string for this migration.
|
ToVersion: "17",
|
||||||
func (m Migration) Versions() string {
|
Description: "Upgrading config to use AutoConf system",
|
||||||
return "16-to-17"
|
Convert: convert,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reversible returns true, as we keep old config around
|
// NewMigration creates a new migration instance (for compatibility)
|
||||||
func (m Migration) Reversible() bool {
|
func NewMigration() common.Migration {
|
||||||
return true
|
return Migration
|
||||||
}
|
|
||||||
|
|
||||||
// Apply update the config.
|
|
||||||
func (m Migration) Apply(opts Options) error {
|
|
||||||
if opts.Verbose {
|
|
||||||
fmt.Printf("applying %s repo migration\n", m.Versions())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check version
|
|
||||||
if err := checkVersion(opts.Path, "16"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Verbose {
|
|
||||||
fmt.Println("> Upgrading config to use AutoConf system")
|
|
||||||
}
|
|
||||||
|
|
||||||
path := filepath.Join(opts.Path, "config")
|
|
||||||
in, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// make backup
|
|
||||||
backup, err := atomicfile.New(path+backupSuffix, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := backup.ReadFrom(in); err != nil {
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := in.Seek(0, io.SeekStart); err != nil {
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a temp file to write the output to on success
|
|
||||||
out, err := atomicfile.New(path, 0600)
|
|
||||||
if err != nil {
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
panicOnError(in.Close())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := convert(in, out, opts.Path); err != nil {
|
|
||||||
panicOnError(out.Abort())
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
panicOnError(in.Close())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := in.Close(); err != nil {
|
|
||||||
panicOnError(out.Abort())
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeVersion(opts.Path, "17"); err != nil {
|
|
||||||
fmt.Println("failed to update version file to 17")
|
|
||||||
// There was an error so abort writing the output and clean up temp file
|
|
||||||
panicOnError(out.Abort())
|
|
||||||
panicOnError(backup.Abort())
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
// Write the output and clean up temp file
|
|
||||||
panicOnError(out.Close())
|
|
||||||
panicOnError(backup.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Verbose {
|
|
||||||
fmt.Println("updated version file")
|
|
||||||
fmt.Println("Migration 16 to 17 succeeded")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// panicOnError is reserved for checks we can't solve transactionally if an error occurs
|
|
||||||
func panicOnError(e error) {
|
|
||||||
if e != nil {
|
|
||||||
panic(fmt.Errorf("error can't be dealt with transactionally: %w", e))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m Migration) Revert(opts Options) error {
|
|
||||||
if opts.Verbose {
|
|
||||||
fmt.Println("reverting migration")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := checkVersion(opts.Path, "17"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := filepath.Join(opts.Path, "config")
|
|
||||||
if err := os.Rename(cfg+backupSuffix, cfg); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeVersion(opts.Path, "16"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if opts.Verbose {
|
|
||||||
fmt.Println("lowered version number to 16")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkVersion verifies the repo is at the expected version
|
|
||||||
func checkVersion(repoPath string, expectedVersion string) error {
|
|
||||||
versionPath := filepath.Join(repoPath, "version")
|
|
||||||
versionBytes, err := os.ReadFile(versionPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("could not read version file: %w", err)
|
|
||||||
}
|
|
||||||
version := strings.TrimSpace(string(versionBytes))
|
|
||||||
if version != expectedVersion {
|
|
||||||
return fmt.Errorf("expected version %s, got %s", expectedVersion, version)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeVersion writes the version to the repo
|
|
||||||
func writeVersion(repoPath string, version string) error {
|
|
||||||
versionPath := filepath.Join(repoPath, "version")
|
|
||||||
return os.WriteFile(versionPath, []byte(version), 0644)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// convert converts the config from version 16 to 17
|
// convert converts the config from version 16 to 17
|
||||||
func convert(in io.Reader, out io.Writer, repoPath string) error {
|
func convert(in io.ReadSeeker, out io.Writer) error {
|
||||||
confMap := make(map[string]any)
|
confMap, err := common.ReadConfig(in)
|
||||||
if err := json.NewDecoder(in).Decode(&confMap); err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -193,7 +54,7 @@ func convert(in io.Reader, out io.Writer, repoPath string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Migrate Bootstrap peers
|
// Migrate Bootstrap peers
|
||||||
if err := migrateBootstrap(confMap, repoPath); err != nil {
|
if err := migrateBootstrap(confMap); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -213,88 +74,62 @@ func convert(in io.Reader, out io.Writer, repoPath string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Save new config
|
// Save new config
|
||||||
fixed, err := json.MarshalIndent(confMap, "", " ")
|
return common.WriteConfig(out, confMap)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := out.Write(fixed); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = out.Write([]byte("\n"))
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// enableAutoConf adds AutoConf section to config
|
// enableAutoConf adds AutoConf section to config
|
||||||
func enableAutoConf(confMap map[string]any) error {
|
func enableAutoConf(confMap map[string]any) error {
|
||||||
// Check if AutoConf already exists
|
// Add empty AutoConf section if it doesn't exist - all fields will use implicit defaults:
|
||||||
if _, exists := confMap["AutoConf"]; exists {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add empty AutoConf section - all fields will use implicit defaults:
|
|
||||||
// - Enabled defaults to true (via DefaultAutoConfEnabled)
|
// - Enabled defaults to true (via DefaultAutoConfEnabled)
|
||||||
// - URL defaults to mainnet URL (via DefaultAutoConfURL)
|
// - URL defaults to mainnet URL (via DefaultAutoConfURL)
|
||||||
// - RefreshInterval defaults to 24h (via DefaultAutoConfRefreshInterval)
|
// - RefreshInterval defaults to 24h (via DefaultAutoConfRefreshInterval)
|
||||||
// - TLSInsecureSkipVerify defaults to false (no WithDefault, but false is zero value)
|
// - TLSInsecureSkipVerify defaults to false (no WithDefault, but false is zero value)
|
||||||
confMap["AutoConf"] = map[string]any{}
|
common.SetDefault(confMap, "AutoConf", map[string]any{})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// migrateBootstrap migrates bootstrap peers to use "auto"
|
// migrateBootstrap migrates bootstrap peers to use "auto"
|
||||||
func migrateBootstrap(confMap map[string]any, repoPath string) error {
|
func migrateBootstrap(confMap map[string]any) error {
|
||||||
bootstrap, exists := confMap["Bootstrap"]
|
bootstrap, exists := confMap["Bootstrap"]
|
||||||
if !exists {
|
if !exists {
|
||||||
// No bootstrap section, add "auto"
|
// No bootstrap section, add "auto"
|
||||||
confMap["Bootstrap"] = []string{"auto"}
|
confMap["Bootstrap"] = []string{config.AutoPlaceholder}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
bootstrapSlice, ok := bootstrap.([]interface{})
|
// Convert to string slice using helper
|
||||||
if !ok {
|
bootstrapPeers := common.ConvertInterfaceSlice(common.SafeCastSlice(bootstrap))
|
||||||
|
if len(bootstrapPeers) == 0 && bootstrap != nil {
|
||||||
// Invalid bootstrap format, replace with "auto"
|
// Invalid bootstrap format, replace with "auto"
|
||||||
confMap["Bootstrap"] = []string{"auto"}
|
confMap["Bootstrap"] = []string{config.AutoPlaceholder}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert to string slice
|
// Process bootstrap peers according to migration rules
|
||||||
var bootstrapPeers []string
|
newBootstrap := processBootstrapPeers(bootstrapPeers)
|
||||||
for _, peer := range bootstrapSlice {
|
|
||||||
if peerStr, ok := peer.(string); ok {
|
|
||||||
bootstrapPeers = append(bootstrapPeers, peerStr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if we should replace with "auto"
|
|
||||||
newBootstrap := processBootstrapPeers(bootstrapPeers, repoPath)
|
|
||||||
confMap["Bootstrap"] = newBootstrap
|
confMap["Bootstrap"] = newBootstrap
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// processBootstrapPeers processes bootstrap peers according to migration rules
|
// processBootstrapPeers processes bootstrap peers according to migration rules
|
||||||
func processBootstrapPeers(peers []string, repoPath string) []string {
|
func processBootstrapPeers(peers []string) []string {
|
||||||
// If empty, use "auto"
|
// If empty, use "auto"
|
||||||
if len(peers) == 0 {
|
if len(peers) == 0 {
|
||||||
return []string{"auto"}
|
return []string{config.AutoPlaceholder}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Separate default peers from custom ones
|
// Filter out default peers to get only custom ones
|
||||||
var customPeers []string
|
customPeers := slices.DeleteFunc(slices.Clone(peers), func(peer string) bool {
|
||||||
var hasDefaultPeers bool
|
return slices.Contains(DefaultBootstrapAddresses, peer)
|
||||||
|
})
|
||||||
|
|
||||||
for _, peer := range peers {
|
// Check if any default peers were removed
|
||||||
if slices.Contains(DefaultBootstrapAddresses, peer) {
|
hasDefaultPeers := len(customPeers) < len(peers)
|
||||||
hasDefaultPeers = true
|
|
||||||
} else {
|
|
||||||
customPeers = append(customPeers, peer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we have default peers, replace them with "auto"
|
// If we have default peers, replace them with "auto"
|
||||||
if hasDefaultPeers {
|
if hasDefaultPeers {
|
||||||
return append([]string{"auto"}, customPeers...)
|
return append([]string{config.AutoPlaceholder}, customPeers...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// No default peers found, keep as is
|
// No default peers found, keep as is
|
||||||
@@ -303,68 +138,25 @@ func processBootstrapPeers(peers []string, repoPath string) []string {

// migrateDNSResolvers migrates DNS resolvers to use "auto" for "." eTLD
func migrateDNSResolvers(confMap map[string]any) error {
-dnsSection, exists := confMap["DNS"]
-if !exists {
-// No DNS section, create it with "auto"
-confMap["DNS"] = map[string]any{
-"Resolvers": map[string]string{
-".": config.AutoPlaceholder,
-},
-}
-return nil
-}
-
-dns, ok := dnsSection.(map[string]any)
-if !ok {
-// Invalid DNS format, replace with "auto"
-confMap["DNS"] = map[string]any{
-"Resolvers": map[string]string{
-".": config.AutoPlaceholder,
-},
-}
-return nil
-}
-
-resolvers, exists := dns["Resolvers"]
-if !exists {
-// No resolvers, add "auto"
-dns["Resolvers"] = map[string]string{
-".": config.AutoPlaceholder,
-}
-return nil
-}
-
-resolversMap, ok := resolvers.(map[string]any)
-if !ok {
-// Invalid resolvers format, replace with "auto"
-dns["Resolvers"] = map[string]string{
-".": config.AutoPlaceholder,
-}
-return nil
-}
-
-// Convert to string map and replace default resolvers with "auto"
-stringResolvers := make(map[string]string)
+// Get or create DNS section
+dns := common.GetOrCreateSection(confMap, "DNS")
+
+// Get existing resolvers or create empty map
+resolvers := common.SafeCastMap(dns["Resolvers"])
+
+// Define default resolvers that should be replaced with "auto"
defaultResolvers := map[string]string{
-"https://dns.eth.limo/dns-query": "auto",
-"https://dns.eth.link/dns-query": "auto",
-"https://resolver.cloudflare-eth.com/dns-query": "auto",
+"https://dns.eth.limo/dns-query": config.AutoPlaceholder,
+"https://dns.eth.link/dns-query": config.AutoPlaceholder,
+"https://resolver.cloudflare-eth.com/dns-query": config.AutoPlaceholder,
}

-for k, v := range resolversMap {
-if vStr, ok := v.(string); ok {
-// Check if this is a default resolver that should be replaced
-if replacement, isDefault := defaultResolvers[vStr]; isDefault {
-stringResolvers[k] = replacement
-} else {
-stringResolvers[k] = vStr
-}
-}
-}
+// Replace default resolvers with "auto"
+stringResolvers := common.ReplaceDefaultsWithAuto(resolvers, defaultResolvers)

-// If "." is not set or empty, set it to "auto"
+// Ensure "." is set to "auto" if not already set
if _, exists := stringResolvers["."]; !exists {
-stringResolvers["."] = "auto"
+stringResolvers["."] = config.AutoPlaceholder
}

dns["Resolvers"] = stringResolvers
@@ -373,120 +165,57 @@ func migrateDNSResolvers(confMap map[string]any) error {

// migrateDelegatedRouters migrates DelegatedRouters to use "auto"
func migrateDelegatedRouters(confMap map[string]any) error {
-routing, exists := confMap["Routing"]
-if !exists {
-// No routing section, create it with "auto"
-confMap["Routing"] = map[string]any{
-"DelegatedRouters": []string{"auto"},
-}
-return nil
-}
-
-routingMap, ok := routing.(map[string]any)
-if !ok {
-// Invalid routing format, replace with "auto"
-confMap["Routing"] = map[string]any{
-"DelegatedRouters": []string{"auto"},
-}
-return nil
-}
-
-delegatedRouters, exists := routingMap["DelegatedRouters"]
-if !exists {
-// No delegated routers, add "auto"
-routingMap["DelegatedRouters"] = []string{"auto"}
-return nil
-}
+// Get or create Routing section
+routing := common.GetOrCreateSection(confMap, "Routing")
+
+// Get existing delegated routers
+delegatedRouters, exists := routing["DelegatedRouters"]

// Check if it's empty or nil
-if shouldReplaceWithAuto(delegatedRouters) {
-routingMap["DelegatedRouters"] = []string{"auto"}
+if !exists || common.IsEmptySlice(delegatedRouters) {
+routing["DelegatedRouters"] = []string{config.AutoPlaceholder}
return nil
}

// Process the list to replace cid.contact with "auto" and preserve others
-if slice, ok := delegatedRouters.([]interface{}); ok {
+routers := common.ConvertInterfaceSlice(common.SafeCastSlice(delegatedRouters))
var newRouters []string
hasAuto := false

-for _, router := range slice {
-if routerStr, ok := router.(string); ok {
-if routerStr == "https://cid.contact" {
+for _, router := range routers {
+if router == "https://cid.contact" {
if !hasAuto {
-newRouters = append(newRouters, "auto")
+newRouters = append(newRouters, config.AutoPlaceholder)
hasAuto = true
}
} else {
-newRouters = append(newRouters, routerStr)
-}
+newRouters = append(newRouters, router)
}
}

// If empty after processing, add "auto"
if len(newRouters) == 0 {
-newRouters = []string{"auto"}
+newRouters = []string{config.AutoPlaceholder}
-}
-
-routingMap["DelegatedRouters"] = newRouters
}

+routing["DelegatedRouters"] = newRouters
return nil
}

// migrateDelegatedPublishers migrates DelegatedPublishers to use "auto"
func migrateDelegatedPublishers(confMap map[string]any) error {
-ipns, exists := confMap["Ipns"]
-if !exists {
-// No IPNS section, create it with "auto"
-confMap["Ipns"] = map[string]any{
-"DelegatedPublishers": []string{"auto"},
-}
-return nil
-}
-
-ipnsMap, ok := ipns.(map[string]any)
-if !ok {
-// Invalid IPNS format, replace with "auto"
-confMap["Ipns"] = map[string]any{
-"DelegatedPublishers": []string{"auto"},
-}
-return nil
-}
-
-delegatedPublishers, exists := ipnsMap["DelegatedPublishers"]
-if !exists {
-// No delegated publishers, add "auto"
-ipnsMap["DelegatedPublishers"] = []string{"auto"}
-return nil
-}
+// Get or create Ipns section
+ipns := common.GetOrCreateSection(confMap, "Ipns")
+
+// Get existing delegated publishers
+delegatedPublishers, exists := ipns["DelegatedPublishers"]

// Check if it's empty or nil - only then replace with "auto"
// Otherwise preserve custom publishers
-if shouldReplaceWithAuto(delegatedPublishers) {
-ipnsMap["DelegatedPublishers"] = []string{"auto"}
+if !exists || common.IsEmptySlice(delegatedPublishers) {
+ipns["DelegatedPublishers"] = []string{config.AutoPlaceholder}
}
// If there are custom publishers, leave them as is

return nil
}

-// shouldReplaceWithAuto checks if a field should be replaced with "auto"
-func shouldReplaceWithAuto(field any) bool {
-// If it's nil, replace with "auto"
-if field == nil {
-return true
-}
-
-// If it's an empty slice, replace with "auto"
-if slice, ok := field.([]interface{}); ok {
-return len(slice) == 0
-}
-
-// If it's an empty array, replace with "auto"
-if reflect.TypeOf(field).Kind() == reflect.Slice {
-v := reflect.ValueOf(field)
-return v.Len() == 0
-}
-
-return false
-}
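For orientation, a hedged before/after sketch of what the "auto" migration above does to a config that still carries the old defaults (not part of the diff; the values are illustrative only and the peer addresses are hypothetical placeholders):

Before (repo v16):
  "Bootstrap": ["<one of DefaultBootstrapAddresses>", "/ip4/203.0.113.7/tcp/4001/p2p/<custom-peer>"]
  "DNS": { "Resolvers": { "eth.": "https://dns.eth.limo/dns-query" } }
  "Routing": { "DelegatedRouters": ["https://cid.contact"] }
  "Ipns": { "DelegatedPublishers": [] }

After (repo v17):
  "Bootstrap": ["auto", "/ip4/203.0.113.7/tcp/4001/p2p/<custom-peer>"]
  "DNS": { "Resolvers": { "eth.": "auto", ".": "auto" } }
  "Routing": { "DelegatedRouters": ["auto"] }
  "Ipns": { "DelegatedPublishers": ["auto"] }

Custom bootstrap peers, resolvers, routers and publishers are preserved; only the stock defaults and empty fields collapse to "auto".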
@@ -7,6 +7,7 @@ import (
"path/filepath"
"testing"

+"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -15,9 +16,7 @@ import (
func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} {
t.Helper()
var output bytes.Buffer
-// Use t.TempDir() for test isolation and parallel execution support
-tempDir := t.TempDir()
-err := convert(bytes.NewReader([]byte(input)), &output, tempDir)
+err := convert(bytes.NewReader([]byte(input)), &output)
require.NoError(t, err)

var result map[string]interface{}
@@ -137,13 +136,12 @@ func TestMigration(t *testing.T) {
require.NoError(t, err)

// Run migration
-migration := &Migration{}
-opts := Options{
+opts := common.Options{
Path: tempDir,
Verbose: true,
}

-err = migration.Apply(opts)
+err = Migration.Apply(opts)
require.NoError(t, err)

// Verify version was updated
@@ -191,7 +189,7 @@ func TestMigration(t *testing.T) {
assert.Equal(t, "auto", delegatedPublishers[0], "Expected DelegatedPublishers to be ['auto']")

// Test revert
-err = migration.Revert(opts)
+err = Migration.Revert(opts)
require.NoError(t, err)

// Verify version was reverted
@@ -273,7 +271,7 @@ func TestBootstrapMigration(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
-result := processBootstrapPeers(tt.peers, "")
+result := processBootstrapPeers(tt.peers)
require.Equal(t, len(tt.expected), len(result), "Expected %d peers, got %d", len(tt.expected), len(result))
for i, expected := range tt.expected {
assert.Equal(t, expected, result[i], "Expected peer %d to be %s", i, expected)

repo/fsrepo/migrations/fs-repo-17-to-18/main.go (new file, 60 lines)
@@ -0,0 +1,60 @@
// Package main implements fs-repo-17-to-18 migration for IPFS repositories.
//
// This migration consolidates the Provider and Reprovider configurations into
// a unified Provide configuration section.
//
// Changes made:
// - Migrates Provider.Enabled to Provide.Enabled
// - Migrates Provider.WorkerCount to Provide.DHT.MaxWorkers
// - Migrates Reprovider.Strategy to Provide.Strategy (converts "flat" to "all")
// - Migrates Reprovider.Interval to Provide.DHT.Interval
// - Removes deprecated Provider and Reprovider sections
//
// The migration is reversible and creates config.17-to-18.bak for rollback.
//
// Usage:
//
// fs-repo-17-to-18 -path /path/to/ipfs/repo [-verbose] [-revert]
//
// This migration is embedded in Kubo and runs automatically during daemon startup.
// This standalone binary is provided for manual migration scenarios.
package main

import (
"flag"
"fmt"
"os"

"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration"
)

func main() {
var path = flag.String("path", "", "Path to IPFS repository")
var verbose = flag.Bool("verbose", false, "Enable verbose output")
var revert = flag.Bool("revert", false, "Revert migration")
flag.Parse()

if *path == "" {
fmt.Fprintf(os.Stderr, "Error: -path flag is required\n")
flag.Usage()
os.Exit(1)
}

opts := common.Options{
Path: *path,
Verbose: *verbose,
}

var err error
if *revert {
err = mg17.Migration.Revert(opts)
} else {
err = mg17.Migration.Apply(opts)
}

if err != nil {
fmt.Fprintf(os.Stderr, "Migration failed: %v\n", err)
os.Exit(1)
}
}
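For orientation, a hedged sketch of the config change this binary applies (not part of the diff; the values are illustrative only):

Before (repo v17):
  "Provider":   { "Enabled": true, "WorkerCount": 8 }
  "Reprovider": { "Strategy": "flat", "Interval": "12h" }

After (repo v18):
  "Provide": {
    "Enabled": true,
    "Strategy": "all",
    "DHT": { "MaxWorkers": 8, "Interval": "12h" }
  }

The deprecated "flat" strategy is rewritten to "all"; the remaining fields move over unchanged, and the old Provider and Reprovider sections are deleted.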
repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go (new file, 121 lines)
@@ -0,0 +1,121 @@
// package mg17 contains the code to perform 17-18 repository migration in Kubo.
// This handles the following:
// - Migrate Provider and Reprovider configs to unified Provide config
// - Clear deprecated Provider and Reprovider fields
// - Increment repo version to 18
package mg17

import (
"fmt"
"io"

"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

// Migration is the main exported migration for 17-to-18
var Migration = &common.BaseMigration{
FromVersion: "17",
ToVersion: "18",
Description: "Migrating Provider and Reprovider configuration to unified Provide configuration",
Convert: convert,
}

// NewMigration creates a new migration instance (for compatibility)
func NewMigration() common.Migration {
return Migration
}

// convert performs the actual configuration transformation
func convert(in io.ReadSeeker, out io.Writer) error {
// Read the configuration
confMap, err := common.ReadConfig(in)
if err != nil {
return err
}

// Create new Provide section with DHT subsection from Provider and Reprovider
provide := make(map[string]any)
dht := make(map[string]any)
hasNonDefaultValues := false

// Migrate Provider fields if they exist
provider := common.SafeCastMap(confMap["Provider"])
if enabled, exists := provider["Enabled"]; exists {
provide["Enabled"] = enabled
// Log migration for non-default values
if enabledBool, ok := enabled.(bool); ok && !enabledBool {
fmt.Printf(" Migrated Provider.Enabled=%v to Provide.Enabled=%v\n", enabledBool, enabledBool)
hasNonDefaultValues = true
}
}
if workerCount, exists := provider["WorkerCount"]; exists {
dht["MaxWorkers"] = workerCount
// Log migration for all worker count values
if count, ok := workerCount.(float64); ok {
fmt.Printf(" Migrated Provider.WorkerCount=%v to Provide.DHT.MaxWorkers=%v\n", int(count), int(count))
hasNonDefaultValues = true

// Additional guidance for high WorkerCount
if count > 5 {
fmt.Printf(" ⚠️ For better resource utilization, consider enabling Provide.DHT.SweepEnabled=true\n")
fmt.Printf(" and adjusting Provide.DHT.DedicatedBurstWorkers if announcement of new CIDs\n")
fmt.Printf(" should take priority over periodic reprovide interval.\n")
}
}
}
// Note: Skip Provider.Strategy as it was unused

// Migrate Reprovider fields if they exist
reprovider := common.SafeCastMap(confMap["Reprovider"])
if strategy, exists := reprovider["Strategy"]; exists {
if strategyStr, ok := strategy.(string); ok {
// Convert deprecated "flat" strategy to "all"
if strategyStr == "flat" {
provide["Strategy"] = "all"
fmt.Printf(" Migrated deprecated Reprovider.Strategy=\"flat\" to Provide.Strategy=\"all\"\n")
} else {
// Migrate any other strategy value as-is
provide["Strategy"] = strategyStr
fmt.Printf(" Migrated Reprovider.Strategy=\"%s\" to Provide.Strategy=\"%s\"\n", strategyStr, strategyStr)
}
hasNonDefaultValues = true
} else {
// Not a string, set to default "all" to ensure valid config
provide["Strategy"] = "all"
fmt.Printf(" Warning: Reprovider.Strategy was not a string, setting Provide.Strategy=\"all\"\n")
hasNonDefaultValues = true
}
}
if interval, exists := reprovider["Interval"]; exists {
dht["Interval"] = interval
// Log migration for non-default intervals
if intervalStr, ok := interval.(string); ok && intervalStr != "22h" && intervalStr != "" {
fmt.Printf(" Migrated Reprovider.Interval=\"%s\" to Provide.DHT.Interval=\"%s\"\n", intervalStr, intervalStr)
hasNonDefaultValues = true
}
}
// Note: Sweep is a new field introduced in v0.38, not present in v0.37
// So we don't need to migrate it from Reprovider

// Set the DHT section if we have any DHT fields to migrate
if len(dht) > 0 {
provide["DHT"] = dht
}

// Set the new Provide section if we have any fields to migrate
if len(provide) > 0 {
confMap["Provide"] = provide
}

// Clear old Provider and Reprovider sections
delete(confMap, "Provider")
delete(confMap, "Reprovider")

// Print documentation link if we migrated any non-default values
if hasNonDefaultValues {
fmt.Printf(" See: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide\n")
}

// Write the updated config
return common.WriteConfig(out, confMap)
}
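As a hedged illustration of how convert can be driven in isolation, mirroring the bytes.Reader/bytes.Buffer pattern the migration tests use (ExampleConvert is a hypothetical name and not part of this change; the JSON literal is illustrative only):

package mg17 // sketch assumes it sits next to convert in this package

import (
	"bytes"
	"fmt"
	"log"
)

// ExampleConvert is a hypothetical illustration, not part of the migration code.
func ExampleConvert() {
	// A minimal v17-style config containing only the sections this migration touches.
	in := bytes.NewReader([]byte(`{"Provider":{"WorkerCount":8},"Reprovider":{"Strategy":"flat"}}`))
	var out bytes.Buffer
	if err := convert(in, &out); err != nil {
		log.Fatal(err)
	}
	// out now holds the same config with a unified "Provide" section and the
	// deprecated Provider/Reprovider sections removed.
	fmt.Println(out.String())
}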
@@ -0,0 +1,176 @@
package mg17

import (
"testing"

"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

func TestMigration17to18(t *testing.T) {
migration := NewMigration()

testCases := []common.TestCase{
{
Name: "Migrate Provider and Reprovider to Provide",
InputConfig: common.GenerateTestConfig(map[string]any{
"Provider": map[string]any{
"Enabled": true,
"WorkerCount": 8,
"Strategy": "unused", // This field was unused and should be ignored
},
"Reprovider": map[string]any{
"Strategy": "pinned",
"Interval": "12h",
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide.Enabled", Expected: true},
{Path: "Provide.DHT.MaxWorkers", Expected: float64(8)}, // JSON unmarshals to float64
{Path: "Provide.Strategy", Expected: "pinned"},
{Path: "Provide.DHT.Interval", Expected: "12h"},
{Path: "Provider", Expected: nil}, // Should be deleted
{Path: "Reprovider", Expected: nil}, // Should be deleted
},
},
{
Name: "Convert flat strategy to all",
InputConfig: common.GenerateTestConfig(map[string]any{
"Provider": map[string]any{
"Enabled": false,
},
"Reprovider": map[string]any{
"Strategy": "flat", // Deprecated, should be converted to "all"
"Interval": "24h",
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide.Enabled", Expected: false},
{Path: "Provide.Strategy", Expected: "all"}, // "flat" converted to "all"
{Path: "Provide.DHT.Interval", Expected: "24h"},
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
},
},
{
Name: "Handle missing Provider section",
InputConfig: common.GenerateTestConfig(map[string]any{
"Reprovider": map[string]any{
"Strategy": "roots",
"Interval": "6h",
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide.Strategy", Expected: "roots"},
{Path: "Provide.DHT.Interval", Expected: "6h"},
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
},
},
{
Name: "Handle missing Reprovider section",
InputConfig: common.GenerateTestConfig(map[string]any{
"Provider": map[string]any{
"Enabled": true,
"WorkerCount": 16,
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide.Enabled", Expected: true},
{Path: "Provide.DHT.MaxWorkers", Expected: float64(16)},
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
},
},
{
Name: "Handle empty Provider and Reprovider sections",
InputConfig: common.GenerateTestConfig(map[string]any{
"Provider": map[string]any{},
"Reprovider": map[string]any{},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide", Expected: nil}, // No fields to migrate
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
},
},
{
Name: "Handle missing both sections",
InputConfig: common.GenerateTestConfig(map[string]any{
"Datastore": map[string]any{
"StorageMax": "10GB",
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide", Expected: nil}, // No Provider/Reprovider to migrate
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
{Path: "Datastore.StorageMax", Expected: "10GB"}, // Other config preserved
},
},
{
Name: "Preserve other config sections",
InputConfig: common.GenerateTestConfig(map[string]any{
"Provider": map[string]any{
"Enabled": true,
},
"Reprovider": map[string]any{
"Strategy": "all",
},
"Swarm": map[string]any{
"ConnMgr": map[string]any{
"Type": "basic",
},
},
}),
Assertions: []common.ConfigAssertion{
{Path: "Provide.Enabled", Expected: true},
{Path: "Provide.Strategy", Expected: "all"},
{Path: "Swarm.ConnMgr.Type", Expected: "basic"}, // Other config preserved
{Path: "Provider", Expected: nil},
{Path: "Reprovider", Expected: nil},
},
},
}

for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
common.RunMigrationTest(t, migration, tc)
})
}
}

func TestMigration17to18Reversible(t *testing.T) {
migration := NewMigration()

// Test that migration is reversible
inputConfig := common.GenerateTestConfig(map[string]any{
"Provide": map[string]any{
"Enabled": true,
"WorkerCount": 8,
"Strategy": "pinned",
"Interval": "12h",
},
})

// Test full migration and revert
migratedConfig := common.AssertMigrationSuccess(t, migration, 17, 18, inputConfig)

// Check that Provide section exists after migration
common.AssertConfigField(t, migratedConfig, "Provide.Enabled", true)

// Test revert
common.AssertMigrationReversible(t, migration, 17, 18, migratedConfig)
}

func TestMigration17to18Integration(t *testing.T) {
migration := NewMigration()

// Test that the migration properly integrates with the common framework
if migration.Versions() != "17-to-18" {
t.Errorf("expected versions '17-to-18', got '%s'", migration.Versions())
}

if !migration.Reversible() {
t.Error("migration should be reversible")
}
}
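To run just these migration unit tests locally, something along these lines should work (hedged; the package path is taken from the import shown above):

go test ./repo/fsrepo/migrations/fs-repo-17-to-18/...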
@@ -337,8 +337,8 @@ func testExpandAutoFiltersUnsupportedPathsDelegated(t *testing.T) {
node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"})
node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"})
// Disable content providing when using delegated routing
-node.SetIPFSConfig("Provider.Enabled", false)
-node.SetIPFSConfig("Reprovider.Interval", "0")
+node.SetIPFSConfig("Provide.Enabled", false)
+node.SetIPFSConfig("Provide.DHT.Interval", "0")

// Load test autoconf data with unsupported paths
autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json")
@@ -421,8 +421,8 @@ func testExpandAutoWithoutCacheDelegated(t *testing.T) {
node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"})
node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"})
// Disable content providing when using delegated routing
-node.SetIPFSConfig("Provider.Enabled", false)
-node.SetIPFSConfig("Reprovider.Interval", "0")
+node.SetIPFSConfig("Provide.Enabled", false)
+node.SetIPFSConfig("Provide.DHT.Interval", "0")

// Load test autoconf data with unsupported paths (this won't be used since no daemon)
autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json")
@@ -200,8 +200,8 @@ func setupNodeWithAutoconf(t *testing.T, publisherURL string, routingType string

// Additional config for delegated routing mode
if routingType == "delegated" {
-node.SetIPFSConfig("Provider.Enabled", false)
-node.SetIPFSConfig("Reprovider.Interval", "0s")
+node.SetIPFSConfig("Provide.Enabled", false)
+node.SetIPFSConfig("Provide.DHT.Interval", "0s")
}

// Add bootstrap peers for connectivity
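The same rename applies when configuring nodes by hand; a hedged example of setting the new keys with the ipfs CLI (values illustrative, matching the test setup above):

ipfs config --json Provide.Enabled false
ipfs config Provide.DHT.Interval 0s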
@@ -1,8 +1,6 @@
package migrations

// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH.
-// The tests migrate from repo version 16 to 17, which requires Kubo version 0.37.0+ (expects repo v17).
-// If using system ipfs binary v0.36.0 or older (expects repo v16), no migration will be triggered.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
@@ -12,6 +10,7 @@ import (
"bufio"
"context"
"encoding/json"
+"fmt"
"io"
"os"
"os/exec"
@@ -20,11 +19,28 @@ import (
"testing"
"time"

+ipfs "github.com/ipfs/kubo"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/require"
)

-func TestMigration16To17(t *testing.T) {
+// TestMigration16ToLatest tests migration from repo version 16 to the latest version.
+//
+// This test uses a real IPFS repository snapshot from Kubo v0.36.0 (the last version that used repo v16).
+// The intention is to confirm that users can upgrade from Kubo v0.36.0 to the latest version by applying
+// all intermediate migrations successfully.
+//
+// NOTE: This test comprehensively tests all migration methods (daemon --migrate, repo migrate,
+// and reverse migration) because 16-to-17 was the first embedded migration that did not fetch
+// external files. It serves as a reference implementation for migration testing.
+//
+// Future migrations can have simplified tests (like 17-to-18 in migration_17_to_latest_test.go)
+// that focus on specific migration logic rather than testing all migration methods.
+//
+// If you need to test migration of configuration keys that appeared in later repo versions,
+// create a new test file migration_N_to_latest_test.go with a separate IPFS repository test vector
+// from the appropriate Kubo version.
+func TestMigration16ToLatest(t *testing.T) {
t.Parallel()

// Primary tests using 'ipfs daemon --migrate' command (default in Docker)
@@ -71,12 +87,13 @@ func testDaemonMigrationWithAuto(t *testing.T) {
// Verify migration was successful based on monitoring
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
-require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

-// Verify version was updated to 17
+// Verify version was updated to latest
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
-require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)

// Verify migration results using DRY helper
helper := NewMigrationTestHelper(t, configPath)
@@ -131,7 +148,7 @@ func testDaemonMigrationWithoutAuto(t *testing.T) {
// Verify migration was successful based on monitoring
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
-require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

// Verify migration results: custom values preserved alongside "auto"
helper := NewMigrationTestHelper(t, configPath)
@@ -487,12 +504,13 @@ func testDaemonMissingFieldsHandling(t *testing.T) {
// Verify migration was successful
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
-require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

-// Verify version was updated
+// Verify version was updated to latest
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
-require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)

// Verify migration adds all required fields to minimal config
NewMigrationTestHelper(t, configPath).
@@ -543,10 +561,11 @@ func testRepoBackwardMigration(t *testing.T) {
result := node.RunIPFS("repo", "migrate")
require.Empty(t, result.Stderr.String(), "Forward migration should succeed")

-// Verify we're at v17
+// Verify we're at the latest version
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
-require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration")
+expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Should be at version %s (latest) after forward migration", expectedVersion)

// Now run reverse migration back to v16
result = node.RunIPFS("repo", "migrate", "--to=16", "--allow-downgrade")
@@ -565,18 +584,40 @@ func testRepoBackwardMigration(t *testing.T) {

// runDaemonMigrationWithMonitoring starts daemon --migrate, monitors output until "Daemon is ready",
// then gracefully shuts down the daemon and returns the captured output and success status.
-// This is a generic helper that can monitor for any migration patterns.
+// This monitors for all expected migrations from version 16 to latest.
func runDaemonMigrationWithMonitoring(t *testing.T, node *harness.Node) (string, bool) {
-// Use specific patterns for 16-to-17 migration
-return runDaemonWithMigrationMonitoring(t, node, "applying 16-to-17 repo migration", "Migration 16 to 17 succeeded")
+// Monitor migrations from repo v16 to latest
+return runDaemonWithExpectedMigrations(t, node, 16, ipfs.RepoVersion)
}

-// runDaemonWithMigrationMonitoring is a generic helper for running daemon --migrate and monitoring output.
-// It waits for the daemon to be ready, then shuts it down gracefully.
-// migrationPattern: pattern to detect migration started (e.g., "applying X-to-Y repo migration")
-// successPattern: pattern to detect migration succeeded (e.g., "Migration X to Y succeeded")
-// Returns the stdout output and whether both patterns were detected.
-func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migrationPattern, successPattern string) (string, bool) {
+// runDaemonWithExpectedMigrations monitors daemon startup for a sequence of migrations from startVersion to endVersion
+func runDaemonWithExpectedMigrations(t *testing.T, node *harness.Node, startVersion, endVersion int) (string, bool) {
+// Build list of expected migrations
+var expectedMigrations []struct {
+pattern string
+success string
+}
+
+for v := startVersion; v < endVersion; v++ {
+from := v
+to := v + 1
+expectedMigrations = append(expectedMigrations, struct {
+pattern string
+success string
+}{
+pattern: fmt.Sprintf("applying %d-to-%d repo migration", from, to),
+success: fmt.Sprintf("Migration %d-to-%d succeeded", from, to),
+})
+}
+
+return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations)
+}
+
+// runDaemonWithMultipleMigrationMonitoring monitors daemon startup for multiple sequential migrations
+func runDaemonWithMultipleMigrationMonitoring(t *testing.T, node *harness.Node, expectedMigrations []struct {
+pattern string
+success string
+}) (string, bool) {
// Create context with timeout as safety net
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@@ -601,7 +642,11 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
require.NoError(t, err)

var allOutput strings.Builder
-var migrationDetected, migrationSucceeded, daemonReady bool
+var daemonReady bool
+
+// Track which migrations have been detected
+migrationsDetected := make([]bool, len(expectedMigrations))
+migrationsSucceeded := make([]bool, len(expectedMigrations))

// Monitor stdout for completion signals
scanner := bufio.NewScanner(stdout)
@@ -611,11 +656,13 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
allOutput.WriteString(line + "\n")

// Check for migration messages
-if migrationPattern != "" && strings.Contains(line, migrationPattern) {
-migrationDetected = true
-}
-if successPattern != "" && strings.Contains(line, successPattern) {
-migrationSucceeded = true
+for i, migration := range expectedMigrations {
+if strings.Contains(line, migration.pattern) {
+migrationsDetected[i] = true
+}
+if strings.Contains(line, migration.success) {
+migrationsSucceeded[i] = true
+}
}
if strings.Contains(line, "Daemon is ready") {
daemonReady = true
@@ -667,17 +714,41 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
// Wait for process to exit
_ = cmd.Wait()

-// Return success if we detected migration
-success := migrationDetected && migrationSucceeded
-return allOutput.String(), success
+// Check all migrations were detected and succeeded
+allDetected := true
+allSucceeded := true
+for i := range expectedMigrations {
+if !migrationsDetected[i] {
+allDetected = false
+t.Logf("Migration %s was not detected", expectedMigrations[i].pattern)
+}
+if !migrationsSucceeded[i] {
+allSucceeded = false
+t.Logf("Migration %s did not succeed", expectedMigrations[i].success)
+}
+}
+
+return allOutput.String(), allDetected && allSucceeded
}

// Check if process has exited (e.g., due to startup failure after migration)
if cmd.ProcessState != nil && cmd.ProcessState.Exited() {
// Process exited - migration may have completed but daemon failed to start
// This is expected for corrupted config tests
-success := migrationDetected && migrationSucceeded
-return allOutput.String(), success
+// Check all migrations status
+allDetected := true
+allSucceeded := true
+for i := range expectedMigrations {
+if !migrationsDetected[i] {
+allDetected = false
+}
+if !migrationsSucceeded[i] {
+allSucceeded = false
+}
+}
+
+return allOutput.String(), allDetected && allSucceeded
}
}
}
test/cli/migrations/migration_17_to_latest_test.go (new file, 360 lines)
@@ -0,0 +1,360 @@
package migrations
|
||||||
|
|
||||||
|
// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH.
|
||||||
|
//
|
||||||
|
// To run these tests successfully:
|
||||||
|
// export PATH="$(pwd)/cmd/ipfs:$PATH"
|
||||||
|
// go test ./test/cli/migrations/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
ipfs "github.com/ipfs/kubo"
|
||||||
|
"github.com/ipfs/kubo/test/cli/harness"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestMigration17ToLatest tests migration from repo version 17 to the latest version.
|
||||||
|
//
|
||||||
|
// Since we don't have a v17 repo fixture, we start with v16 and migrate it to v17 first,
|
||||||
|
// then test the 17-to-18 migration specifically.
|
||||||
|
//
|
||||||
|
// This test focuses on the Provider/Reprovider to Provide consolidation that happens in 17-to-18.
|
||||||
|
func TestMigration17ToLatest(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Tests for Provider/Reprovider to Provide migration (17-to-18)
|
||||||
|
t.Run("daemon migrate: Provider/Reprovider to Provide consolidation", testProviderReproviderMigration)
|
||||||
|
t.Run("daemon migrate: flat strategy conversion", testFlatStrategyConversion)
|
||||||
|
t.Run("daemon migrate: empty Provider/Reprovider sections", testEmptyProviderReproviderMigration)
|
||||||
|
t.Run("daemon migrate: partial configuration (Provider only)", testProviderOnlyMigration)
|
||||||
|
t.Run("daemon migrate: partial configuration (Reprovider only)", testReproviderOnlyMigration)
|
||||||
|
t.Run("repo migrate: invalid strategy values preserved", testInvalidStrategyMigration)
|
||||||
|
t.Run("repo migrate: Provider/Reprovider to Provide consolidation", testRepoProviderReproviderMigration)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// MIGRATION 17-to-18 SPECIFIC TESTS: Provider/Reprovider to Provide consolidation
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
func testProviderReproviderMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with explicit Provider/Reprovider configuration
|
||||||
|
node := setupV17RepoWithProviderConfig(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
versionPath := filepath.Join(node.Dir, "version")
|
||||||
|
|
||||||
|
// Run migration using daemon --migrate command
|
||||||
|
stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
|
||||||
|
|
||||||
|
// Debug: Print the actual output
|
||||||
|
t.Logf("Daemon output:\n%s", stdoutOutput)
|
||||||
|
|
||||||
|
// Verify migration was successful
|
||||||
|
require.True(t, migrationSuccess, "Migration should have been successful")
|
||||||
|
require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
|
||||||
|
require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")
|
||||||
|
|
||||||
|
// Verify version was updated to latest
|
||||||
|
versionData, err := os.ReadFile(versionPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
expectedVersion := fmt.Sprint(ipfs.RepoVersion)
|
||||||
|
require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// MIGRATION 17-to-18 ASSERTIONS: Provider/Reprovider to Provide consolidation
|
||||||
|
// =============================================================================
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
|
||||||
|
// Verify Provider/Reprovider migration to Provide
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Enabled", true). // Migrated from Provider.Enabled
|
||||||
|
RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)). // Migrated from Provider.WorkerCount
|
||||||
|
RequireFieldEquals("Provide.Strategy", "roots"). // Migrated from Reprovider.Strategy
|
||||||
|
RequireFieldEquals("Provide.DHT.Interval", "24h") // Migrated from Reprovider.Interval
|
||||||
|
|
||||||
|
// Verify old sections are removed
|
||||||
|
helper.RequireFieldAbsent("Provider").
|
||||||
|
RequireFieldAbsent("Reprovider")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testFlatStrategyConversion(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with "flat" strategy that should convert to "all"
|
||||||
|
node := setupV17RepoWithFlatStrategy(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run migration using daemon --migrate command
|
||||||
|
stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
|
||||||
|
|
||||||
|
// Verify migration was successful
|
||||||
|
require.True(t, migrationSuccess, "Migration should have been successful")
|
||||||
|
require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
|
||||||
|
require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// MIGRATION 17-to-18 ASSERTIONS: "flat" to "all" strategy conversion
|
||||||
|
// =============================================================================
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
|
||||||
|
// Verify "flat" was converted to "all"
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Strategy", "all"). // "flat" converted to "all"
|
||||||
|
RequireFieldEquals("Provide.DHT.Interval", "12h")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testEmptyProviderReproviderMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with empty Provider and Reprovider sections
|
||||||
|
node := setupV17RepoWithEmptySections(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run migration
|
||||||
|
stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
|
||||||
|
|
||||||
|
// Verify migration was successful
|
||||||
|
require.True(t, migrationSuccess, "Migration should have been successful")
|
||||||
|
require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
|
||||||
|
|
||||||
|
// Verify empty sections are removed and no Provide section is created
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
helper.RequireFieldAbsent("Provider").
|
||||||
|
RequireFieldAbsent("Reprovider").
|
||||||
|
RequireFieldAbsent("Provide") // No Provide section should be created for empty configs
|
||||||
|
}
|
||||||
|
|
||||||
|
func testProviderOnlyMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with only Provider configuration
|
||||||
|
node := setupV17RepoWithProviderOnly(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run migration
|
||||||
|
stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
|
||||||
|
|
||||||
|
// Verify migration was successful
|
||||||
|
require.True(t, migrationSuccess, "Migration should have been successful")
|
||||||
|
require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
|
||||||
|
|
||||||
|
// Verify only Provider fields are migrated
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Enabled", false).
|
||||||
|
RequireFieldEquals("Provide.DHT.MaxWorkers", float64(32)).
|
||||||
|
RequireFieldAbsent("Provide.Strategy"). // No Reprovider.Strategy to migrate
|
||||||
|
RequireFieldAbsent("Provide.DHT.Interval") // No Reprovider.Interval to migrate
|
||||||
|
}
|
||||||
|
|
||||||
|
func testReproviderOnlyMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with only Reprovider configuration
|
||||||
|
node := setupV17RepoWithReproviderOnly(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run migration
|
||||||
|
stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
|
||||||
|
|
||||||
|
// Verify migration was successful
|
||||||
|
require.True(t, migrationSuccess, "Migration should have been successful")
|
||||||
|
require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
|
||||||
|
|
||||||
|
// Verify only Reprovider fields are migrated
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Strategy", "pinned").
|
||||||
|
RequireFieldEquals("Provide.DHT.Interval", "48h").
|
||||||
|
RequireFieldAbsent("Provide.Enabled"). // No Provider.Enabled to migrate
|
||||||
|
RequireFieldAbsent("Provide.DHT.MaxWorkers") // No Provider.WorkerCount to migrate
|
||||||
|
}
|
||||||
|
|
||||||
|
func testInvalidStrategyMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration with invalid strategy values (should be preserved as-is)
|
||||||
|
// The migration itself should succeed, but daemon start will fail due to invalid strategy
|
||||||
|
node := setupV17RepoWithInvalidStrategy(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run the migration using 'ipfs repo migrate' (not daemon --migrate)
|
||||||
|
// because daemon would fail to start with invalid strategy after migration
|
||||||
|
result := node.RunIPFS("repo", "migrate")
|
||||||
|
require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")
|
||||||
|
|
||||||
|
// Verify invalid strategy is preserved as-is (not validated during migration)
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Strategy", "invalid-strategy") // Should be preserved
|
||||||
|
|
||||||
|
// Now verify that daemon fails to start with invalid strategy
|
||||||
|
// Note: We cannot use --offline as it skips provider validation
|
||||||
|
// Use a context with timeout to avoid hanging
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon")
|
||||||
|
cmd.Dir = node.Dir
|
||||||
|
for k, v := range node.Runner.Env {
|
||||||
|
cmd.Env = append(cmd.Env, k+"="+v)
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
// The daemon should fail (either with error or timeout if it's hanging)
|
||||||
|
require.Error(t, err, "Daemon should fail to start with invalid strategy")
|
||||||
|
|
||||||
|
// Check if we got the expected error message
|
||||||
|
outputStr := string(output)
|
||||||
|
t.Logf("Daemon output with invalid strategy: %s", outputStr)
|
||||||
|
|
||||||
|
// The error should mention unknown strategy
|
||||||
|
require.Contains(t, outputStr, "unknown strategy", "Should report unknown strategy error")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRepoProviderReproviderMigration(t *testing.T) {
|
||||||
|
// TEST: 17-to-18 migration using 'ipfs repo migrate' command
|
||||||
|
node := setupV17RepoWithProviderConfig(t)
|
||||||
|
|
||||||
|
configPath := filepath.Join(node.Dir, "config")
|
||||||
|
|
||||||
|
// Run migration using 'ipfs repo migrate' command
|
||||||
|
result := node.RunIPFS("repo", "migrate")
|
||||||
|
require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")
|
||||||
|
|
||||||
|
// Verify same results as daemon migrate
|
||||||
|
helper := NewMigrationTestHelper(t, configPath)
|
||||||
|
helper.RequireProviderMigration().
|
||||||
|
RequireFieldEquals("Provide.Enabled", true).
|
||||||
|
RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)).
|
||||||
|
RequireFieldEquals("Provide.Strategy", "roots").
|
||||||
|
RequireFieldEquals("Provide.DHT.Interval", "24h")
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
// HELPER FUNCTIONS
// =============================================================================

// setupV17RepoWithProviderConfig creates a v17 repo with Provider/Reprovider configuration
func setupV17RepoWithProviderConfig(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled":     true,
			"WorkerCount": 8,
		},
		map[string]interface{}{
			"Strategy": "roots",
			"Interval": "24h",
		})
}

// setupV17RepoWithFlatStrategy creates a v17 repo with "flat" strategy for testing conversion
func setupV17RepoWithFlatStrategy(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled": false,
		},
		map[string]interface{}{
			"Strategy": "flat", // This should be converted to "all"
			"Interval": "12h",
		})
}

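A usage sketch for the flat-strategy fixture follows. It is hypothetical (the function name and exact assertion set are illustrative), but it only reuses helpers defined in this file together with the deprecated-"flat"-to-"all" conversion noted in the comment above.

// Hypothetical usage sketch (not part of this change): migrate the flat-strategy
// fixture and check that the deprecated "flat" value was rewritten to "all".
func testFlatStrategyConvertedToAll(t *testing.T) {
	node := setupV17RepoWithFlatStrategy(t)

	result := node.RunIPFS("repo", "migrate")
	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

	helper := NewMigrationTestHelper(t, filepath.Join(node.Dir, "config"))
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Strategy", "all"). // deprecated "flat" converted to "all"
		RequireFieldEquals("Provide.Enabled", false).
		RequireFieldEquals("Provide.DHT.Interval", "12h")
}
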
// setupV17RepoWithConfig is a helper that creates a v17 repo with specified Provider/Reprovider config
func setupV17RepoWithConfig(t *testing.T, providerConfig, reproviderConfig map[string]interface{}) *harness.Node {
	node := setupStaticV16Repo(t)

	// First migrate to v17
	result := node.RunIPFS("repo", "migrate", "--to=17")
	require.Empty(t, result.Stderr.String(), "Migration to v17 should succeed")

	// Update config with specified Provider and Reprovider settings
	configPath := filepath.Join(node.Dir, "config")
	var config map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &config))

	if providerConfig != nil {
		config["Provider"] = providerConfig
	} else {
		config["Provider"] = map[string]interface{}{}
	}

	if reproviderConfig != nil {
		config["Reprovider"] = reproviderConfig
	} else {
		config["Reprovider"] = map[string]interface{}{}
	}

	modifiedConfigData, err := json.MarshalIndent(config, "", " ")
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(configPath, modifiedConfigData, 0644))

	return node
}

// setupV17RepoWithEmptySections creates a v17 repo with empty Provider/Reprovider sections
func setupV17RepoWithEmptySections(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{})
}

// setupV17RepoWithProviderOnly creates a v17 repo with only Provider configuration
func setupV17RepoWithProviderOnly(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled":     false,
			"WorkerCount": 32,
		},
		map[string]interface{}{})
}

// setupV17RepoWithReproviderOnly creates a v17 repo with only Reprovider configuration
func setupV17RepoWithReproviderOnly(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{
			"Strategy": "pinned",
			"Interval": "48h",
		})
}

// setupV17RepoWithInvalidStrategy creates a v17 repo with an invalid strategy value
func setupV17RepoWithInvalidStrategy(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{
			"Strategy": "invalid-strategy", // This is not a valid strategy
			"Interval": "24h",
		})
}

// runDaemonMigrationFromV17 monitors daemon startup for 17-to-18 migration only
func runDaemonMigrationFromV17(t *testing.T, node *harness.Node) (string, bool) {
	// Monitor only the 17-to-18 migration
	expectedMigrations := []struct {
		pattern string
		success string
	}{
		{
			pattern: "applying 17-to-18 repo migration",
			success: "Migration 17-to-18 succeeded",
		},
	}

	return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations)
}

// RequireProviderMigration verifies that Provider/Reprovider have been migrated to Provide section
func (h *MigrationTestHelper) RequireProviderMigration() *MigrationTestHelper {
	return h.RequireFieldExists("Provide").
		RequireFieldAbsent("Provider").
		RequireFieldAbsent("Reprovider")
}
@ -1,8 +1,14 @@
 package migrations

-// NOTE: These legacy migration tests require the local Kubo binary (built with 'make build') to be in PATH.
-// The tests migrate from repo version 15 to 17, which requires both external (15→16) and embedded (16→17) migrations.
-// This validates the transition from legacy external binaries to modern embedded migrations.
+// NOTE: These mixed migration tests validate the transition from old Kubo versions that used external
+// migration binaries to the latest version with embedded migrations. This ensures users can upgrade
+// from very old installations (v15) to the latest version seamlessly.
+//
+// The tests verify hybrid migration paths:
+// - Forward: external binary (15→16) + embedded migrations (16→latest)
+// - Backward: embedded migrations (latest→16) + external binary (16→15)
+//
+// This confirms compatibility between the old external migration system and the new embedded system.
 //
 // To run these tests successfully:
 //   export PATH="$(pwd)/cmd/ipfs:$PATH"
@ -22,30 +28,36 @@ import (
 	"testing"
 	"time"

+	ipfs "github.com/ipfs/kubo"
 	"github.com/ipfs/kubo/test/cli/harness"
 	"github.com/stretchr/testify/require"
 )

-func TestMigration15To17(t *testing.T) {
+// TestMixedMigration15ToLatest tests migration from old Kubo (v15 with external migrations)
+// to the latest version using a hybrid approach: external binary for 15→16, then embedded
+// migrations for 16→latest. This ensures backward compatibility for users upgrading from
+// very old Kubo installations.
+func TestMixedMigration15ToLatest(t *testing.T) {
 	t.Parallel()

-	// Test legacy migration from v15 to v17 (combines external 15→16 + embedded 16→17)
-	t.Run("daemon migrate: legacy 15 to 17", testDaemonMigration15To17)
-	t.Run("repo migrate: legacy 15 to 17", testRepoMigration15To17)
+	// Test mixed migration from v15 to latest (combines external 15→16 + embedded 16→latest)
+	t.Run("daemon migrate: mixed 15 to latest", testDaemonMigration15ToLatest)
+	t.Run("repo migrate: mixed 15 to latest", testRepoMigration15ToLatest)
 }

-func TestMigration17To15Downgrade(t *testing.T) {
+// TestMixedMigrationLatestTo15Downgrade tests downgrading from the latest version back to v15
+// using a hybrid approach: embedded migrations for latest→16, then external binary for 16→15.
+// This ensures the migration system works bidirectionally for recovery scenarios.
+func TestMixedMigrationLatestTo15Downgrade(t *testing.T) {
 	t.Parallel()

-	// Test reverse hybrid migration from v17 to v15 (embedded 17→16 + external 16→15)
-	t.Run("repo migrate: reverse hybrid 17 to 15", testRepoReverseHybridMigration17To15)
+	// Test reverse hybrid migration from latest to v15 (embedded latest→16 + external 16→15)
+	t.Run("repo migrate: reverse hybrid latest to 15", testRepoReverseHybridMigrationLatestTo15)
 }

-func testDaemonMigration15To17(t *testing.T) {
-	// TEST: Migration from v15 to v17 using 'ipfs daemon --migrate'
-	// This tests the dual migration path: external binary (15→16) + embedded (16→17)
-	// NOTE: This test may need to be revised/updated once repo version 18 is released,
-	// at that point only keep tests that use 'ipfs repo migrate'
+func testDaemonMigration15ToLatest(t *testing.T) {
+	// TEST: Migration from v15 to latest using 'ipfs daemon --migrate'
+	// This tests the mixed migration path: external binary (15→16) + embedded (16→latest)
 	node := setupStaticV15Repo(t)

 	// Create mock migration binary for 15→16 (16→17 will use embedded migration)
@ -76,13 +88,16 @@ func testDaemonMigration15To17(t *testing.T) {
 	// Verify hybrid migration was successful
 	require.True(t, migrationSuccess, "Hybrid migration should have been successful")
 	require.Contains(t, stdoutOutput, "Phase 1: External migration from v15 to v16", "Should detect external migration phase")
-	require.Contains(t, stdoutOutput, "Phase 2: Embedded migration from v16 to v17", "Should detect embedded migration phase")
+	// Verify each embedded migration step from 16 to latest
+	verifyMigrationSteps(t, stdoutOutput, 16, ipfs.RepoVersion, true)
+	require.Contains(t, stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion), "Should detect embedded migration phase")
 	require.Contains(t, stdoutOutput, "Hybrid migration completed successfully", "Should confirm hybrid migration completion")

-	// Verify final version is 17
+	// Verify final version is latest
 	versionData, err = os.ReadFile(versionPath)
 	require.NoError(t, err)
-	require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+	latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+	require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")

 	// Verify config is still valid JSON and key fields preserved
 	var finalConfig map[string]interface{}
@ -103,8 +118,8 @@ func testDaemonMigration15To17(t *testing.T) {
 	require.NotNil(t, autoConf, "AutoConf should be added by 16→17 migration")
 }

-func testRepoMigration15To17(t *testing.T) {
-	// TEST: Migration from v15 to v17 using 'ipfs repo migrate'
+func testRepoMigration15ToLatest(t *testing.T) {
+	// TEST: Migration from v15 to latest using 'ipfs repo migrate'
 	// Comparison test to verify repo migrate produces same results as daemon migrate
 	node := setupStaticV15Repo(t)

@ -132,10 +147,11 @@ func testRepoMigration15To17(t *testing.T) {
 	})
 	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

-	// Verify final version is 17
+	// Verify final version is latest
 	versionData, err = os.ReadFile(versionPath)
 	require.NoError(t, err)
-	require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+	latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+	require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")

 	// Verify config is valid JSON
 	var finalConfig map[string]interface{}
@ -177,7 +193,7 @@ func runDaemonWithLegacyMigrationMonitoring(t *testing.T, node *harness.Node) (s
 	// Check for hybrid migration patterns in output
 	hasHybridStart := strings.Contains(stdoutOutput, "Using hybrid migration strategy")
 	hasPhase1 := strings.Contains(stdoutOutput, "Phase 1: External migration from v15 to v16")
-	hasPhase2 := strings.Contains(stdoutOutput, "Phase 2: Embedded migration from v16 to v17")
+	hasPhase2 := strings.Contains(stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion))
 	hasHybridSuccess := strings.Contains(stdoutOutput, "Hybrid migration completed successfully")

 	// Success requires daemon to start and hybrid migration patterns to be detected
@ -342,6 +358,37 @@ func main() {
|
|||||||
require.NoError(t, err, "Mock binary should exist")
|
require.NoError(t, err, "Mock binary should exist")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// expectedMigrationSteps generates the expected migration step strings for a version range.
|
||||||
|
// For forward migrations (from < to), it returns strings like "Running embedded migration fs-repo-16-to-17"
|
||||||
|
// For reverse migrations (from > to), it returns strings for the reverse path.
|
||||||
|
func expectedMigrationSteps(from, to int, forward bool) []string {
|
||||||
|
var steps []string
|
||||||
|
|
||||||
|
if forward {
|
||||||
|
// Forward migration: increment by 1 each step
|
||||||
|
for v := from; v < to; v++ {
|
||||||
|
migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v+1)
|
||||||
|
steps = append(steps, fmt.Sprintf("Running embedded migration %s", migrationName))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Reverse migration: decrement by 1 each step
|
||||||
|
for v := from; v > to; v-- {
|
||||||
|
migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v-1)
|
||||||
|
steps = append(steps, fmt.Sprintf("Running reverse migration %s", migrationName))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return steps
|
||||||
|
}
|
||||||
|
|
||||||
|
// verifyMigrationSteps checks that all expected migration steps appear in the output
|
||||||
|
func verifyMigrationSteps(t *testing.T, output string, from, to int, forward bool) {
|
||||||
|
steps := expectedMigrationSteps(from, to, forward)
|
||||||
|
for _, step := range steps {
|
||||||
|
require.Contains(t, output, step, "Migration output should contain: %s", step)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// getNestedValue retrieves a nested value from a config map using dot notation
|
// getNestedValue retrieves a nested value from a config map using dot notation
|
||||||
func getNestedValue(config map[string]interface{}, path string) interface{} {
|
func getNestedValue(config map[string]interface{}, path string) interface{} {
|
||||||
parts := strings.Split(path, ".")
|
parts := strings.Split(path, ".")
|
||||||
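To make the contract of the helpers added above concrete: the snippet below (illustrative only, not part of the change) shows the step strings generated for a forward run from version 16 to 18 and how they are asserted against daemon output.

// Illustrative only: expectedMigrationSteps(16, 18, true) yields
//   "Running embedded migration fs-repo-16-to-17"
//   "Running embedded migration fs-repo-17-to-18"
// and verifyMigrationSteps asserts that each of them appears in the captured output.
verifyMigrationSteps(t, stdoutOutput, 16, 18, true)
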
@ -362,11 +409,11 @@ func getNestedValue(config map[string]interface{}, path string) interface{} {
 	return current
 }

-func testRepoReverseHybridMigration17To15(t *testing.T) {
-	// TEST: Reverse hybrid migration from v17 to v15 using 'ipfs repo migrate --to=15 --allow-downgrade'
+func testRepoReverseHybridMigrationLatestTo15(t *testing.T) {
+	// TEST: Reverse hybrid migration from latest to v15 using 'ipfs repo migrate --to=15 --allow-downgrade'
 	// This tests reverse hybrid migration: embedded (17→16) + external (16→15)

-	// Start with v15 fixture and migrate forward to v17 to create proper backup files
+	// Start with v15 fixture and migrate forward to latest to create proper backup files
 	node := setupStaticV15Repo(t)

 	// Create mock migration binary for 15→16 (needed for forward migration)
@ -377,8 +424,8 @@ func testRepoReverseHybridMigration17To15(t *testing.T) {
 	configPath := filepath.Join(node.Dir, "config")
 	versionPath := filepath.Join(node.Dir, "version")

-	// Step 1: Forward migration from v15 to v17 to create backup files
-	t.Log("Step 1: Forward migration v15 → v17")
+	// Step 1: Forward migration from v15 to latest to create backup files
+	t.Logf("Step 1: Forward migration v15 → v%d", ipfs.RepoVersion)
 	result := node.Runner.Run(harness.RunRequest{
 		Path: node.IPFSBin,
 		Args: []string{"repo", "migrate"},
@ -396,21 +443,22 @@ func testRepoReverseHybridMigration17To15(t *testing.T) {

 	require.Empty(t, result.Stderr.String(), "Forward migration should succeed without errors")

-	// Verify we're at v17 after forward migration
+	// Verify we're at latest version after forward migration
 	versionData, err := os.ReadFile(versionPath)
 	require.NoError(t, err)
-	require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration")
+	latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+	require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Should be at latest version after forward migration")

 	// Read config after forward migration to use as baseline for downgrade
-	var v17Config map[string]interface{}
+	var latestConfig map[string]interface{}
 	configData, err := os.ReadFile(configPath)
 	require.NoError(t, err)
-	require.NoError(t, json.Unmarshal(configData, &v17Config))
+	require.NoError(t, json.Unmarshal(configData, &latestConfig))

-	originalPeerID := getNestedValue(v17Config, "Identity.PeerID")
+	originalPeerID := getNestedValue(latestConfig, "Identity.PeerID")

-	// Step 2: Reverse hybrid migration from v17 to v15
-	t.Log("Step 2: Reverse hybrid migration v17 → v15")
+	// Step 2: Reverse hybrid migration from latest to v15
+	t.Logf("Step 2: Reverse hybrid migration v%d → v15", ipfs.RepoVersion)
 	result = node.Runner.Run(harness.RunRequest{
 		Path: node.IPFSBin,
 		Args: []string{"repo", "migrate", "--to=15", "--allow-downgrade"},
@ -58,11 +58,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		}
 	}

-	t.Run("Provider.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) {
+	t.Run("Provide.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
+			n.SetIPFSConfig("Provide.Enabled", true)
 		})
 		defer nodes.StopDaemons()

@ -70,11 +70,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})

-	t.Run("Provider.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) {
+	t.Run("Provide.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
+			n.SetIPFSConfig("Provide.Enabled", true)
 			// Default strategy is "all" which should provide even unpinned content
 		})
 		defer nodes.StopDaemons()
@ -83,11 +83,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})

-	t.Run("Provider.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) {
+	t.Run("Provide.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
+			n.SetIPFSConfig("Provide.Enabled", true)
 			// Default strategy is "all" which should provide unpinned content from block put
 		})
 		defer nodes.StopDaemons()
@ -97,11 +97,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})

-	t.Run("Provider.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) {
+	t.Run("Provide.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
+			n.SetIPFSConfig("Provide.Enabled", true)
 			// Default strategy is "all" which should provide unpinned content from dag put
 		})
 		defer nodes.StopDaemons()
@ -111,11 +111,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})

-	t.Run("Provider.Enabled=false disables announcement of new CID from ipfs add", func(t *testing.T) {
+	t.Run("Provide.Enabled=false disables announcement of new CID from ipfs add", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", false)
+			n.SetIPFSConfig("Provide.Enabled", false)
 		})
 		defer nodes.StopDaemons()

@ -123,17 +123,17 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectNoProviders(t, cid, nodes[1:]...)
 	})

-	t.Run("Provider.Enabled=false disables manual announcement via RPC command", func(t *testing.T) {
+	t.Run("Provide.Enabled=false disables manual announcement via RPC command", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", false)
+			n.SetIPFSConfig("Provide.Enabled", false)
 		})
 		defer nodes.StopDaemons()

 		cid := nodes[0].IPFSAddStr(time.Now().String())
 		res := nodes[0].RunIPFS("routing", "provide", cid)
-		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provider.Enabled is set to 'false'")
+		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'")
 		assert.Equal(t, 1, res.ExitCode())

 		expectNoProviders(t, cid, nodes[1:]...)
@ -144,7 +144,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Interval", "0")
+			n.SetIPFSConfig("Provide.DHT.Interval", "0")
 		})
 		defer nodes.StopDaemons()

@ -153,11 +153,11 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 	})

 	// It is a lesser evil - forces users to fix their config and have some sort of interval
-	t.Run("Manual Reprovider trigger does not work when periodic Reprovider is disabled", func(t *testing.T) {
+	t.Run("Manual Reprovide trigger does not work when periodic reprovide is disabled", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Interval", "0")
+			n.SetIPFSConfig("Provide.DHT.Interval", "0")
 		})
 		defer nodes.StopDaemons()

@ -166,18 +166,18 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectNoProviders(t, cid, nodes[1:]...)

 		res := nodes[0].RunIPFS("routing", "reprovide")
-		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Reprovider.Interval is set to '0'")
+		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.DHT.Interval is set to '0'")
 		assert.Equal(t, 1, res.ExitCode())

 		expectNoProviders(t, cid, nodes[1:]...)
 	})

 	// It is a lesser evil - forces users to fix their config and have some sort of interval
-	t.Run("Manual Reprovider trigger does not work when Provider system is disabled", func(t *testing.T) {
+	t.Run("Manual Reprovide trigger does not work when Provide system is disabled", func(t *testing.T) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", false)
+			n.SetIPFSConfig("Provide.Enabled", false)
 		})
 		defer nodes.StopDaemons()

@ -186,7 +186,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		expectNoProviders(t, cid, nodes[1:]...)

 		res := nodes[0].RunIPFS("routing", "reprovide")
-		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provider.Enabled is set to 'false'")
+		assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'")
 		assert.Equal(t, 1, res.ExitCode())

 		expectNoProviders(t, cid, nodes[1:]...)
@ -196,7 +196,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})
 		defer nodes.StopDaemons()

@ -208,7 +208,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "pinned")
+			n.SetIPFSConfig("Provide.Strategy", "pinned")
 		})
 		defer nodes.StopDaemons()

@ -225,7 +225,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs")
+			n.SetIPFSConfig("Provide.Strategy", "pinned+mfs")
 		})
 		defer nodes.StopDaemons()

@ -245,7 +245,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "roots")
+			n.SetIPFSConfig("Provide.Strategy", "roots")
 		})
 		defer nodes.StopDaemons()

@ -262,7 +262,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "mfs")
+			n.SetIPFSConfig("Provide.Strategy", "mfs")
 		})
 		defer nodes.StopDaemons()

@ -283,7 +283,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "")
+			n.SetIPFSConfig("Provide.Strategy", "")
 		})

 		cid := nodes[0].IPFSAddStr(time.Now().String())
@ -301,7 +301,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})

 		cid := nodes[0].IPFSAddStr(time.Now().String())
@ -322,7 +322,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		bar := random.Bytes(1000)

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "pinned")
+			n.SetIPFSConfig("Provide.Strategy", "pinned")
 		})

 		// Add a pin while offline so it cannot be provided
@ -357,7 +357,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		bar := random.Bytes(1000)

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "roots")
+			n.SetIPFSConfig("Provide.Strategy", "roots")
 		})
 		n0pid := nodes[0].PeerID().String()

@ -388,7 +388,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		bar := random.Bytes(1000)

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "mfs")
+			n.SetIPFSConfig("Provide.Strategy", "mfs")
 		})
 		n0pid := nodes[0].PeerID().String()

@ -412,7 +412,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
 		t.Parallel()

 		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs")
+			n.SetIPFSConfig("Provide.Strategy", "pinned+mfs")
 		})
 		n0pid := nodes[0].PeerID().String()

@ -444,9 +444,9 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {

 		nodes := harness.NewT(t).NewNodes(1).Init()
 		nodes.ForEachPar(func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
-			n.SetIPFSConfig("Reprovider.Interval", "22h")
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Enabled", true)
+			n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})
 		nodes.StartDaemons()
 		defer nodes.StopDaemons()
@ -472,9 +472,9 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {

 		nodes := harness.NewT(t).NewNodes(1).Init()
 		nodes.ForEachPar(func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
-			n.SetIPFSConfig("Reprovider.Interval", "22h")
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Enabled", true)
+			n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})
 		nodes.StartDaemons()
 		defer nodes.StopDaemons()
@ -492,9 +492,9 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {

 		nodes := harness.NewT(t).NewNodes(1).Init()
 		nodes.ForEachPar(func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", false)
-			n.SetIPFSConfig("Reprovider.Interval", "22h")
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Enabled", false)
+			n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})
 		nodes.StartDaemons()
 		defer nodes.StopDaemons()
@ -509,9 +509,9 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {

 		nodes := harness.NewT(t).NewNodes(1).Init()
 		nodes.ForEachPar(func(n *harness.Node) {
-			n.SetIPFSConfig("Provider.Enabled", true)
-			n.SetIPFSConfig("Reprovider.Interval", "22h")
-			n.SetIPFSConfig("Reprovider.Strategy", "all")
+			n.SetIPFSConfig("Provide.Enabled", true)
+			n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+			n.SetIPFSConfig("Provide.Strategy", "all")
 		})
 		nodes.StartDaemons()
 		defer nodes.StopDaemons()
@ -546,14 +546,14 @@ func TestProvider(t *testing.T) {
 			name:      "LegacyProvider",
 			reprovide: true,
 			apply: func(n *harness.Node) {
-				n.SetIPFSConfig("Reprovider.Sweep.Enabled", false)
+				n.SetIPFSConfig("Provide.DHT.SweepEnabled", false)
 			},
 		},
 		{
 			name:      "SweepingProvider",
 			reprovide: false,
 			apply: func(n *harness.Node) {
-				n.SetIPFSConfig("Reprovider.Sweep.Enabled", true)
+				n.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
 			},
 		},
 	}
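The hunks above are a mechanical rename of the harness config keys; summarized below as a quick reference (nothing beyond the replacements shown above), in the same SetIPFSConfig idiom the suite uses.

// Old key (v17)              → New key (v18, unified Provide section)
// Provider.Enabled           → Provide.Enabled
// Reprovider.Strategy        → Provide.Strategy
// Reprovider.Interval        → Provide.DHT.Interval
// Reprovider.Sweep.Enabled   → Provide.DHT.SweepEnabled
n.SetIPFSConfig("Provide.Enabled", true)
n.SetIPFSConfig("Provide.Strategy", "all")
n.SetIPFSConfig("Provide.DHT.Interval", "22h")
n.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
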
@ -54,6 +54,15 @@ go_memstats_stack_sys_bytes
 go_memstats_sys_bytes
 go_sched_gomaxprocs_threads
 go_threads
+http_server_request_body_size_bytes_bucket
+http_server_request_body_size_bytes_count
+http_server_request_body_size_bytes_sum
+http_server_request_duration_seconds_bucket
+http_server_request_duration_seconds_count
+http_server_request_duration_seconds_sum
+http_server_response_body_size_bytes_bucket
+http_server_response_body_size_bytes_count
+http_server_response_body_size_bytes_sum
 ipfs_bitswap_active_block_tasks
 ipfs_bitswap_active_tasks
 ipfs_bitswap_bcast_skips_total
@ -231,6 +240,7 @@ libp2p_relaysvc_status
 libp2p_swarm_dial_ranking_delay_seconds_bucket
 libp2p_swarm_dial_ranking_delay_seconds_count
 libp2p_swarm_dial_ranking_delay_seconds_sum
+otel_scope_info
 process_cpu_seconds_total
 process_max_fds
 process_network_receive_bytes_total
@ -242,3 +252,4 @@ process_virtual_memory_bytes
 process_virtual_memory_max_bytes
 provider_reprovider_provide_count
 provider_reprovider_reprovide_count
+target_info
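The list above is the set of metric names expected from a running daemon. A minimal sketch of how such a list can be collected for comparison follows; it assumes a local node with the default RPC address (127.0.0.1:5001) and the standard /debug/metrics/prometheus endpoint.

// Minimal sketch (assumptions: local daemon, default RPC address and metrics path).
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:5001/debug/metrics/prometheus")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	seen := map[string]bool{}
	for _, line := range strings.Split(string(body), "\n") {
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip HELP/TYPE comment lines
		}
		// The metric name is everything before the first '{' or space.
		fields := strings.FieldsFunc(line, func(r rune) bool { return r == '{' || r == ' ' })
		if len(fields) == 0 {
			continue
		}
		if name := fields[0]; !seen[name] {
			seen[name] = true
			fmt.Println(name)
		}
	}
}
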
@ -14,7 +14,7 @@ const CurrentVersionNumber = "0.38.0-dev"
 const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint

 // RepoVersion is the version number that we are currently expecting to see.
-const RepoVersion = 17
+const RepoVersion = 18

 // GetUserAgentVersion is the libp2p user agent used by go-ipfs.
 //