Mirror of https://github.com/ipfs/kubo.git, synced 2026-02-21 02:17:45 +08:00
refactor: apply go fix modernizers from Go 1.26 (#11190)
* chore: apply go fix modernizers from Go 1.26
  Automated refactoring: `interface{}` to `any`, `slices.Contains`,
  `for range` over integers, and other idiomatic updates.
* feat(ci): add `go fix` check to Go analysis workflow
  Ensures the Go 1.26 modernizers stay applied; CI fails if `go fix ./...`
  produces any changes (similar to the existing `go fmt` enforcement).
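
For readers skimming the diff, the minimal sketch below shows the three rewrite patterns the modernizers apply, on hypothetical code; the names are illustrative only and do not appear in this commit.

package main

import (
	"fmt"
	"slices"
)

// describe accepts any value; before the modernizer this parameter
// would have been written as interface{}.
func describe(v any) string {
	return fmt.Sprintf("%T", v)
}

func main() {
	profiles := []string{"server", "lowpower"}

	// Before: a hand-rolled loop that set found = true on a match.
	// After: slices.Contains expresses the membership test directly.
	found := slices.Contains(profiles, "server")

	// Before: for i := 0; i < 3; i++ { ... } with an unused index.
	// After: integer range iteration.
	for range 3 {
		fmt.Println(found, describe(profiles))
	}
}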
This commit is contained in:
parent 36c29c55f0
commit 6a008fc74c
.github/workflows/golang-analysis.yml (vendored), 9 lines changed
@@ -47,6 +47,15 @@ jobs:
           echo "$out"
           exit 1
         fi
+    - name: go fix
+      if: always() # run this step even if the previous one failed
+      run: |
+        go fix ./...
+        if [[ -n $(git diff --name-only) ]]; then
+          echo "go fix produced changes. Run 'go fix ./...' locally and commit the result."
+          git diff
+          exit 1
+        fi
     - name: go vet
       if: always() # run this step even if the previous one failed
       uses: protocol/multiple-go-modules@v1.4
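In practice this mirrors the existing `go fmt` gate: when the step fails, contributors run `go fix ./...` locally and commit the result, as the step's error message instructs.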
@@ -34,10 +34,10 @@ type RmBlocksOpts struct {
 // It returns a channel where objects of type RemovedBlock are placed, when
 // not using the Quiet option. Block removal is asynchronous and will
 // skip any pinned blocks.
-func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan interface{}, error) {
+func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan any, error) {
 	// make the channel large enough to hold any result to avoid
 	// blocking while holding the GCLock
-	out := make(chan interface{}, len(cids))
+	out := make(chan any, len(cids))
 	go func() {
 		defer close(out)

@@ -75,7 +75,7 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids
 // out channel, with an error which indicates that the Cid is pinned.
 // This function is used in RmBlocks to filter out any blocks which are not
 // to be removed (because they are pinned).
-func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid {
+func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- any, cids []cid.Cid) []cid.Cid {
 	stillOkay := make([]cid.Cid, 0, len(cids))
 	res, err := pins.CheckIfPinned(ctx, cids...)
 	if err != nil {
@@ -18,10 +18,10 @@ type RequestBuilder interface {
 	BodyBytes(body []byte) RequestBuilder
 	Body(body io.Reader) RequestBuilder
 	FileBody(body io.Reader) RequestBuilder
-	Option(key string, value interface{}) RequestBuilder
+	Option(key string, value any) RequestBuilder
 	Header(name, value string) RequestBuilder
 	Send(ctx context.Context) (*Response, error)
-	Exec(ctx context.Context, res interface{}) error
+	Exec(ctx context.Context, res any) error
 }

 // encodedAbsolutePathVersion is the version from which the absolute path header in

@@ -83,7 +83,7 @@ func (r *requestBuilder) FileBody(body io.Reader) RequestBuilder {
 }

 // Option sets the given option.
-func (r *requestBuilder) Option(key string, value interface{}) RequestBuilder {
+func (r *requestBuilder) Option(key string, value any) RequestBuilder {
 	var s string
 	switch v := value.(type) {
 	case bool:

@@ -128,7 +128,7 @@ func (r *requestBuilder) Send(ctx context.Context) (*Response, error) {
 }

 // Exec sends the request a request and decodes the response.
-func (r *requestBuilder) Exec(ctx context.Context, res interface{}) error {
+func (r *requestBuilder) Exec(ctx context.Context, res any) error {
 	httpRes, err := r.Send(ctx)
 	if err != nil {
 		return err
@@ -64,7 +64,7 @@ func (r *Response) Cancel() error {
 }

 // Decode reads request body and decodes it as json.
-func (r *Response) decode(dec interface{}) error {
+func (r *Response) decode(dec any) error {
 	if r.Error != nil {
 		return r.Error
 	}
@@ -1287,7 +1287,7 @@ func merge(cs ...<-chan error) <-chan error {

 func YesNoPrompt(prompt string) bool {
 	var s string
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		fmt.Printf("%s ", prompt)
 		_, err := fmt.Scanf("%s", &s)
 		if err != nil {
@@ -18,7 +18,7 @@ var (

 func makeResolver(t *testing.T, n uint8) *madns.Resolver {
 	results := make([]net.IPAddr, n)
-	for i := uint8(0); i < n; i++ {
+	for i := range n {
 		results[i] = net.IPAddr{IP: net.ParseIP(fmt.Sprintf("192.0.2.%d", i))}
 	}
@@ -133,7 +133,7 @@ func applyProfiles(conf *config.Config, profiles string) error {
 		return nil
 	}

-	for _, profile := range strings.Split(profiles, ",") {
+	for profile := range strings.SplitSeq(profiles, ",") {
 		transformer, ok := config.Profiles[profile]
 		if !ok {
 			return fmt.Errorf("invalid configuration profile: %s", profile)
@ -251,7 +251,7 @@ func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
|
||||
// multipart requests is %-encoded. Before this version, its sent raw.
|
||||
var encodedAbsolutePathVersion = semver.MustParse("0.23.0-dev")
|
||||
|
||||
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
|
||||
func makeExecutor(req *cmds.Request, env any) (cmds.Executor, error) {
|
||||
exe := tracingWrappedExecutor{cmds.NewExecutor(req.Root)}
|
||||
cctx := env.(*oldcmds.Context)
|
||||
|
||||
|
||||
@ -37,9 +37,7 @@ func (ih *IntrHandler) Close() error {
|
||||
func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...os.Signal) {
|
||||
notify := make(chan os.Signal, 1)
|
||||
signal.Notify(notify, sigs...)
|
||||
ih.wg.Add(1)
|
||||
go func() {
|
||||
defer ih.wg.Done()
|
||||
ih.wg.Go(func() {
|
||||
defer signal.Stop(notify)
|
||||
|
||||
count := 0
|
||||
@ -52,7 +50,7 @@ func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...
|
||||
handler(count, ih)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
|
||||
|
||||
@ -11,7 +11,7 @@ type ReqLogEntry struct {
|
||||
EndTime time.Time
|
||||
Active bool
|
||||
Command string
|
||||
Options map[string]interface{}
|
||||
Options map[string]any
|
||||
Args []string
|
||||
ID int
|
||||
|
||||
|
||||
@ -3,6 +3,7 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
@ -82,12 +83,9 @@ func validateAutoConfDisabled(cfg *Config) error {
|
||||
var errors []string
|
||||
|
||||
// Check Bootstrap
|
||||
for _, peer := range cfg.Bootstrap {
|
||||
if peer == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Bootstrap, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Check DNS.Resolvers
|
||||
@ -102,21 +100,15 @@ func validateAutoConfDisabled(cfg *Config) error {
|
||||
}
|
||||
|
||||
// Check Routing.DelegatedRouters
|
||||
for _, router := range cfg.Routing.DelegatedRouters {
|
||||
if router == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Routing.DelegatedRouters, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Check Ipns.DelegatedPublishers
|
||||
for _, publisher := range cfg.Ipns.DelegatedPublishers {
|
||||
if publisher == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Ipns.DelegatedPublishers, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Log all errors
|
||||
|
||||
@ -84,5 +84,5 @@ type AutoNATThrottleConfig struct {
|
||||
// global/peer dialback limits.
|
||||
//
|
||||
// When unset, this defaults to 1 minute.
|
||||
Interval OptionalDuration `json:",omitempty"`
|
||||
Interval OptionalDuration
|
||||
}
|
||||
|
||||
@ -47,7 +47,7 @@ type Config struct {
|
||||
|
||||
Internal Internal // experimental/unstable options
|
||||
|
||||
Bitswap Bitswap `json:",omitempty"`
|
||||
Bitswap Bitswap
|
||||
}
|
||||
|
||||
const (
|
||||
@ -106,7 +106,7 @@ func Filename(configroot, userConfigFile string) (string, error) {
|
||||
}
|
||||
|
||||
// HumanOutput gets a config value ready for printing.
|
||||
func HumanOutput(value interface{}) ([]byte, error) {
|
||||
func HumanOutput(value any) ([]byte, error) {
|
||||
s, ok := value.(string)
|
||||
if ok {
|
||||
return []byte(strings.Trim(s, "\n")), nil
|
||||
@ -115,12 +115,12 @@ func HumanOutput(value interface{}) ([]byte, error) {
|
||||
}
|
||||
|
||||
// Marshal configuration with JSON.
|
||||
func Marshal(value interface{}) ([]byte, error) {
|
||||
func Marshal(value any) ([]byte, error) {
|
||||
// need to prettyprint, hence MarshalIndent, instead of Encoder
|
||||
return json.MarshalIndent(value, "", " ")
|
||||
}
|
||||
|
||||
func FromMap(v map[string]interface{}) (*Config, error) {
|
||||
func FromMap(v map[string]any) (*Config, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buf).Encode(v); err != nil {
|
||||
return nil, err
|
||||
@ -132,12 +132,12 @@ func FromMap(v map[string]interface{}) (*Config, error) {
|
||||
return &conf, nil
|
||||
}
|
||||
|
||||
func ToMap(conf *Config) (map[string]interface{}, error) {
|
||||
func ToMap(conf *Config) (map[string]any, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buf).Encode(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
if err := json.NewDecoder(buf).Decode(&m); err != nil {
|
||||
return nil, fmt.Errorf("failure to decode config: %w", err)
|
||||
}
|
||||
@ -147,14 +147,14 @@ func ToMap(conf *Config) (map[string]interface{}, error) {
|
||||
// Convert config to a map, without using encoding/json, since
|
||||
// zero/empty/'omitempty' fields are excluded by encoding/json during
|
||||
// marshaling.
|
||||
func ReflectToMap(conf interface{}) interface{} {
|
||||
func ReflectToMap(conf any) any {
|
||||
v := reflect.ValueOf(conf)
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle pointer type
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.Kind() == reflect.Pointer {
|
||||
if v.IsNil() {
|
||||
// Create a zero value of the pointer's element type
|
||||
elemType := v.Type().Elem()
|
||||
@ -166,7 +166,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
result := make(map[string]interface{})
|
||||
result := make(map[string]any)
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
@ -178,7 +178,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
return result
|
||||
|
||||
case reflect.Map:
|
||||
result := make(map[string]interface{})
|
||||
result := make(map[string]any)
|
||||
iter := v.MapRange()
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
@ -194,7 +194,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
return result
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
result := make([]interface{}, v.Len())
|
||||
result := make([]any, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
result[i] = ReflectToMap(v.Index(i).Interface())
|
||||
}
|
||||
@ -234,11 +234,11 @@ func CheckKey(key string) error {
|
||||
|
||||
// Parse the key and verify it's presence in the map.
|
||||
var ok bool
|
||||
var mapCursor map[string]interface{}
|
||||
var mapCursor map[string]any
|
||||
|
||||
parts := strings.Split(key, ".")
|
||||
for i, part := range parts {
|
||||
mapCursor, ok = cursor.(map[string]interface{})
|
||||
mapCursor, ok = cursor.(map[string]any)
|
||||
if !ok {
|
||||
if cursor == nil {
|
||||
return nil
|
||||
|
||||
@ -32,7 +32,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
// Helper function to create a test config with various field types
|
||||
reflectedConfig := ReflectToMap(new(Config))
|
||||
|
||||
mapConfig, ok := reflectedConfig.(map[string]interface{})
|
||||
mapConfig, ok := reflectedConfig.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Config didn't convert to map")
|
||||
}
|
||||
@ -42,7 +42,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
t.Fatal("Identity field not found")
|
||||
}
|
||||
|
||||
mapIdentity, ok := reflectedIdentity.(map[string]interface{})
|
||||
mapIdentity, ok := reflectedIdentity.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Identity field didn't convert to map")
|
||||
}
|
||||
@ -70,7 +70,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field not found in config")
|
||||
}
|
||||
bootstrap, ok := reflectedBootstrap.([]interface{})
|
||||
bootstrap, ok := reflectedBootstrap.([]any)
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field didn't convert to []string")
|
||||
}
|
||||
@ -82,7 +82,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("Datastore field not found in config")
|
||||
}
|
||||
datastore, ok := reflectedDatastore.(map[string]interface{})
|
||||
datastore, ok := reflectedDatastore.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Datastore field didn't convert to map")
|
||||
}
|
||||
@ -107,7 +107,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("DNS field not found in config")
|
||||
}
|
||||
DNS, ok := reflectedDNS.(map[string]interface{})
|
||||
DNS, ok := reflectedDNS.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("DNS field didn't convert to map")
|
||||
}
|
||||
@ -116,12 +116,12 @@ func TestReflectToMap(t *testing.T) {
|
||||
t.Fatal("Resolvers field not found in DNS")
|
||||
}
|
||||
// Test map field
|
||||
if _, ok := reflectedResolvers.(map[string]interface{}); !ok {
|
||||
if _, ok := reflectedResolvers.(map[string]any); !ok {
|
||||
t.Fatal("Resolvers field didn't convert to map")
|
||||
}
|
||||
|
||||
// Test pointer field
|
||||
if _, ok := DNS["MaxCacheTTL"].(map[string]interface{}); !ok {
|
||||
if _, ok := DNS["MaxCacheTTL"].(map[string]any); !ok {
|
||||
// Since OptionalDuration only field is private, we cannot test it
|
||||
t.Fatal("MaxCacheTTL field didn't convert to map")
|
||||
}
|
||||
|
||||
@ -32,12 +32,12 @@ type Datastore struct {
|
||||
NoSync bool `json:",omitempty"`
|
||||
Params *json.RawMessage `json:",omitempty"`
|
||||
|
||||
Spec map[string]interface{}
|
||||
Spec map[string]any
|
||||
|
||||
HashOnRead bool
|
||||
BloomFilterSize int
|
||||
BlockKeyCacheSize OptionalInteger `json:",omitempty"`
|
||||
WriteThrough Flag `json:",omitempty"`
|
||||
BlockKeyCacheSize OptionalInteger
|
||||
WriteThrough Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DataStorePath returns the default data store path given a configuration root
|
||||
|
||||
@ -130,8 +130,8 @@ func DefaultDatastoreConfig() Datastore {
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func pebbleSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "pebbleds",
|
||||
"prefix": "pebble.datastore",
|
||||
"path": "pebbleds",
|
||||
@ -139,11 +139,11 @@ func pebbleSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func pebbleSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "measure",
|
||||
"prefix": "pebble.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"formatMajorVersion": int(pebble.FormatNewest),
|
||||
"type": "pebbleds",
|
||||
"path": "pebbleds",
|
||||
@ -151,8 +151,8 @@ func pebbleSpecMeasure() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func badgerSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "badgerds",
|
||||
"prefix": "badger.datastore",
|
||||
"path": "badgerds",
|
||||
@ -161,11 +161,11 @@ func badgerSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func badgerSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "measure",
|
||||
"prefix": "badger.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "badgerds",
|
||||
"path": "badgerds",
|
||||
"syncWrites": false,
|
||||
@ -174,11 +174,11 @@ func badgerSpecMeasure() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func flatfsSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func flatfsSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
map[string]interface{}{
|
||||
"mounts": []any{
|
||||
map[string]any{
|
||||
"mountpoint": "/blocks",
|
||||
"type": "flatfs",
|
||||
"prefix": "flatfs.datastore",
|
||||
@ -186,7 +186,7 @@ func flatfsSpec() map[string]interface{} {
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
map[string]any{
|
||||
"mountpoint": "/",
|
||||
"type": "levelds",
|
||||
"prefix": "leveldb.datastore",
|
||||
@ -197,26 +197,26 @@ func flatfsSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func flatfsSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func flatfsSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
map[string]interface{}{
|
||||
"mounts": []any{
|
||||
map[string]any{
|
||||
"mountpoint": "/blocks",
|
||||
"type": "measure",
|
||||
"prefix": "flatfs.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "flatfs",
|
||||
"path": "blocks",
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
},
|
||||
map[string]interface{}{
|
||||
map[string]any{
|
||||
"mountpoint": "/",
|
||||
"type": "measure",
|
||||
"prefix": "leveldb.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "levelds",
|
||||
"path": "datastore",
|
||||
"compression": "none",
|
||||
|
||||
@ -41,7 +41,7 @@ type BitswapBroadcastControl struct {
|
||||
// MaxPeers sets a hard limit on the number of peers to send broadcasts to.
|
||||
// A value of 0 means no broadcasts are sent. A value of -1 means there is
|
||||
// no limit. Default is [DefaultBroadcastControlMaxPeers].
|
||||
MaxPeers OptionalInteger `json:",omitempty"`
|
||||
MaxPeers OptionalInteger
|
||||
// LocalPeers enables or disables broadcast control for peers on the local
|
||||
// network. If false, than always broadcast to peers on the local network.
|
||||
// If true, apply broadcast control to local peers. Default is
|
||||
@ -58,7 +58,7 @@ type BitswapBroadcastControl struct {
|
||||
// this number of random peers receives a broadcast. This may be helpful in
|
||||
// cases where peers that are not receiving broadcasts my have wanted
|
||||
// blocks. Default is [DefaultBroadcastControlMaxRandomPeers].
|
||||
MaxRandomPeers OptionalInteger `json:",omitempty"`
|
||||
MaxRandomPeers OptionalInteger
|
||||
// SendToPendingPeers enables or disables sending broadcasts to any peers
|
||||
// to which there is a pending message to send. When enabled, this sends
|
||||
// broadcasts to many more peers, but does so in a way that does not
|
||||
|
||||
@ -7,5 +7,5 @@ type Plugins struct {
|
||||
|
||||
type Plugin struct {
|
||||
Disabled bool
|
||||
Config interface{} `json:",omitempty"`
|
||||
Config any `json:",omitempty"`
|
||||
}
|
||||
|
||||
@ -102,7 +102,7 @@ type ProvideDHT struct {
|
||||
|
||||
func ParseProvideStrategy(s string) ProvideStrategy {
|
||||
var strategy ProvideStrategy
|
||||
for _, part := range strings.Split(s, "+") {
|
||||
for part := range strings.SplitSeq(s, "+") {
|
||||
switch part {
|
||||
case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
|
||||
return ProvideStrategyAll
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -59,7 +60,7 @@ type Router struct {
|
||||
|
||||
// Parameters are extra configuration that this router might need.
|
||||
// A common one for HTTP router is "Endpoint".
|
||||
Parameters interface{}
|
||||
Parameters any
|
||||
}
|
||||
|
||||
type (
|
||||
@ -78,13 +79,7 @@ func (m Methods) Check() error {
|
||||
|
||||
// Check unsupported methods
|
||||
for k := range m {
|
||||
seen := false
|
||||
for _, mn := range MethodNameList {
|
||||
if mn == k {
|
||||
seen = true
|
||||
break
|
||||
}
|
||||
}
|
||||
seen := slices.Contains(MethodNameList, k)
|
||||
|
||||
if seen {
|
||||
continue
|
||||
@ -108,7 +103,7 @@ func (r *RouterParser) UnmarshalJSON(b []byte) error {
|
||||
}
|
||||
raw := out.Parameters.(*json.RawMessage)
|
||||
|
||||
var p interface{}
|
||||
var p any
|
||||
switch out.Type {
|
||||
case RouterTypeHTTP:
|
||||
p = &HTTPRouterParams{}
|
||||
|
||||
@ -18,7 +18,7 @@ import (
|
||||
var ErrNotInitialized = errors.New("ipfs not initialized, please run 'ipfs init'")
|
||||
|
||||
// ReadConfigFile reads the config from `filename` into `cfg`.
|
||||
func ReadConfigFile(filename string, cfg interface{}) error {
|
||||
func ReadConfigFile(filename string, cfg any) error {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
@ -34,7 +34,7 @@ func ReadConfigFile(filename string, cfg interface{}) error {
|
||||
}
|
||||
|
||||
// WriteConfigFile writes the config from `cfg` into `filename`.
|
||||
func WriteConfigFile(filename string, cfg interface{}) error {
|
||||
func WriteConfigFile(filename string, cfg any) error {
|
||||
err := os.MkdirAll(filepath.Dir(filename), 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -50,7 +50,7 @@ func WriteConfigFile(filename string, cfg interface{}) error {
|
||||
}
|
||||
|
||||
// encode configuration with JSON.
|
||||
func encode(w io.Writer, value interface{}) error {
|
||||
func encode(w io.Writer, value any) error {
|
||||
// need to prettyprint, hence MarshalIndent, instead of Encoder
|
||||
buf, err := config.Marshal(value)
|
||||
if err != nil {
|
||||
|
||||
@ -298,7 +298,7 @@ func (d Duration) MarshalJSON() ([]byte, error) {
|
||||
}
|
||||
|
||||
func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
var v interface{}
|
||||
var v any
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -485,7 +485,7 @@ func (p *OptionalBytes) UnmarshalJSON(input []byte) error {
|
||||
case "null", "undefined":
|
||||
*p = OptionalBytes{}
|
||||
default:
|
||||
var value interface{}
|
||||
var value any
|
||||
err := json.Unmarshal(input, &value)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@ -112,7 +112,7 @@ The optional format string is a printf style format string:
|
||||
return emitCids(req, resp, opts)
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
cmds.CLI: streamResult(func(v interface{}, out io.Writer) nonFatalError {
|
||||
cmds.CLI: streamResult(func(v any, out io.Writer) nonFatalError {
|
||||
r := v.(*CidFormatRes)
|
||||
if r.ErrorMsg != "" {
|
||||
return nonFatalError(fmt.Sprintf("%s: %s", r.CidStr, r.ErrorMsg))
|
||||
|
||||
@ -39,7 +39,7 @@ func TestCidFmtCmd(t *testing.T) {
|
||||
|
||||
// Mock request
|
||||
req := &cmds.Request{
|
||||
Options: map[string]interface{}{
|
||||
Options: map[string]any{
|
||||
cidToVersionOptionName: "0",
|
||||
cidMultibaseOptionName: e.MultibaseName,
|
||||
cidFormatOptionName: "%s",
|
||||
@ -90,7 +90,7 @@ func TestCidFmtCmd(t *testing.T) {
|
||||
for _, e := range testCases {
|
||||
// Mock request
|
||||
req := &cmds.Request{
|
||||
Options: map[string]interface{}{
|
||||
Options: map[string]any{
|
||||
cidToVersionOptionName: e.Ver,
|
||||
cidMultibaseOptionName: e.MultibaseName,
|
||||
cidFormatOptionName: "%s",
|
||||
|
||||
@ -21,7 +21,7 @@ import (
|
||||
var log = logging.Logger("core/commands/cmdenv")
|
||||
|
||||
// GetNode extracts the node from the environment.
|
||||
func GetNode(env interface{}) (*core.IpfsNode, error) {
|
||||
func GetNode(env any) (*core.IpfsNode, error) {
|
||||
ctx, ok := env.(*commands.Context)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected env to be of type %T, got %T", ctx, env)
|
||||
|
||||
@ -20,7 +20,7 @@ type commandEncoder struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (e *commandEncoder) Encode(v interface{}) error {
|
||||
func (e *commandEncoder) Encode(v any) error {
|
||||
var (
|
||||
cmd *Command
|
||||
ok bool
|
||||
@ -232,7 +232,7 @@ type nonFatalError string
|
||||
// streamResult is a helper function to stream results that possibly
|
||||
// contain non-fatal errors. The helper function is allowed to panic
|
||||
// on internal errors.
|
||||
func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
|
||||
func streamResult(procVal func(any, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
|
||||
return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
|
||||
@ -22,13 +22,13 @@ import (
|
||||
|
||||
// ConfigUpdateOutput is config profile apply command's output
|
||||
type ConfigUpdateOutput struct {
|
||||
OldCfg map[string]interface{}
|
||||
NewCfg map[string]interface{}
|
||||
OldCfg map[string]any
|
||||
NewCfg map[string]any
|
||||
}
|
||||
|
||||
type ConfigField struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
Value any
|
||||
}
|
||||
|
||||
const (
|
||||
@ -117,7 +117,7 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
|
||||
value := args[1]
|
||||
|
||||
if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON {
|
||||
var jsonVal interface{}
|
||||
var jsonVal any
|
||||
if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal json. %s", err)
|
||||
return err
|
||||
@ -199,7 +199,7 @@ var configShowCmd = &cmds.Command{
|
||||
NOTE: For security reasons, this command will omit your private key and remote services. If you would like to make a full backup of your config (private key included), you must copy the config file from your repo.
|
||||
`,
|
||||
},
|
||||
Type: make(map[string]interface{}),
|
||||
Type: make(map[string]any),
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
cfgRoot, err := cmdenv.GetConfigRoot(env)
|
||||
if err != nil {
|
||||
@ -217,7 +217,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
|
||||
return err
|
||||
}
|
||||
|
||||
var cfg map[string]interface{}
|
||||
var cfg map[string]any
|
||||
err = json.Unmarshal(data, &cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -262,7 +262,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
|
||||
},
|
||||
}
|
||||
|
||||
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]interface{}) error {
|
||||
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]any) error {
|
||||
buf, err := config.HumanOutput(out)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -273,35 +273,35 @@ var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer
|
||||
})
|
||||
|
||||
// Scrubs value and returns error if missing
|
||||
func scrubValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
|
||||
func scrubValue(m map[string]any, key []string) (map[string]any, error) {
|
||||
return scrubMapInternal(m, key, false)
|
||||
}
|
||||
|
||||
// Scrubs value and returns no error if missing
|
||||
func scrubOptionalValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
|
||||
func scrubOptionalValue(m map[string]any, key []string) (map[string]any, error) {
|
||||
return scrubMapInternal(m, key, true)
|
||||
}
|
||||
|
||||
func scrubEither(u interface{}, key []string, okIfMissing bool) (interface{}, error) {
|
||||
m, ok := u.(map[string]interface{})
|
||||
func scrubEither(u any, key []string, okIfMissing bool) (any, error) {
|
||||
m, ok := u.(map[string]any)
|
||||
if ok {
|
||||
return scrubMapInternal(m, key, okIfMissing)
|
||||
}
|
||||
return scrubValueInternal(m, key, okIfMissing)
|
||||
}
|
||||
|
||||
func scrubValueInternal(v interface{}, key []string, okIfMissing bool) (interface{}, error) {
|
||||
func scrubValueInternal(v any, key []string, okIfMissing bool) (any, error) {
|
||||
if v == nil && !okIfMissing {
|
||||
return nil, errors.New("failed to find specified key")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func scrubMapInternal(m map[string]interface{}, key []string, okIfMissing bool) (map[string]interface{}, error) {
|
||||
func scrubMapInternal(m map[string]any, key []string, okIfMissing bool) (map[string]any, error) {
|
||||
if len(key) == 0 {
|
||||
return make(map[string]interface{}), nil // delete value
|
||||
return make(map[string]any), nil // delete value
|
||||
}
|
||||
n := map[string]interface{}{}
|
||||
n := map[string]any{}
|
||||
for k, v := range m {
|
||||
if key[0] == "*" || strings.EqualFold(key[0], k) {
|
||||
u, err := scrubEither(v, key[1:], okIfMissing)
|
||||
@ -463,7 +463,7 @@ func buildProfileHelp() string {
|
||||
}
|
||||
|
||||
// scrubPrivKey scrubs private key for security reasons.
|
||||
func scrubPrivKey(cfg *config.Config) (map[string]interface{}, error) {
|
||||
func scrubPrivKey(cfg *config.Config) (map[string]any, error) {
|
||||
cfgMap, err := config.ToMap(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -553,7 +553,7 @@ func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
|
||||
func setConfig(r repo.Repo, key string, value any) (*ConfigField, error) {
|
||||
err := r.SetConfigKey(key, value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set config value: %s (maybe use --json?)", err)
|
||||
@ -646,7 +646,7 @@ func getRemotePinningServices(r repo.Repo) (map[string]config.RemotePinningServi
|
||||
if remoteServicesTag, err := getConfig(r, config.RemoteServicesPath); err == nil {
|
||||
// seems that golang cannot type assert map[string]interface{} to map[string]config.RemotePinningService
|
||||
// so we have to manually copy the data :-|
|
||||
if val, ok := remoteServicesTag.Value.(map[string]interface{}); ok {
|
||||
if val, ok := remoteServicesTag.Value.(map[string]any); ok {
|
||||
jsonString, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@ -294,9 +294,9 @@ CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
|
||||
|
||||
// DagStat is a dag stat command response
|
||||
type DagStat struct {
|
||||
Cid cid.Cid `json:",omitempty"`
|
||||
Size uint64 `json:",omitempty"`
|
||||
NumBlocks int64 `json:",omitempty"`
|
||||
Cid cid.Cid
|
||||
Size uint64 `json:",omitempty"`
|
||||
NumBlocks int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (s *DagStat) String() string {
|
||||
|
||||
@ -6,7 +6,7 @@ import (
|
||||
)
|
||||
|
||||
// TypeErr returns an error with a string that explains what error was expected and what was received.
|
||||
func TypeErr(expected, actual interface{}) error {
|
||||
func TypeErr(expected, actual any) error {
|
||||
return fmt.Errorf("expected type %T, got %T", expected, actual)
|
||||
}
|
||||
|
||||
|
||||
@ -56,8 +56,8 @@ func GetPreemptsAutoUpdate(e *cmds.Extra) (val bool, found bool) {
|
||||
return getBoolFlag(e, preemptsAutoUpdate{})
|
||||
}
|
||||
|
||||
func getBoolFlag(e *cmds.Extra, key interface{}) (val bool, found bool) {
|
||||
var ival interface{}
|
||||
func getBoolFlag(e *cmds.Extra, key any) (val bool, found bool) {
|
||||
var ival any
|
||||
ival, found = e.GetValue(key)
|
||||
if !found {
|
||||
return false, false
|
||||
|
||||
@ -30,7 +30,7 @@ func TestFilesCp_DagCborNodeFails(t *testing.T) {
|
||||
"/ipfs/" + protoNode.Cid().String(),
|
||||
"/test-destination",
|
||||
},
|
||||
Options: map[string]interface{}{
|
||||
Options: map[string]any{
|
||||
"force": false,
|
||||
},
|
||||
}
|
||||
|
||||
@ -85,7 +85,7 @@ The output is:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return streamResult(func(v interface{}, out io.Writer) nonFatalError {
|
||||
return streamResult(func(v any, out io.Writer) nonFatalError {
|
||||
r := v.(*filestore.ListRes)
|
||||
if r.ErrorMsg != "" {
|
||||
return nonFatalError(r.ErrorMsg)
|
||||
|
||||
@ -15,7 +15,7 @@ func TestGetOutputPath(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
args: []string{"/ipns/multiformats.io/"},
|
||||
opts: map[string]interface{}{
|
||||
opts: map[string]any{
|
||||
"output": "takes-precedence",
|
||||
},
|
||||
outPath: "takes-precedence",
|
||||
|
||||
@ -146,7 +146,7 @@ EXAMPLE:
|
||||
Type: IdOutput{},
|
||||
}
|
||||
|
||||
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{}, error) {
|
||||
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (any, error) {
|
||||
if p == "" {
|
||||
return nil, errors.New("attempted to print nil peer")
|
||||
}
|
||||
@ -189,7 +189,7 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{
|
||||
}
|
||||
|
||||
// printing self is special cased as we get values differently.
|
||||
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (interface{}, error) {
|
||||
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (any, error) {
|
||||
info := new(IdOutput)
|
||||
info.ID = keyEnc.FormatID(node.Identity)
|
||||
|
||||
|
||||
@ -112,7 +112,7 @@ trip latency information.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for i := 0; i < numPings; i++ {
|
||||
for range numPings {
|
||||
r, ok := <-pings
|
||||
if !ok {
|
||||
break
|
||||
|
||||
@ -435,7 +435,7 @@ type connInfo struct {
|
||||
Muxer string `json:",omitempty"`
|
||||
Direction inet.Direction `json:",omitempty"`
|
||||
Streams []streamInfo `json:",omitempty"`
|
||||
Identify IdOutput `json:",omitempty"`
|
||||
Identify IdOutput
|
||||
}
|
||||
|
||||
func (ci *connInfo) Sort() {
|
||||
|
||||
@ -34,8 +34,8 @@ Prints out information about your computer to aid in easier debugging.
|
||||
},
|
||||
}
|
||||
|
||||
func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
|
||||
info := make(map[string]interface{})
|
||||
func getInfo(nd *core.IpfsNode) (map[string]any, error) {
|
||||
info := make(map[string]any)
|
||||
err := runtimeInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -66,8 +66,8 @@ func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func runtimeInfo(out map[string]interface{}) error {
|
||||
rt := make(map[string]interface{})
|
||||
func runtimeInfo(out map[string]any) error {
|
||||
rt := make(map[string]any)
|
||||
rt["os"] = runtime.GOOS
|
||||
rt["arch"] = runtime.GOARCH
|
||||
rt["compiler"] = runtime.Compiler
|
||||
@ -80,8 +80,8 @@ func runtimeInfo(out map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func envVarInfo(out map[string]interface{}) error {
|
||||
ev := make(map[string]interface{})
|
||||
func envVarInfo(out map[string]any) error {
|
||||
ev := make(map[string]any)
|
||||
ev["GOPATH"] = os.Getenv("GOPATH")
|
||||
ev[config.EnvDir] = os.Getenv(config.EnvDir)
|
||||
|
||||
@ -89,7 +89,7 @@ func envVarInfo(out map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func diskSpaceInfo(out map[string]interface{}) error {
|
||||
func diskSpaceInfo(out map[string]any) error {
|
||||
pathRoot, err := config.PathRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -99,7 +99,7 @@ func diskSpaceInfo(out map[string]interface{}) error {
|
||||
return err
|
||||
}
|
||||
|
||||
out["diskinfo"] = map[string]interface{}{
|
||||
out["diskinfo"] = map[string]any{
|
||||
"fstype": dinfo.FsType,
|
||||
"total_space": dinfo.Total,
|
||||
"free_space": dinfo.Free,
|
||||
@ -108,8 +108,8 @@ func diskSpaceInfo(out map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func memInfo(out map[string]interface{}) error {
|
||||
m := make(map[string]interface{})
|
||||
func memInfo(out map[string]any) error {
|
||||
m := make(map[string]any)
|
||||
|
||||
meminf, err := sysi.MemoryInfo()
|
||||
if err != nil {
|
||||
@ -122,8 +122,8 @@ func memInfo(out map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func netInfo(online bool, out map[string]interface{}) error {
|
||||
n := make(map[string]interface{})
|
||||
func netInfo(online bool, out map[string]any) error {
|
||||
n := make(map[string]any)
|
||||
addrs, err := manet.InterfaceMultiaddrs()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@ -37,7 +37,7 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
|
||||
nodes := make([]*core.IpfsNode, n)
|
||||
apis := make([]coreiface.CoreAPI, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
for i := range n {
|
||||
var ident config.Identity
|
||||
if fullIdentity {
|
||||
sk, pk, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
@ -281,9 +282,7 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
|
||||
}
|
||||
|
||||
// Add default implicit known gateways, such as subdomain gateway on localhost.
|
||||
for hostname, gw := range defaultKnownGateways {
|
||||
gwCfg.PublicGateways[hostname] = gw
|
||||
}
|
||||
maps.Copy(gwCfg.PublicGateways, defaultKnownGateways)
|
||||
|
||||
// Apply values from cfg.Gateway.PublicGateways if they exist.
|
||||
for hostname, gw := range cfg.Gateway.PublicGateways {
|
||||
|
||||
@ -19,7 +19,7 @@ func TestPeersTotal(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
hosts := make([]*bhost.BasicHost, 4)
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
var err error
|
||||
hosts[i], err = bhost.NewHost(swarmt.GenSwarm(t), nil)
|
||||
if err != nil {
|
||||
|
||||
@ -46,7 +46,7 @@ type UnixfsAddSettings struct {
|
||||
FsCache bool
|
||||
NoCopy bool
|
||||
|
||||
Events chan<- interface{}
|
||||
Events chan<- any
|
||||
Silent bool
|
||||
Progress bool
|
||||
|
||||
@ -320,7 +320,7 @@ func (unixfsOpts) HashOnly(hashOnly bool) UnixfsAddOption {
|
||||
// Add operation.
|
||||
//
|
||||
// Note that if this channel blocks it may slowdown the adder
|
||||
func (unixfsOpts) Events(sink chan<- interface{}) UnixfsAddOption {
|
||||
func (unixfsOpts) Events(sink chan<- any) UnixfsAddOption {
|
||||
return func(settings *UnixfsAddSettings) error {
|
||||
settings.Events = sink
|
||||
return nil
|
||||
|
||||
@ -377,14 +377,12 @@ func (tp *TestSuite) TestAdd(t *testing.T) {
|
||||
// handle events if relevant to test case
|
||||
|
||||
opts := testCase.opts
|
||||
eventOut := make(chan interface{})
|
||||
eventOut := make(chan any)
|
||||
var evtWg sync.WaitGroup
|
||||
if len(testCase.events) > 0 {
|
||||
opts = append(opts, options.Unixfs.Events(eventOut))
|
||||
evtWg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer evtWg.Done()
|
||||
evtWg.Go(func() {
|
||||
expected := testCase.events
|
||||
|
||||
for evt := range eventOut {
|
||||
@ -424,7 +422,7 @@ func (tp *TestSuite) TestAdd(t *testing.T) {
|
||||
if len(expected) > 0 {
|
||||
t.Errorf("%d event(s) didn't arrive", len(expected))
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
tapi, err := api.WithOptions(testCase.apiOpts...)
|
||||
@ -800,7 +798,7 @@ func (tp *TestSuite) TestLsNonUnixfs(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
nd, err := cbor.WrapObject(map[string]interface{}{"foo": "bar"}, math.MaxUint64, -1)
|
||||
nd, err := cbor.WrapObject(map[string]any{"foo": "bar"}, math.MaxUint64, -1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -14,12 +14,12 @@ import (
|
||||
|
||||
type AddEvent struct {
|
||||
Name string
|
||||
Path path.ImmutablePath `json:",omitempty"`
|
||||
Bytes int64 `json:",omitempty"`
|
||||
Size string `json:",omitempty"`
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
Mtime int64 `json:",omitempty"`
|
||||
MtimeNsecs int `json:",omitempty"`
|
||||
Path path.ImmutablePath
|
||||
Bytes int64 `json:",omitempty"`
|
||||
Size string `json:",omitempty"`
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
Mtime int64 `json:",omitempty"`
|
||||
MtimeNsecs int `json:",omitempty"`
|
||||
}
|
||||
|
||||
// FileType is an enum of possible UnixFS file types.
|
||||
|
||||
@ -60,10 +60,7 @@ func NewGC(n *core.IpfsNode) (*GC, error) {
|
||||
|
||||
// calculate the slack space between StorageMax and StorageGCWatermark
|
||||
// used to limit GC duration
|
||||
slackGB := (storageMax - storageGC) / 10e9
|
||||
if slackGB < 1 {
|
||||
slackGB = 1
|
||||
}
|
||||
slackGB := max((storageMax-storageGC)/10e9, 1)
|
||||
|
||||
return &GC{
|
||||
Node: n,
|
||||
|
||||
@ -75,7 +75,7 @@ type Adder struct {
|
||||
gcLocker bstore.GCLocker
|
||||
dagService ipld.DAGService
|
||||
bufferedDS *ipld.BufferedDAG
|
||||
Out chan<- interface{}
|
||||
Out chan<- any
|
||||
Progress bool
|
||||
Pin bool
|
||||
PinName string
|
||||
@ -576,7 +576,7 @@ func (adder *Adder) maybePauseForGC(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// outputDagnode sends dagnode info over the output channel
|
||||
func outputDagnode(out chan<- interface{}, name string, dn ipld.Node) error {
|
||||
func outputDagnode(out chan<- any, name string, dn ipld.Node) error {
|
||||
if out == nil {
|
||||
return nil
|
||||
}
|
||||
@ -614,7 +614,7 @@ func getOutput(dagnode ipld.Node) (*coreiface.AddEvent, error) {
|
||||
type progressReader struct {
|
||||
file io.Reader
|
||||
path string
|
||||
out chan<- interface{}
|
||||
out chan<- any
|
||||
bytes int64
|
||||
lastProgress int64
|
||||
}
|
||||
|
||||
@ -44,7 +44,7 @@ func TestAddMultipleGCLive(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out := make(chan interface{}, 10)
|
||||
out := make(chan any, 10)
|
||||
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -176,7 +176,7 @@ func TestAddGCLive(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out := make(chan interface{})
|
||||
out := make(chan any)
|
||||
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -291,7 +291,7 @@ func testAddWPosInfo(t *testing.T, rawLeaves bool) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out := make(chan interface{})
|
||||
out := make(chan any)
|
||||
adder.Out = out
|
||||
adder.Progress = true
|
||||
adder.RawLeaves = rawLeaves
|
||||
@ -382,4 +382,4 @@ func (fi *dummyFileInfo) Size() int64 { return fi.size }
|
||||
func (fi *dummyFileInfo) Mode() os.FileMode { return 0 }
|
||||
func (fi *dummyFileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi *dummyFileInfo) IsDir() bool { return false }
|
||||
func (fi *dummyFileInfo) Sys() interface{} { return nil }
|
||||
func (fi *dummyFileInfo) Sys() any { return nil }
|
||||
|
||||
@ -47,7 +47,7 @@ type bitswapOptionsOut struct {
|
||||
|
||||
// BitswapOptions creates configuration options for Bitswap from the config file
|
||||
// and whether to provide data.
|
||||
func BitswapOptions(cfg *config.Config) interface{} {
|
||||
func BitswapOptions(cfg *config.Config) any {
|
||||
return func() bitswapOptionsOut {
|
||||
var internalBsCfg config.InternalBitswap
|
||||
if cfg.Internal.Bitswap != nil {
|
||||
@ -81,7 +81,7 @@ type bitswapIn struct {
|
||||
// Bitswap creates the BitSwap server/client instance.
|
||||
// If Bitswap.ServerEnabled is false, the node will act only as a client
|
||||
// using an empty blockstore to prevent serving blocks to other peers.
|
||||
func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} {
|
||||
func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) any {
|
||||
return func(in bitswapIn, lc fx.Lifecycle) (*bitswap.Bitswap, error) {
|
||||
var bitswapNetworks, bitswapLibp2p network.BitSwapNetwork
|
||||
var bitswapBlockstore blockstore.Blockstore = in.Bs
|
||||
@ -206,7 +206,7 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} {
|
||||
|
||||
// OnlineExchange creates new LibP2P backed block exchange.
|
||||
// Returns a no-op exchange if Bitswap is disabled.
|
||||
func OnlineExchange(isBitswapActive bool) interface{} {
|
||||
func OnlineExchange(isBitswapActive bool) any {
|
||||
return func(in *bitswap.Bitswap, lc fx.Lifecycle) exchange.Interface {
|
||||
if !isBitswapActive {
|
||||
return &noopExchange{closer: in}
|
||||
|
||||
@ -40,7 +40,7 @@ func (lcss *lcStartStop) Append(f func() func()) {
|
||||
})
|
||||
}
|
||||
|
||||
func maybeProvide(opt interface{}, enable bool) fx.Option {
|
||||
func maybeProvide(opt any, enable bool) fx.Option {
|
||||
if enable {
|
||||
return fx.Provide(opt)
|
||||
}
|
||||
@ -48,7 +48,7 @@ func maybeProvide(opt interface{}, enable bool) fx.Option {
|
||||
}
|
||||
|
||||
// nolint unused
|
||||
func maybeInvoke(opt interface{}, enable bool) fx.Option {
|
||||
func maybeInvoke(opt any, enable bool) fx.Option {
|
||||
if enable {
|
||||
return fx.Invoke(opt)
|
||||
}
|
||||
|
||||
@ -99,7 +99,7 @@ func makeAddrsFactory(announce []string, appendAnnounce []string, noAnnounce []s
|
||||
}, nil
|
||||
}
|
||||
|
||||
func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) interface{} {
|
||||
func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) any {
|
||||
return func(params struct {
|
||||
fx.In
|
||||
ForgeMgr *p2pforge.P2PForgeCertMgr `optional:"true"`
|
||||
@ -124,7 +124,7 @@ func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []strin
|
||||
}
|
||||
}
|
||||
|
||||
func ListenOn(addresses []string) interface{} {
|
||||
func ListenOn(addresses []string) any {
|
||||
return func() (opts Libp2pOpts) {
|
||||
return Libp2pOpts{
|
||||
Opts: []libp2p.Option{
|
||||
@ -134,7 +134,7 @@ func ListenOn(addresses []string) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func P2PForgeCertMgr(repoPath string, cfg config.AutoTLS, atlsLog *logging.ZapEventLogger) interface{} {
|
||||
func P2PForgeCertMgr(repoPath string, cfg config.AutoTLS, atlsLog *logging.ZapEventLogger) any {
|
||||
return func() (*p2pforge.P2PForgeCertMgr, error) {
|
||||
storagePath := filepath.Join(repoPath, "p2p-forge-certs")
|
||||
rawLogger := atlsLog.Desugar()
|
||||
|
||||
@ -25,7 +25,7 @@ type pubsubParams struct {
|
||||
Discovery discovery.Discovery
|
||||
}
|
||||
|
||||
func FloodSub(pubsubOptions ...pubsub.Option) interface{} {
|
||||
func FloodSub(pubsubOptions ...pubsub.Option) any {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, params pubsubParams) (service *pubsub.PubSub, err error) {
|
||||
return pubsub.NewFloodSub(
|
||||
helpers.LifecycleCtx(mctx, lc),
|
||||
@ -37,7 +37,7 @@ func FloodSub(pubsubOptions ...pubsub.Option) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func GossipSub(pubsubOptions ...pubsub.Option) interface{} {
|
||||
func GossipSub(pubsubOptions ...pubsub.Option) any {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, params pubsubParams) (service *pubsub.PubSub, err error) {
|
||||
return pubsub.NewGossipSub(
|
||||
helpers.LifecycleCtx(mctx, lc),
|
||||
|
||||
@ -28,7 +28,7 @@ const NetLimitTraceFilename = "rcmgr.json.gz"
|
||||
|
||||
var ErrNoResourceMgr = errors.New("missing ResourceMgr: make sure the daemon is running with Swarm.ResourceMgr.Enabled")
|
||||
|
||||
func ResourceManager(repoPath string, cfg config.SwarmConfig, userResourceOverrides rcmgr.PartialLimitConfig) interface{} {
|
||||
func ResourceManager(repoPath string, cfg config.SwarmConfig, userResourceOverrides rcmgr.PartialLimitConfig) any {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (network.ResourceManager, Libp2pOpts, error) {
|
||||
var manager network.ResourceManager
|
||||
var opts Libp2pOpts
|
||||
@ -231,8 +231,8 @@ func (u ResourceLimitsAndUsage) ToResourceLimits() rcmgr.ResourceLimits {
|
||||
type LimitsConfigAndUsage struct {
|
||||
// This is duplicated from rcmgr.ResourceManagerStat but using ResourceLimitsAndUsage
|
||||
// instead of network.ScopeStat.
|
||||
System ResourceLimitsAndUsage `json:",omitempty"`
|
||||
Transient ResourceLimitsAndUsage `json:",omitempty"`
|
||||
System ResourceLimitsAndUsage
|
||||
Transient ResourceLimitsAndUsage
|
||||
Services map[string]ResourceLimitsAndUsage `json:",omitempty"`
|
||||
Protocols map[protocol.ID]ResourceLimitsAndUsage `json:",omitempty"`
|
||||
Peers map[peer.ID]ResourceLimitsAndUsage `json:",omitempty"`
|
||||
|
||||
@ -36,7 +36,7 @@ func TestLoggingResourceManager(t *testing.T) {
|
||||
}
|
||||
|
||||
// 2 of these should result in resource limit exceeded errors and subsequent log messages
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
_, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234"))
|
||||
}
|
||||
|
||||
|
||||
@ -63,7 +63,7 @@ type processInitialRoutingOut struct {
|
||||
|
||||
type AddrInfoChan chan peer.AddrInfo
|
||||
|
||||
func BaseRouting(cfg *config.Config) interface{} {
|
||||
func BaseRouting(cfg *config.Config) any {
|
||||
return func(lc fx.Lifecycle, in processInitialRoutingIn) (out processInitialRoutingOut, err error) {
|
||||
var dualDHT *ddht.DHT
|
||||
if dht, ok := in.Router.(*ddht.DHT); ok {
|
||||
|
||||
@ -8,7 +8,7 @@ import (
|
||||
tls "github.com/libp2p/go-libp2p/p2p/security/tls"
|
||||
)
|
||||
|
||||
func Security(enabled bool, tptConfig config.Transports) interface{} {
|
||||
func Security(enabled bool, tptConfig config.Transports) any {
|
||||
if !enabled {
|
||||
return func() (opts Libp2pOpts) {
|
||||
log.Errorf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
|
||||
|
||||
@ -12,7 +12,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
)
|
||||
|
||||
func TopicDiscovery() interface{} {
|
||||
func TopicDiscovery() any {
|
||||
return func(host host.Host, cr routing.ContentRouting) (service discovery.Discovery, err error) {
|
||||
baseDisc := disc.NewRoutingDiscovery(cr)
|
||||
minBackoff, maxBackoff := time.Second*60, time.Hour
|
||||
|
||||
@ -17,7 +17,7 @@ import (
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
func Transports(tptConfig config.Transports) interface{} {
|
||||
func Transports(tptConfig config.Transports) any {
|
||||
return func(params struct {
|
||||
fx.In
|
||||
Fprint PNetFingerprint `optional:"true"`
|
||||
|
||||
@ -401,11 +401,11 @@ func TestFSThrash(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
// Spawn off workers to make directories
|
||||
for i := 0; i < ndirWorkers; i++ {
|
||||
for i := range ndirWorkers {
|
||||
wg.Add(1)
|
||||
go func(worker int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < ndirs; j++ {
|
||||
for j := range ndirs {
|
||||
dirlock.RLock()
|
||||
n := mrand.Intn(len(dirs))
|
||||
dir := dirs[n]
|
||||
@ -425,11 +425,11 @@ func TestFSThrash(t *testing.T) {
|
||||
}
|
||||
|
||||
// Spawn off workers to make files
|
||||
for i := 0; i < nfileWorkers; i++ {
|
||||
for i := range nfileWorkers {
|
||||
wg.Add(1)
|
||||
go func(worker int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < nfiles; j++ {
|
||||
for j := range nfiles {
|
||||
dirlock.RLock()
|
||||
n := mrand.Intn(len(dirs))
|
||||
dir := dirs[n]
|
||||
@ -478,7 +478,7 @@ func TestMultiWrite(t *testing.T) {
|
||||
}
|
||||
|
||||
data := randBytes(1001)
|
||||
for i := 0; i < len(data); i++ {
|
||||
for i := range data {
|
||||
n, err := fi.Write(data[i : i+1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@ -29,7 +29,7 @@ import (
|
||||
|
||||
func init() {
|
||||
if os.Getenv("IPFS_FUSE_DEBUG") != "" {
|
||||
fuse.Debug = func(msg interface{}) {
|
||||
fuse.Debug = func(msg any) {
|
||||
fmt.Println(msg)
|
||||
}
|
||||
}
|
||||
|
||||
@ -237,7 +237,7 @@ func TestConcurrentRW(t *testing.T) {
|
||||
|
||||
t.Run("write", func(t *testing.T) {
|
||||
errs := make(chan (error), 1)
|
||||
for i := 0; i < files; i++ {
|
||||
for i := range files {
|
||||
go func() {
|
||||
var err error
|
||||
defer func() { errs <- err }()
|
||||
@ -254,7 +254,7 @@ func TestConcurrentRW(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < files; i++ {
|
||||
for range files {
|
||||
err := <-errs
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -285,7 +285,7 @@ func TestConcurrentRW(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < files; i++ {
|
||||
for range files {
|
||||
err := <-errs
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@ -77,7 +77,7 @@ func UnmountCmd(point string) (*exec.Cmd, error) {
|
||||
// Attempts a given number of times.
|
||||
func ForceUnmountManyTimes(m Mount, attempts int) error {
|
||||
var err error
|
||||
for i := 0; i < attempts; i++ {
|
||||
for range attempts {
|
||||
err = ForceUnmount(m)
|
||||
if err == nil {
|
||||
return err
|
||||
|
||||
@ -86,25 +86,19 @@ func doMount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error {
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wg.Go(func() {
|
||||
fsmount, err1 = rofs.Mount(node, fsdir)
|
||||
}()
|
||||
})
|
||||
|
||||
if node.IsOnline {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wg.Go(func() {
|
||||
nsmount, err2 = ipns.Mount(node, nsdir, fsdir)
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wg.Go(func() {
|
||||
mfmount, err3 = mfs.Mount(node, mfsdir)
|
||||
}()
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
|
||||
|
||||
@ -141,14 +141,14 @@ func TestIpfsStressRead(t *testing.T) {
|
||||
ndiriter := 50
|
||||
|
||||
// Make a bunch of objects
|
||||
for i := 0; i < nobj; i++ {
|
||||
for range nobj {
|
||||
fi, _ := randObj(t, nd, rand.Int63n(50000))
|
||||
nodes = append(nodes, fi)
|
||||
paths = append(paths, fi.Cid().String())
|
||||
}
|
||||
|
||||
// Now make a bunch of dirs
|
||||
for i := 0; i < ndiriter; i++ {
|
||||
for range ndiriter {
|
||||
db, err := uio.NewDirectory(nd.DAG)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -180,12 +180,10 @@ func TestIpfsStressRead(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
errs := make(chan error)
|
||||
|
||||
for s := 0; s < 4; s++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 4 {
|
||||
wg.Go(func() {
|
||||
|
||||
for i := 0; i < 2000; i++ {
|
||||
for range 2000 {
|
||||
item, err := path.NewPath("/ipfs/" + paths[rand.Intn(len(paths))])
|
||||
if err != nil {
|
||||
errs <- err
|
||||
@ -220,7 +218,7 @@ func TestIpfsStressRead(t *testing.T) {
|
||||
errs <- errors.New("incorrect read")
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
||||
@ -34,7 +34,7 @@ func TestGC(t *testing.T) {
|
||||
var expectedDiscarded []multihash.Multihash
|
||||
|
||||
// add some pins
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
// direct
|
||||
root, _, err := daggen.MakeDagNode(dserv.Add, 0, 1)
|
||||
require.NoError(t, err)
|
||||
@ -54,7 +54,7 @@ func TestGC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// add more dags to be GCed
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
_, allCids, err := daggen.MakeDagNode(dserv.Add, 5, 2)
|
||||
require.NoError(t, err)
|
||||
expectedDiscarded = append(expectedDiscarded, toMHs(allCids)...)
|
||||
@ -62,7 +62,7 @@ func TestGC(t *testing.T) {
|
||||
|
||||
// and some other as "best effort roots"
|
||||
var bestEffortRoots []cid.Cid
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
root, allCids, err := daggen.MakeDagNode(dserv.Add, 5, 2)
|
||||
require.NoError(t, err)
|
||||
bestEffortRoots = append(bestEffortRoots, root)
|
||||
|
||||
@ -11,7 +11,7 @@ type Environment struct {
//
// This is an arbitrary JSON-like object unmarshaled into an interface{}
// according to https://golang.org/pkg/encoding/json/#Unmarshal.
Config interface{}
Config any
}

// Plugin is the base interface for all kinds of go-ipfs plugins

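Worth noting that this rewrite cannot change behaviour: `any` has been a plain alias for `interface{}` since Go 1.18, so the two declarations denote the identical type. A tiny illustrative check:

```go
package main

import "fmt"

func main() {
	// `any` is an alias for `interface{}`, so assigning between the
	// two forms needs no conversion and %T prints the same type.
	var a interface{} = map[string]any{"Enabled": true}
	var b any = a

	fmt.Printf("%T %T\n", a, b)
}
```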
@ -52,7 +52,7 @@ type datastoreConfig struct {
|
||||
// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore
|
||||
// from the given parameters.
|
||||
func (*badgerdsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
|
||||
return func(params map[string]any) (fsrepo.DatastoreConfig, error) {
|
||||
var c datastoreConfig
|
||||
var ok bool
|
||||
|
||||
@ -104,7 +104,7 @@ func (*badgerdsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
}
|
||||
|
||||
func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec {
|
||||
return map[string]interface{}{
|
||||
return map[string]any{
|
||||
"type": "badgerds",
|
||||
"path": c.path,
|
||||
}
|
||||
|
||||
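The datastore plugins above all follow the same shape: a `ConfigFromMap` parser pulls typed fields out of a `map[string]any` spec with ok-checked assertions, and `DiskSpec` returns a small `map[string]any` describing the on-disk layout. A minimal sketch of that lookup style (the helper name and error text are illustrative, not Kubo's):

```go
package main

import "fmt"

// pathFromParams shows the ok-checked lookup these parsers use; the
// params map comes from a JSON datastore spec (hypothetical example).
func pathFromParams(params map[string]any) (string, error) {
	p, ok := params["path"].(string)
	if !ok {
		return "", fmt.Errorf("'path' field is missing or not a string")
	}
	return p, nil
}

func main() {
	p, err := pathFromParams(map[string]any{"type": "badgerds", "path": "badgerds"})
	fmt.Println(p, err)
}
```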
@ -45,7 +45,7 @@ type datastoreConfig struct {
|
||||
// DatastoreConfigParser returns a configuration stub for a flatfs datastore
|
||||
// from the given parameters.
|
||||
func (*flatfsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
|
||||
return func(params map[string]any) (fsrepo.DatastoreConfig, error) {
|
||||
var c datastoreConfig
|
||||
var ok bool
|
||||
var err error
|
||||
@ -73,7 +73,7 @@ func (*flatfsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
}
|
||||
|
||||
func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec {
|
||||
return map[string]interface{}{
|
||||
return map[string]any{
|
||||
"type": "flatfs",
|
||||
"path": c.path,
|
||||
"shardFunc": c.shardFun.String(),
|
||||
|
||||
@ -45,7 +45,7 @@ type datastoreConfig struct {
|
||||
// DatastoreConfigParser returns a configuration stub for a badger datastore
|
||||
// from the given parameters.
|
||||
func (*leveldsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
|
||||
return func(params map[string]any) (fsrepo.DatastoreConfig, error) {
|
||||
var c datastoreConfig
|
||||
var ok bool
|
||||
|
||||
@ -70,7 +70,7 @@ func (*leveldsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
|
||||
}
|
||||
|
||||
func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec {
|
||||
return map[string]interface{}{
|
||||
return map[string]any{
|
||||
"type": "levelds",
|
||||
"path": c.path,
|
||||
}
|
||||
|
||||
@ -175,7 +175,7 @@ func getConfigInt(name string, params map[string]any) (int, error) {
|
||||
}
|
||||
|
||||
func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec {
|
||||
return map[string]interface{}{
|
||||
return map[string]any{
|
||||
"type": "pebbleds",
|
||||
"path": c.path,
|
||||
}
|
||||
|
||||
@ -74,12 +74,12 @@ func (*peerLogPlugin) Version() string {
// since it is internal-only, unsupported functionality.
// For supported functionality, we should rework the plugin API to support this use case
// of including plugins that are disabled by default.
func extractEnabled(config interface{}) bool {
func extractEnabled(config any) bool {
// plugin is disabled by default, unless Enabled=true
if config == nil {
return false
}
mapIface, ok := config.(map[string]interface{})
mapIface, ok := config.(map[string]any)
if !ok {
return false
}
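The `extractEnabled` change above is the common pattern for reading a field out of an untyped plugin config: assert the `any` value to `map[string]any`, then assert the field to the expected type, defaulting to false on any mismatch. A small self-contained sketch (the helper name is made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// extractFlag mirrors the shape of extractEnabled: an untyped config is
// asserted to map[string]any before a boolean field is read.
func extractFlag(config any, key string) bool {
	m, ok := config.(map[string]any)
	if !ok {
		return false
	}
	v, ok := m[key].(bool)
	return ok && v
}

func main() {
	var cfg any
	_ = json.Unmarshal([]byte(`{"Enabled": true}`), &cfg)
	fmt.Println(extractFlag(cfg, "Enabled")) // true
}
```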
@ -123,7 +123,7 @@ func (pl *peerLogPlugin) collectEvents(node *core.IpfsNode) {
|
||||
// don't immediately run into this situation
|
||||
// again.
|
||||
loop:
|
||||
for i := 0; i < busyDropAmount; i++ {
|
||||
for range busyDropAmount {
|
||||
select {
|
||||
case <-pl.events:
|
||||
dropped++
|
||||
|
||||
@ -5,7 +5,7 @@ import "testing"
|
||||
func TestExtractEnabled(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
name string
|
||||
config interface{}
|
||||
config any
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
@ -20,22 +20,22 @@ func TestExtractEnabled(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "returns false when config has no Enabled field",
|
||||
config: map[string]interface{}{},
|
||||
config: map[string]any{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config has a null Enabled field",
|
||||
config: map[string]interface{}{"Enabled": nil},
|
||||
config: map[string]any{"Enabled": nil},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config has a non-boolean Enabled field",
|
||||
config: map[string]interface{}{"Enabled": 1},
|
||||
config: map[string]any{"Enabled": 1},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns the value of the Enabled field",
|
||||
config: map[string]interface{}{"Enabled": true},
|
||||
config: map[string]any{"Enabled": true},
|
||||
expected: true,
|
||||
},
|
||||
} {
|
||||
|
||||
@ -148,12 +148,12 @@ func (p *telemetryPlugin) Version() string {
|
||||
return "0.0.1"
|
||||
}
|
||||
|
||||
func readFromConfig(cfg interface{}, key string) string {
|
||||
func readFromConfig(cfg any, key string) string {
|
||||
if cfg == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
pcfg, ok := cfg.(map[string]interface{})
|
||||
pcfg, ok := cfg.(map[string]any)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
@ -95,7 +95,7 @@ func makeNode(t *testing.T) (node *core.IpfsNode, repopath string) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg.Datastore.Spec = map[string]interface{}{
|
||||
cfg.Datastore.Spec = map[string]any{
|
||||
"type": "pebbleds",
|
||||
"prefix": "pebble.datastore",
|
||||
"path": "pebbleds",
|
||||
|
||||
@ -6,16 +6,16 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MapGetKV(v map[string]interface{}, key string) (interface{}, error) {
|
||||
func MapGetKV(v map[string]any, key string) (any, error) {
|
||||
var ok bool
|
||||
var mcursor map[string]interface{}
|
||||
var cursor interface{} = v
|
||||
var mcursor map[string]any
|
||||
var cursor any = v
|
||||
|
||||
parts := strings.Split(key, ".")
|
||||
for i, part := range parts {
|
||||
sofar := strings.Join(parts[:i], ".")
|
||||
|
||||
mcursor, ok = cursor.(map[string]interface{})
|
||||
mcursor, ok = cursor.(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s key is not a map", sofar)
|
||||
}
|
||||
@ -34,14 +34,14 @@ func MapGetKV(v map[string]interface{}, key string) (interface{}, error) {
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
func MapSetKV(v map[string]interface{}, key string, value interface{}) error {
|
||||
func MapSetKV(v map[string]any, key string, value any) error {
|
||||
var ok bool
|
||||
var mcursor map[string]interface{}
|
||||
var cursor interface{} = v
|
||||
var mcursor map[string]any
|
||||
var cursor any = v
|
||||
|
||||
parts := strings.Split(key, ".")
|
||||
for i, part := range parts {
|
||||
mcursor, ok = cursor.(map[string]interface{})
|
||||
mcursor, ok = cursor.(map[string]any)
|
||||
if !ok {
|
||||
sofar := strings.Join(parts[:i], ".")
|
||||
return fmt.Errorf("%s key is not a map", sofar)
|
||||
@ -55,7 +55,7 @@ func MapSetKV(v map[string]interface{}, key string, value interface{}) error {
|
||||
|
||||
cursor, ok = mcursor[part]
|
||||
if !ok || cursor == nil { // create map if this is empty or is null
|
||||
mcursor[part] = map[string]interface{}{}
|
||||
mcursor[part] = map[string]any{}
|
||||
cursor = mcursor[part]
|
||||
}
|
||||
}
|
||||
@ -64,20 +64,20 @@ func MapSetKV(v map[string]interface{}, key string, value interface{}) error {

// MapMergeDeep merges the right map into the left map, recursively traversing
// child maps until a non-map value is found.
func MapMergeDeep(left, right map[string]interface{}) map[string]interface{} {
func MapMergeDeep(left, right map[string]any) map[string]any {
// We want to alter a copy of the map, not the original
result := maps.Clone(left)
if result == nil {
result = make(map[string]interface{})
result = make(map[string]any)
}

for key, rightVal := range right {
// If right value is a map
if rightMap, ok := rightVal.(map[string]interface{}); ok {
if rightMap, ok := rightVal.(map[string]any); ok {
// If key is in left
if leftVal, found := result[key]; found {
// If left value is also a map
if leftMap, ok := leftVal.(map[string]interface{}); ok {
if leftMap, ok := leftVal.(map[string]any); ok {
// Merge nested map
result[key] = MapMergeDeep(leftMap, rightMap)
continue

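`MapMergeDeep` starts from `maps.Clone` (Go 1.21+) so that it mutates a copy rather than the caller's map; the `result == nil` check is needed because cloning a nil map returns nil. A minimal sketch of that behaviour:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	left := map[string]any{"A": "Hello World"}

	// maps.Clone returns a shallow copy; mutating the copy leaves the
	// original untouched.
	result := maps.Clone(left)
	result["A"] = "Foo"

	fmt.Println(left["A"], result["A"]) // Hello World Foo

	// Cloning a nil map yields nil, hence the make() fallback above.
	fmt.Println(maps.Clone(map[string]any(nil)) == nil) // true
}
```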
@ -7,10 +7,10 @@ import (
|
||||
)
|
||||
|
||||
func TestMapMergeDeepReturnsNew(t *testing.T) {
|
||||
leftMap := make(map[string]interface{})
|
||||
leftMap := make(map[string]any)
|
||||
leftMap["A"] = "Hello World"
|
||||
|
||||
rightMap := make(map[string]interface{})
|
||||
rightMap := make(map[string]any)
|
||||
rightMap["A"] = "Foo"
|
||||
|
||||
MapMergeDeep(leftMap, rightMap)
|
||||
@ -19,7 +19,7 @@ func TestMapMergeDeepReturnsNew(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMapMergeDeepNewKey(t *testing.T) {
|
||||
leftMap := make(map[string]interface{})
|
||||
leftMap := make(map[string]any)
|
||||
leftMap["A"] = "Hello World"
|
||||
/*
|
||||
leftMap
|
||||
@ -28,7 +28,7 @@ func TestMapMergeDeepNewKey(t *testing.T) {
|
||||
}
|
||||
*/
|
||||
|
||||
rightMap := make(map[string]interface{})
|
||||
rightMap := make(map[string]any)
|
||||
rightMap["B"] = "Bar"
|
||||
/*
|
||||
rightMap
|
||||
@ -50,11 +50,11 @@ func TestMapMergeDeepNewKey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMapMergeDeepRecursesOnMaps(t *testing.T) {
|
||||
leftMapA := make(map[string]interface{})
|
||||
leftMapA := make(map[string]any)
|
||||
leftMapA["B"] = "A value!"
|
||||
leftMapA["C"] = "Another value!"
|
||||
|
||||
leftMap := make(map[string]interface{})
|
||||
leftMap := make(map[string]any)
|
||||
leftMap["A"] = leftMapA
|
||||
/*
|
||||
leftMap
|
||||
@ -66,10 +66,10 @@ func TestMapMergeDeepRecursesOnMaps(t *testing.T) {
|
||||
}
|
||||
*/
|
||||
|
||||
rightMapA := make(map[string]interface{})
|
||||
rightMapA := make(map[string]any)
|
||||
rightMapA["C"] = "A different value!"
|
||||
|
||||
rightMap := make(map[string]interface{})
|
||||
rightMap := make(map[string]any)
|
||||
rightMap["A"] = rightMapA
|
||||
/*
|
||||
rightMap
|
||||
@ -91,16 +91,16 @@ func TestMapMergeDeepRecursesOnMaps(t *testing.T) {
|
||||
}
|
||||
*/
|
||||
|
||||
resultA := result["A"].(map[string]interface{})
|
||||
resultA := result["A"].(map[string]any)
|
||||
require.Equal(t, "A value!", resultA["B"], "Unaltered values should not change")
|
||||
require.Equal(t, "A different value!", resultA["C"], "Nested values should be altered")
|
||||
}
|
||||
|
||||
func TestMapMergeDeepRightNotAMap(t *testing.T) {
|
||||
leftMapA := make(map[string]interface{})
|
||||
leftMapA := make(map[string]any)
|
||||
leftMapA["B"] = "A value!"
|
||||
|
||||
leftMap := make(map[string]interface{})
|
||||
leftMap := make(map[string]any)
|
||||
leftMap["A"] = leftMapA
|
||||
/*
|
||||
origMap
|
||||
@ -111,7 +111,7 @@ func TestMapMergeDeepRightNotAMap(t *testing.T) {
|
||||
}
|
||||
*/
|
||||
|
||||
rightMap := make(map[string]interface{})
|
||||
rightMap := make(map[string]any)
|
||||
rightMap["A"] = "Not a map!"
|
||||
/*
|
||||
newMap
|
||||
|
||||
@ -123,7 +123,7 @@ func TestLevelDbConfig(t *testing.T) {
|
||||
}
|
||||
dir := t.TempDir()
|
||||
|
||||
spec := make(map[string]interface{})
|
||||
spec := make(map[string]any)
|
||||
err = json.Unmarshal(leveldbConfig, &spec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -157,7 +157,7 @@ func TestFlatfsConfig(t *testing.T) {
|
||||
}
|
||||
dir := t.TempDir()
|
||||
|
||||
spec := make(map[string]interface{})
|
||||
spec := make(map[string]any)
|
||||
err = json.Unmarshal(flatfsConfig, &spec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -191,7 +191,7 @@ func TestMeasureConfig(t *testing.T) {
|
||||
}
|
||||
dir := t.TempDir()
|
||||
|
||||
spec := make(map[string]interface{})
|
||||
spec := make(map[string]any)
|
||||
err = json.Unmarshal(measureConfig, &spec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// ConfigFromMap creates a new datastore config from a map.
|
||||
type ConfigFromMap func(map[string]interface{}) (DatastoreConfig, error)
|
||||
type ConfigFromMap func(map[string]any) (DatastoreConfig, error)
|
||||
|
||||
// DatastoreConfig is an abstraction of a datastore config. A "spec" is first
|
||||
// converted to a DatastoreConfig and then Create() is called to instantiate a
|
||||
@ -35,7 +35,7 @@ type DatastoreConfig interface {
|
||||
// completely different datastores and a migration will be performed. Runtime
|
||||
// values such as cache options or concurrency options should not be added
|
||||
// here.
|
||||
type DiskSpec map[string]interface{}
|
||||
type DiskSpec map[string]any
|
||||
|
||||
// Bytes returns a minimal JSON encoding of the DiskSpec.
|
||||
func (spec DiskSpec) Bytes() []byte {
|
||||
@ -75,7 +75,7 @@ func AddDatastoreConfigHandler(name string, dsc ConfigFromMap) error {
|
||||
|
||||
// AnyDatastoreConfig returns a DatastoreConfig from a spec based on
|
||||
// the "type" parameter.
|
||||
func AnyDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) {
|
||||
func AnyDatastoreConfig(params map[string]any) (DatastoreConfig, error) {
|
||||
which, ok := params["type"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("'type' field missing or not a string")
|
||||
@ -97,14 +97,14 @@ type premount struct {
|
||||
}
|
||||
|
||||
// MountDatastoreConfig returns a mount DatastoreConfig from a spec.
|
||||
func MountDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) {
|
||||
func MountDatastoreConfig(params map[string]any) (DatastoreConfig, error) {
|
||||
var res mountDatastoreConfig
|
||||
mounts, ok := params["mounts"].([]interface{})
|
||||
mounts, ok := params["mounts"].([]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("'mounts' field is missing or not an array")
|
||||
}
|
||||
for _, iface := range mounts {
|
||||
cfg, ok := iface.(map[string]interface{})
|
||||
cfg, ok := iface.(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected map for mountpoint")
|
||||
}
|
||||
@ -133,12 +133,12 @@ func MountDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error
|
||||
}
|
||||
|
||||
func (c *mountDatastoreConfig) DiskSpec() DiskSpec {
|
||||
cfg := map[string]interface{}{"type": "mount"}
|
||||
mounts := make([]interface{}, len(c.mounts))
|
||||
cfg := map[string]any{"type": "mount"}
|
||||
mounts := make([]any, len(c.mounts))
|
||||
for i, m := range c.mounts {
|
||||
c := m.ds.DiskSpec()
|
||||
if c == nil {
|
||||
c = make(map[string]interface{})
|
||||
c = make(map[string]any)
|
||||
}
|
||||
c["mountpoint"] = m.prefix.String()
|
||||
mounts[i] = c
|
||||
@ -161,11 +161,11 @@ func (c *mountDatastoreConfig) Create(path string) (repo.Datastore, error) {
|
||||
}
|
||||
|
||||
type memDatastoreConfig struct {
|
||||
cfg map[string]interface{}
|
||||
cfg map[string]any
|
||||
}
|
||||
|
||||
// MemDatastoreConfig returns a memory DatastoreConfig from a spec.
|
||||
func MemDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) {
|
||||
func MemDatastoreConfig(params map[string]any) (DatastoreConfig, error) {
|
||||
return &memDatastoreConfig{params}, nil
|
||||
}
|
||||
|
||||
@ -183,8 +183,8 @@ type logDatastoreConfig struct {
|
||||
}
|
||||
|
||||
// LogDatastoreConfig returns a log DatastoreConfig from a spec.
|
||||
func LogDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) {
|
||||
childField, ok := params["child"].(map[string]interface{})
|
||||
func LogDatastoreConfig(params map[string]any) (DatastoreConfig, error) {
|
||||
childField, ok := params["child"].(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("'child' field is missing or not a map")
|
||||
}
|
||||
@ -217,8 +217,8 @@ type measureDatastoreConfig struct {
|
||||
}
|
||||
|
||||
// MeasureDatastoreConfig returns a measure DatastoreConfig from a spec.
|
||||
func MeasureDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) {
|
||||
childField, ok := params["child"].(map[string]interface{})
|
||||
func MeasureDatastoreConfig(params map[string]any) (DatastoreConfig, error) {
|
||||
childField, ok := params["child"].(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("'child' field is missing or not a map")
|
||||
}
|
||||
|
||||
@ -279,7 +279,7 @@ func initConfig(path string, conf *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func initSpec(path string, conf map[string]interface{}) error {
|
||||
func initSpec(path string, conf map[string]any) error {
|
||||
fn, err := config.Path(path, specFn)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -651,7 +651,7 @@ func (r *FSRepo) SetConfig(updated *config.Config) error {
|
||||
// to avoid clobbering user-provided keys, must read the config from disk
|
||||
// as a map, write the updated struct values to the map and write the map
|
||||
// to disk.
|
||||
var mapconf map[string]interface{}
|
||||
var mapconf map[string]any
|
||||
if err := serialize.ReadConfigFile(r.configFilePath, &mapconf); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -670,7 +670,7 @@ func (r *FSRepo) SetConfig(updated *config.Config) error {
|
||||
}
|
||||
|
||||
// GetConfigKey retrieves only the value of a particular key.
|
||||
func (r *FSRepo) GetConfigKey(key string) (interface{}, error) {
|
||||
func (r *FSRepo) GetConfigKey(key string) (any, error) {
|
||||
packageLock.Lock()
|
||||
defer packageLock.Unlock()
|
||||
|
||||
@ -678,7 +678,7 @@ func (r *FSRepo) GetConfigKey(key string) (interface{}, error) {
|
||||
return nil, errors.New("repo is closed")
|
||||
}
|
||||
|
||||
var cfg map[string]interface{}
|
||||
var cfg map[string]any
|
||||
if err := serialize.ReadConfigFile(r.configFilePath, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -686,7 +686,7 @@ func (r *FSRepo) GetConfigKey(key string) (interface{}, error) {
|
||||
}
|
||||
|
||||
// SetConfigKey writes the value of a particular key.
|
||||
func (r *FSRepo) SetConfigKey(key string, value interface{}) error {
|
||||
func (r *FSRepo) SetConfigKey(key string, value any) error {
|
||||
packageLock.Lock()
|
||||
defer packageLock.Unlock()
|
||||
|
||||
@ -701,7 +701,7 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error {
|
||||
}
|
||||
|
||||
// Load into a map so we don't end up writing any additional defaults to the config file.
|
||||
var mapconf map[string]interface{}
|
||||
var mapconf map[string]any
|
||||
if err := serialize.ReadConfigFile(r.configFilePath, &mapconf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
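The `SetConfig`/`SetConfigKey` hunks above deliberately read the config file into a `map[string]any` instead of the typed config struct so that keys Kubo does not know about survive the write-back. A small illustrative round-trip (file contents and key names are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"Datastore":{"StorageMax":"10GB"},"CustomKey":"kept"}`)

	// Decoding into map[string]any (instead of a typed struct) preserves
	// keys the struct would silently drop when re-serialising.
	var conf map[string]any
	if err := json.Unmarshal(raw, &conf); err != nil {
		panic(err)
	}

	conf["Datastore"].(map[string]any)["StorageMax"] = "20GB"

	out, _ := json.Marshal(conf)
	fmt.Println(string(out)) // CustomKey survives the round-trip
}
```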
@ -15,7 +15,7 @@ import (
|
||||
func TestInitIdempotence(t *testing.T) {
|
||||
t.Parallel()
|
||||
path := t.TempDir()
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "multiple calls to init should succeed")
|
||||
}
|
||||
}
|
||||
|
||||
@ -187,7 +187,7 @@ func TestNoTempFilesAfterOperations(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Perform multiple operations
|
||||
for i := 0; i < testIterations; i++ {
|
||||
for i := range testIterations {
|
||||
path := filepath.Join(dir, fmt.Sprintf("test%d.txt", i))
|
||||
|
||||
af, err := New(path, 0644)
|
||||
|
||||
@ -207,7 +207,7 @@ func CopyField(config map[string]any, from, to string) error {
|
||||
}
|
||||
|
||||
// ConvertInterfaceSlice converts []interface{} to []string
|
||||
func ConvertInterfaceSlice(slice []interface{}) []string {
|
||||
func ConvertInterfaceSlice(slice []any) []string {
|
||||
result := make([]string, 0, len(slice))
|
||||
for _, item := range slice {
|
||||
if str, ok := item.(string); ok {
|
||||
@ -241,11 +241,11 @@ func SafeCastMap(value any) map[string]any {
|
||||
}
|
||||
|
||||
// SafeCastSlice safely casts to []interface{} with fallback to empty slice
|
||||
func SafeCastSlice(value any) []interface{} {
|
||||
if s, ok := value.([]interface{}); ok {
|
||||
func SafeCastSlice(value any) []any {
|
||||
if s, ok := value.([]any); ok {
|
||||
return s
|
||||
}
|
||||
return []interface{}{}
|
||||
return []any{}
|
||||
}
|
||||
|
||||
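`ConvertInterfaceSlice` and `SafeCastSlice` above exist because JSON arrays decode to `[]any`, so each element has to be type-asserted back to a string. A self-contained sketch of that conversion (the helper name here is illustrative):

```go
package main

import "fmt"

// toStrings keeps only the string elements of a decoded JSON array,
// mirroring what ConvertInterfaceSlice does.
func toStrings(slice []any) []string {
	result := make([]string, 0, len(slice))
	for _, item := range slice {
		if s, ok := item.(string); ok {
			result = append(result, s)
		}
	}
	return result
}

func main() {
	fmt.Println(toStrings([]any{"auto", 42, "keep"})) // [auto keep]
}
```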
// ReplaceDefaultsWithAuto replaces default values with "auto" in a map
|
||||
@ -271,7 +271,7 @@ func EnsureSliceContains(config map[string]any, path string, value string) {
|
||||
return
|
||||
}
|
||||
|
||||
if slice, ok := existing.([]interface{}); ok {
|
||||
if slice, ok := existing.([]any); ok {
|
||||
// Check if value already exists
|
||||
for _, item := range slice {
|
||||
if str, ok := item.(string); ok && str == value {
|
||||
@ -297,7 +297,7 @@ func ReplaceInSlice(config map[string]any, path string, oldValue, newValue strin
|
||||
return
|
||||
}
|
||||
|
||||
if slice, ok := existing.([]interface{}); ok {
|
||||
if slice, ok := existing.([]any); ok {
|
||||
result := make([]string, 0, len(slice))
|
||||
for _, item := range slice {
|
||||
if str, ok := item.(string); ok {
|
||||
@ -343,7 +343,7 @@ func IsEmptySlice(value any) bool {
|
||||
if value == nil {
|
||||
return true
|
||||
}
|
||||
if slice, ok := value.([]interface{}); ok {
|
||||
if slice, ok := value.([]any); ok {
|
||||
return len(slice) == 0
|
||||
}
|
||||
if slice, ok := value.([]string); ok {
|
||||
|
||||
@ -78,7 +78,7 @@ func AssertConfigField(t *testing.T, config map[string]any, path string, expecte
|
||||
// Handle different types of comparisons
|
||||
switch exp := expected.(type) {
|
||||
case []string:
|
||||
actualSlice, ok := actual.([]interface{})
|
||||
actualSlice, ok := actual.([]any)
|
||||
if !ok {
|
||||
t.Errorf("field %s: expected []string, got %T", path, actual)
|
||||
return
|
||||
@ -133,7 +133,7 @@ func CreateTestRepo(t *testing.T, version int, config map[string]any) string {

// Write version file
versionPath := filepath.Join(tempDir, "version")
err := os.WriteFile(versionPath, []byte(fmt.Sprintf("%d", version)), 0644)
err := os.WriteFile(versionPath, fmt.Appendf(nil, "%d", version), 0644)
if err != nil {
t.Fatalf("failed to write version file: %v", err)
}

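`fmt.Appendf` (Go 1.19+) formats straight into a byte slice, so the modernizer uses it to drop the intermediate string allocated by `[]byte(fmt.Sprintf(...))`. A minimal sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	version := 16

	// fmt.Appendf appends the formatted output to the given slice
	// (nil here), avoiding the string round-trip.
	data := fmt.Appendf(nil, "%d\n", version)

	if err := os.WriteFile("version", data, 0o644); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```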
@ -3,6 +3,7 @@ package mg16
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -13,13 +14,13 @@ import (
|
||||
)
|
||||
|
||||
// Helper function to run migration on JSON input and return result
|
||||
func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} {
|
||||
func runMigrationOnJSON(t *testing.T, input string) map[string]any {
|
||||
t.Helper()
|
||||
var output bytes.Buffer
|
||||
err := convert(bytes.NewReader([]byte(input)), &output)
|
||||
require.NoError(t, err)
|
||||
|
||||
var result map[string]interface{}
|
||||
var result map[string]any
|
||||
err = json.Unmarshal(output.Bytes(), &result)
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -27,33 +28,33 @@ func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} {
|
||||
}
|
||||
|
||||
// Helper function to assert nested map key has expected value
|
||||
func assertMapKeyEquals(t *testing.T, result map[string]interface{}, path []string, key string, expected interface{}) {
|
||||
func assertMapKeyEquals(t *testing.T, result map[string]any, path []string, key string, expected any) {
|
||||
t.Helper()
|
||||
current := result
|
||||
for _, p := range path {
|
||||
section, exists := current[p]
|
||||
require.True(t, exists, "Section %s not found in path %v", p, path)
|
||||
current = section.(map[string]interface{})
|
||||
current = section.(map[string]any)
|
||||
}
|
||||
|
||||
assert.Equal(t, expected, current[key], "Expected %s to be %v", key, expected)
|
||||
}
|
||||
|
||||
// Helper function to assert slice contains expected values
|
||||
func assertSliceEquals(t *testing.T, result map[string]interface{}, path []string, expected []string) {
|
||||
func assertSliceEquals(t *testing.T, result map[string]any, path []string, expected []string) {
|
||||
t.Helper()
|
||||
current := result
|
||||
for i, p := range path[:len(path)-1] {
|
||||
section, exists := current[p]
|
||||
require.True(t, exists, "Section %s not found in path %v at index %d", p, path, i)
|
||||
current = section.(map[string]interface{})
|
||||
current = section.(map[string]any)
|
||||
}
|
||||
|
||||
sliceKey := path[len(path)-1]
|
||||
slice, exists := current[sliceKey]
|
||||
require.True(t, exists, "Slice %s not found", sliceKey)
|
||||
|
||||
actualSlice := slice.([]interface{})
|
||||
actualSlice := slice.([]any)
|
||||
require.Equal(t, len(expected), len(actualSlice), "Expected slice length %d, got %d", len(expected), len(actualSlice))
|
||||
|
||||
for i, exp := range expected {
|
||||
@ -62,27 +63,25 @@ func assertSliceEquals(t *testing.T, result map[string]interface{}, path []strin
}

// Helper to build test config JSON with specified fields
func buildTestConfig(fields map[string]interface{}) string {
config := map[string]interface{}{
"Identity": map[string]interface{}{"PeerID": "QmTest"},
}
for k, v := range fields {
config[k] = v
func buildTestConfig(fields map[string]any) string {
config := map[string]any{
"Identity": map[string]any{"PeerID": "QmTest"},
}
maps.Copy(config, fields)
data, _ := json.MarshalIndent(config, "", " ")
return string(data)
}

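`maps.Copy` (Go 1.21+) is the drop-in replacement for the hand-written copy loop removed above; a minimal sketch of the same rewrite:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	config := map[string]any{"Identity": "QmTest"}
	fields := map[string]any{"Bootstrap": []string{"auto"}}

	// maps.Copy(dst, src) replaces
	// `for k, v := range fields { config[k] = v }`.
	maps.Copy(config, fields)

	fmt.Println(config)
}
```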
// Helper to run migration and get DNS resolvers
|
||||
func runMigrationAndGetDNSResolvers(t *testing.T, input string) map[string]interface{} {
|
||||
func runMigrationAndGetDNSResolvers(t *testing.T, input string) map[string]any {
|
||||
t.Helper()
|
||||
result := runMigrationOnJSON(t, input)
|
||||
dns := result["DNS"].(map[string]interface{})
|
||||
return dns["Resolvers"].(map[string]interface{})
|
||||
dns := result["DNS"].(map[string]any)
|
||||
return dns["Resolvers"].(map[string]any)
|
||||
}
|
||||
|
||||
// Helper to assert multiple resolver values
|
||||
func assertResolvers(t *testing.T, resolvers map[string]interface{}, expected map[string]string) {
|
||||
func assertResolvers(t *testing.T, resolvers map[string]any, expected map[string]string) {
|
||||
t.Helper()
|
||||
for key, expectedValue := range expected {
|
||||
assert.Equal(t, expectedValue, resolvers[key], "Expected %s resolver to be %v", key, expectedValue)
|
||||
@ -100,25 +99,25 @@ func TestMigration(t *testing.T) {
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Create a test config with default bootstrap peers
|
||||
testConfig := map[string]interface{}{
|
||||
testConfig := map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
|
||||
"/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", // Custom peer
|
||||
},
|
||||
"DNS": map[string]interface{}{
|
||||
"DNS": map[string]any{
|
||||
"Resolvers": map[string]string{},
|
||||
},
|
||||
"Routing": map[string]interface{}{
|
||||
"Routing": map[string]any{
|
||||
"DelegatedRouters": []string{},
|
||||
},
|
||||
"Ipns": map[string]interface{}{
|
||||
"Ipns": map[string]any{
|
||||
"ResolveCacheSize": 128,
|
||||
},
|
||||
"Identity": map[string]interface{}{
|
||||
"Identity": map[string]any{
|
||||
"PeerID": "QmTest",
|
||||
},
|
||||
"Version": map[string]interface{}{
|
||||
"Version": map[string]any{
|
||||
"Current": "0.36.0",
|
||||
},
|
||||
}
|
||||
@ -153,38 +152,38 @@ func TestMigration(t *testing.T) {
|
||||
configData, err = os.ReadFile(configPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
var updatedConfig map[string]interface{}
|
||||
var updatedConfig map[string]any
|
||||
err = json.Unmarshal(configData, &updatedConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check AutoConf was added
|
||||
autoConf, exists := updatedConfig["AutoConf"]
|
||||
assert.True(t, exists, "AutoConf section not added")
|
||||
autoConfMap := autoConf.(map[string]interface{})
|
||||
autoConfMap := autoConf.(map[string]any)
|
||||
// URL is not set explicitly in migration (uses implicit default)
|
||||
_, hasURL := autoConfMap["URL"]
|
||||
assert.False(t, hasURL, "AutoConf URL should not be explicitly set in migration")
|
||||
|
||||
// Check Bootstrap was updated
|
||||
bootstrap := updatedConfig["Bootstrap"].([]interface{})
|
||||
bootstrap := updatedConfig["Bootstrap"].([]any)
|
||||
assert.Equal(t, 2, len(bootstrap), "Expected 2 bootstrap entries")
|
||||
assert.Equal(t, "auto", bootstrap[0], "Expected first bootstrap entry to be 'auto'")
|
||||
assert.Equal(t, "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", bootstrap[1], "Expected custom peer to be preserved")
|
||||
|
||||
// Check DNS.Resolvers was updated
|
||||
dns := updatedConfig["DNS"].(map[string]interface{})
|
||||
resolvers := dns["Resolvers"].(map[string]interface{})
|
||||
dns := updatedConfig["DNS"].(map[string]any)
|
||||
resolvers := dns["Resolvers"].(map[string]any)
|
||||
assert.Equal(t, "auto", resolvers["."], "Expected DNS resolver for '.' to be 'auto'")
|
||||
|
||||
// Check Routing.DelegatedRouters was updated
|
||||
routing := updatedConfig["Routing"].(map[string]interface{})
|
||||
delegatedRouters := routing["DelegatedRouters"].([]interface{})
|
||||
routing := updatedConfig["Routing"].(map[string]any)
|
||||
delegatedRouters := routing["DelegatedRouters"].([]any)
|
||||
assert.Equal(t, 1, len(delegatedRouters))
|
||||
assert.Equal(t, "auto", delegatedRouters[0], "Expected DelegatedRouters to be ['auto']")
|
||||
|
||||
// Check Ipns.DelegatedPublishers was updated
|
||||
ipns := updatedConfig["Ipns"].(map[string]interface{})
|
||||
delegatedPublishers := ipns["DelegatedPublishers"].([]interface{})
|
||||
ipns := updatedConfig["Ipns"].(map[string]any)
|
||||
delegatedPublishers := ipns["DelegatedPublishers"].([]any)
|
||||
assert.Equal(t, 1, len(delegatedPublishers))
|
||||
assert.Equal(t, "auto", delegatedPublishers[0], "Expected DelegatedPublishers to be ['auto']")
|
||||
|
||||
@ -200,7 +199,7 @@ func TestMigration(t *testing.T) {
|
||||
|
||||
func TestConvert(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
|
||||
@ -212,7 +211,7 @@ func TestConvert(t *testing.T) {
|
||||
// Check that AutoConf section was added but is empty (using implicit defaults)
|
||||
autoConf, exists := result["AutoConf"]
|
||||
require.True(t, exists, "AutoConf section should exist")
|
||||
autoConfMap, ok := autoConf.(map[string]interface{})
|
||||
autoConfMap, ok := autoConf.(map[string]any)
|
||||
require.True(t, ok, "AutoConf should be a map")
|
||||
require.Empty(t, autoConfMap, "AutoConf should be empty (using implicit defaults)")
|
||||
|
||||
@ -282,7 +281,7 @@ func TestBootstrapMigration(t *testing.T) {
|
||||
|
||||
t.Run("replaces all old default bootstrapper peers with auto entry", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
|
||||
@ -322,8 +321,8 @@ func TestDNSMigration(t *testing.T) {
|
||||
|
||||
t.Run("preserves all custom DNS resolvers unchanged", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"DNS": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"DNS": map[string]any{
|
||||
"Resolvers": map[string]string{
|
||||
".": "https://my-custom-resolver.com",
|
||||
".eth": "https://eth.resolver",
|
||||
@ -340,8 +339,8 @@ func TestDNSMigration(t *testing.T) {
|
||||
|
||||
t.Run("preserves custom dot and eth resolvers unchanged", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"DNS": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"DNS": map[string]any{
|
||||
"Resolvers": map[string]string{
|
||||
".": "https://cloudflare-dns.com/dns-query",
|
||||
".eth": "https://example.com/dns-query",
|
||||
@ -358,8 +357,8 @@ func TestDNSMigration(t *testing.T) {
|
||||
|
||||
t.Run("replaces old default eth resolver with auto", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"DNS": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"DNS": map[string]any{
|
||||
"Resolvers": map[string]string{
|
||||
".": "https://cloudflare-dns.com/dns-query",
|
||||
".eth": "https://dns.eth.limo/dns-query", // should be replaced
|
||||
@ -395,8 +394,8 @@ func TestRoutingMigration(t *testing.T) {
|
||||
|
||||
t.Run("replaces cid.contact with auto while preserving custom routers added by user", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"Routing": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"Routing": map[string]any{
|
||||
"DelegatedRouters": []string{
|
||||
"https://cid.contact",
|
||||
"https://my-custom-router.com",
|
||||
@ -425,8 +424,8 @@ func TestIpnsMigration(t *testing.T) {
|
||||
|
||||
t.Run("preserves existing custom DelegatedPublishers unchanged", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"Ipns": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"Ipns": map[string]any{
|
||||
"DelegatedPublishers": []string{
|
||||
"https://my-publisher.com",
|
||||
"https://another-publisher.com",
|
||||
@ -440,8 +439,8 @@ func TestIpnsMigration(t *testing.T) {
|
||||
|
||||
t.Run("adds auto DelegatedPublishers to existing Ipns section", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"Ipns": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"Ipns": map[string]any{
|
||||
"ResolveCacheSize": 128,
|
||||
},
|
||||
})
|
||||
@ -461,8 +460,8 @@ func TestAutoConfMigration(t *testing.T) {
|
||||
|
||||
t.Run("preserves existing AutoConf fields unchanged", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := buildTestConfig(map[string]interface{}{
|
||||
"AutoConf": map[string]interface{}{
|
||||
input := buildTestConfig(map[string]any{
|
||||
"AutoConf": map[string]any{
|
||||
"URL": "https://custom.example.com/autoconf.json",
|
||||
"Enabled": false,
|
||||
"CustomField": "preserved",
|
||||
|
||||
@ -70,7 +70,7 @@ func WriteRepoVersion(ipfsDir string, version int) error {
|
||||
}
|
||||
|
||||
vFilePath := filepath.Join(ipfsDir, versionFile)
|
||||
return os.WriteFile(vFilePath, []byte(fmt.Sprintf("%d\n", version)), 0o644)
|
||||
return os.WriteFile(vFilePath, fmt.Appendf(nil, "%d\n", version), 0o644)
|
||||
}
|
||||
|
||||
func repoVersion(ipfsDir string) (int, error) {
|
||||
|
||||
@ -44,11 +44,11 @@ func (m *Mock) BackupConfig(prefix string) (string, error) {
|
||||
return "", errTODO
|
||||
}
|
||||
|
||||
func (m *Mock) SetConfigKey(key string, value interface{}) error {
|
||||
func (m *Mock) SetConfigKey(key string, value any) error {
|
||||
return errTODO
|
||||
}
|
||||
|
||||
func (m *Mock) GetConfigKey(key string) (interface{}, error) {
|
||||
func (m *Mock) GetConfigKey(key string) (any, error) {
|
||||
return nil, errTODO
|
||||
}
|
||||
|
||||
|
||||
@ -8,7 +8,7 @@ import (
|
||||
// open one.
|
||||
type OnlyOne struct {
|
||||
mu sync.Mutex
|
||||
active map[interface{}]*ref
|
||||
active map[any]*ref
|
||||
}
|
||||
|
||||
// Open a Repo identified by key. If Repo is not already open, the
|
||||
@ -23,11 +23,11 @@ type OnlyOne struct {
|
||||
// r, err := o.Open(repoKey(path), open)
|
||||
//
|
||||
// Call Repo.Close when done.
|
||||
func (o *OnlyOne) Open(key interface{}, open func() (Repo, error)) (Repo, error) {
|
||||
func (o *OnlyOne) Open(key any, open func() (Repo, error)) (Repo, error) {
|
||||
o.mu.Lock()
|
||||
defer o.mu.Unlock()
|
||||
if o.active == nil {
|
||||
o.active = make(map[interface{}]*ref)
|
||||
o.active = make(map[any]*ref)
|
||||
}
|
||||
|
||||
item, found := o.active[key]
|
||||
@ -49,7 +49,7 @@ func (o *OnlyOne) Open(key interface{}, open func() (Repo, error)) (Repo, error)
|
||||
|
||||
type ref struct {
|
||||
parent *OnlyOne
|
||||
key interface{}
|
||||
key any
|
||||
refs uint32
|
||||
Repo
|
||||
}
|
||||
|
||||
@ -38,10 +38,10 @@ type Repo interface {
|
||||
SetConfig(*config.Config) error
|
||||
|
||||
// SetConfigKey sets the given key-value pair within the config and persists it to storage.
|
||||
SetConfigKey(key string, value interface{}) error
|
||||
SetConfigKey(key string, value any) error
|
||||
|
||||
// GetConfigKey reads the value for the given key from the configuration in storage.
|
||||
GetConfigKey(key string) (interface{}, error)
|
||||
GetConfigKey(key string) (any, error)
|
||||
|
||||
// Datastore returns a reference to the configured data storage backend.
|
||||
Datastore() Datastore
|
||||
|
||||
@ -630,7 +630,7 @@ func createDeterministicFiles(dirPath string, numFiles, nameLen, lastNameLen int
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < numFiles; i++ {
|
||||
for i := range numFiles {
|
||||
// Use lastNameLen for the final file
|
||||
currentNameLen := nameLen
|
||||
if i == numFiles-1 {
|
||||
|
||||
@ -37,7 +37,7 @@ func TestAddressFileReady(t *testing.T) {
|
||||
// Poll for api file to appear
|
||||
apiFile := filepath.Join(node.Dir, "api")
|
||||
var fileExists bool
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
if _, err := os.Stat(apiFile); err == nil {
|
||||
fileExists = true
|
||||
break
|
||||
@ -81,7 +81,7 @@ func TestAddressFileReady(t *testing.T) {
|
||||
// Poll for gateway file to appear
|
||||
gatewayFile := filepath.Join(node.Dir, "gateway")
|
||||
var fileExists bool
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
if _, err := os.Stat(gatewayFile); err == nil {
|
||||
fileExists = true
|
||||
break
|
||||
|
||||
@ -85,37 +85,37 @@ func testAllAutoConfFieldsResolve(t *testing.T) {
|
||||
// Create comprehensive autoconf response matching Schema 4 format
|
||||
// Use server URLs to ensure they're reachable and valid
|
||||
serverURL := fmt.Sprintf("http://%s", r.Host) // Get the server URL from the request
|
||||
autoConf := map[string]interface{}{
|
||||
autoConf := map[string]any{
|
||||
"AutoConfVersion": 2025072301,
|
||||
"AutoConfSchema": 1,
|
||||
"AutoConfTTL": 86400,
|
||||
"SystemRegistry": map[string]interface{}{
|
||||
"AminoDHT": map[string]interface{}{
|
||||
"SystemRegistry": map[string]any{
|
||||
"AminoDHT": map[string]any{
|
||||
"URL": "https://github.com/ipfs/specs/pull/497",
|
||||
"Description": "Test AminoDHT system",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
|
||||
},
|
||||
},
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"},
|
||||
"Write": []string{"/routing/v1/ipns"},
|
||||
},
|
||||
},
|
||||
"IPNI": map[string]interface{}{
|
||||
"IPNI": map[string]any{
|
||||
"URL": serverURL + "/ipni-system",
|
||||
"Description": "Test IPNI system",
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
},
|
||||
"CustomIPNS": map[string]interface{}{
|
||||
"CustomIPNS": map[string]any{
|
||||
"URL": serverURL + "/ipns-system",
|
||||
"Description": "Test IPNS system",
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/ipns"},
|
||||
"Write": []string{"/routing/v1/ipns"},
|
||||
},
|
||||
@ -125,8 +125,8 @@ func testAllAutoConfFieldsResolve(t *testing.T) {
|
||||
".": {"https://cloudflare-dns.com/dns-query"},
|
||||
"eth.": {"https://dns.google/dns-query"},
|
||||
},
|
||||
"DelegatedEndpoints": map[string]interface{}{
|
||||
serverURL: map[string]interface{}{
|
||||
"DelegatedEndpoints": map[string]any{
|
||||
serverURL: map[string]any{
|
||||
"Systems": []string{"IPNI", "CustomIPNS"}, // Use non-AminoDHT systems to avoid filtering
|
||||
"Read": []string{"/routing/v1/providers", "/routing/v1/ipns"},
|
||||
"Write": []string{"/routing/v1/ipns"},
|
||||
@ -155,7 +155,7 @@ func testAllAutoConfFieldsResolve(t *testing.T) {
|
||||
// Clear any existing autoconf cache to prevent interference
|
||||
result := node.RunIPFS("config", "show")
|
||||
if result.ExitCode() == 0 {
|
||||
var cfg map[string]interface{}
|
||||
var cfg map[string]any
|
||||
if json.Unmarshal([]byte(result.Stdout.String()), &cfg) == nil {
|
||||
if repoPath, exists := cfg["path"]; exists {
|
||||
if pathStr, ok := repoPath.(string); ok {
|
||||
@ -436,12 +436,12 @@ func testConfigShowExpandAutoComplete(t *testing.T) {
|
||||
assert.Contains(t, expandedConfig, "bootstrap.libp2p.io", "Should contain expanded bootstrap peers")
|
||||
|
||||
// Should be valid JSON
|
||||
var configMap map[string]interface{}
|
||||
var configMap map[string]any
|
||||
err := json.Unmarshal([]byte(expandedConfig), &configMap)
|
||||
require.NoError(t, err, "Expanded config should be valid JSON")
|
||||
|
||||
// Verify specific fields were expanded
|
||||
if bootstrap, ok := configMap["Bootstrap"].([]interface{}); ok {
|
||||
if bootstrap, ok := configMap["Bootstrap"].([]any); ok {
|
||||
assert.Greater(t, len(bootstrap), 0, "Bootstrap should have expanded entries")
|
||||
for _, peer := range bootstrap {
|
||||
assert.NotEqual(t, "auto", peer, "Bootstrap entries should not be 'auto'")
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -240,11 +241,8 @@ func testDaemonWithMalformedAutoConf(t *testing.T) {

foundFallbackPeers := 0
for _, expectedPeer := range expectedBootstrapPeers {
for _, actualPeer := range bootstrap {
if actualPeer == expectedPeer {
foundFallbackPeers++
break
}
if slices.Contains(bootstrap, expectedPeer) {
foundFallbackPeers++
}
}
assert.Greater(t, foundFallbackPeers, 0, "Should contain bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig")

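The hunk above (and the `slices` import added earlier in this file) collapses the inner "scan and break" loop into a single `slices.Contains` call (Go 1.21+). A minimal sketch of the same rewrite:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	bootstrap := []string{"auto", "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer"}

	// slices.Contains replaces the manual loop-and-break membership check.
	if slices.Contains(bootstrap, "auto") {
		fmt.Println("found fallback entry")
	}
}
```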
@ -286,7 +286,7 @@ func testConfigReplacePreservesAuto(t *testing.T) {
|
||||
assert.Contains(t, originalConfig, `"foo.": "auto"`)
|
||||
|
||||
// Modify the config string to add a new field but preserve auto values
|
||||
var configMap map[string]interface{}
|
||||
var configMap map[string]any
|
||||
err := json.Unmarshal([]byte(originalConfig), &configMap)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@ -35,56 +36,56 @@ func TestAutoConfExtensibility_NewSystem(t *testing.T) {
|
||||
var mockServer *httptest.Server
|
||||
mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Create autoconf.json with NewSystem
|
||||
autoconfData := map[string]interface{}{
|
||||
autoconfData := map[string]any{
|
||||
"AutoConfVersion": 2025072901,
|
||||
"AutoConfSchema": 1,
|
||||
"AutoConfTTL": 86400,
|
||||
"SystemRegistry": map[string]interface{}{
|
||||
"AminoDHT": map[string]interface{}{
|
||||
"SystemRegistry": map[string]any{
|
||||
"AminoDHT": map[string]any{
|
||||
"URL": "https://github.com/ipfs/specs/pull/497",
|
||||
"Description": "Public DHT swarm",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
},
|
||||
},
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"},
|
||||
"Write": []string{"/routing/v1/ipns"},
|
||||
},
|
||||
},
|
||||
"IPNI": map[string]interface{}{
|
||||
"IPNI": map[string]any{
|
||||
"URL": "https://ipni.example.com",
|
||||
"Description": "Network Indexer",
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
},
|
||||
"NewSystem": map[string]interface{}{
|
||||
"NewSystem": map[string]any{
|
||||
"URL": "https://example.com/newsystem",
|
||||
"Description": "Test system for extensibility verification",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa",
|
||||
},
|
||||
},
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
"DNSResolvers": map[string]interface{}{
|
||||
"DNSResolvers": map[string]any{
|
||||
"eth.": []string{"https://dns.eth.limo/dns-query"},
|
||||
},
|
||||
"DelegatedEndpoints": map[string]interface{}{
|
||||
"https://ipni.example.com": map[string]interface{}{
|
||||
"DelegatedEndpoints": map[string]any{
|
||||
"https://ipni.example.com": map[string]any{
|
||||
"Systems": []string{"IPNI"},
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
mockServer.URL + "/newsystem": map[string]interface{}{
|
||||
mockServer.URL + "/newsystem": map[string]any{
|
||||
"Systems": []string{"NewSystem"},
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
@ -101,7 +102,7 @@ func TestAutoConfExtensibility_NewSystem(t *testing.T) {
|
||||
// NewSystem mock server URL will be dynamically assigned
|
||||
newSystemServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Simple mock server for NewSystem endpoint
|
||||
response := map[string]interface{}{"Providers": []interface{}{}}
|
||||
response := map[string]any{"Providers": []any{}}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(response)
|
||||
}))
|
||||
@ -110,56 +111,56 @@ func TestAutoConfExtensibility_NewSystem(t *testing.T) {
|
||||
// Update the autoconf to point to the correct NewSystem endpoint
|
||||
mockServer.Close()
|
||||
mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
autoconfData := map[string]interface{}{
|
||||
autoconfData := map[string]any{
|
||||
"AutoConfVersion": 2025072901,
|
||||
"AutoConfSchema": 1,
|
||||
"AutoConfTTL": 86400,
|
||||
"SystemRegistry": map[string]interface{}{
|
||||
"AminoDHT": map[string]interface{}{
|
||||
"SystemRegistry": map[string]any{
|
||||
"AminoDHT": map[string]any{
|
||||
"URL": "https://github.com/ipfs/specs/pull/497",
|
||||
"Description": "Public DHT swarm",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
},
|
||||
},
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"},
|
||||
"Write": []string{"/routing/v1/ipns"},
|
||||
},
|
||||
},
|
||||
"IPNI": map[string]interface{}{
|
||||
"IPNI": map[string]any{
|
||||
"URL": "https://ipni.example.com",
|
||||
"Description": "Network Indexer",
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
},
|
||||
"NewSystem": map[string]interface{}{
|
||||
"NewSystem": map[string]any{
|
||||
"URL": "https://example.com/newsystem",
|
||||
"Description": "Test system for extensibility verification",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa",
|
||||
},
|
||||
},
|
||||
"DelegatedConfig": map[string]interface{}{
|
||||
"DelegatedConfig": map[string]any{
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
"DNSResolvers": map[string]interface{}{
|
||||
"DNSResolvers": map[string]any{
|
||||
"eth.": []string{"https://dns.eth.limo/dns-query"},
|
||||
},
|
||||
"DelegatedEndpoints": map[string]interface{}{
|
||||
"https://ipni.example.com": map[string]interface{}{
|
||||
"DelegatedEndpoints": map[string]any{
|
||||
"https://ipni.example.com": map[string]any{
|
||||
"Systems": []string{"IPNI"},
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
},
|
||||
newSystemServer.URL: map[string]interface{}{
|
||||
newSystemServer.URL: map[string]any{
|
||||
"Systems": []string{"NewSystem"},
|
||||
"Read": []string{"/routing/v1/providers"},
|
||||
"Write": []string{},
|
||||
@ -227,11 +228,8 @@ func TestAutoConfExtensibility_NewSystem(t *testing.T) {
|
||||
// Should contain NewSystem endpoint (not native) - now with routing path
|
||||
foundNewSystem := false
|
||||
expectedNewSystemURL := newSystemServer.URL + "/routing/v1/providers" // Full URL with path, as returned by DelegatedRoutersWithAutoConf
|
||||
for _, url := range routerURLs {
|
||||
if url == expectedNewSystemURL {
|
||||
foundNewSystem = true
|
||||
break
|
||||
}
|
||||
if slices.Contains(routerURLs, expectedNewSystemURL) {
|
||||
foundNewSystem = true
|
||||
}
|
||||
require.True(t, foundNewSystem, "Should contain NewSystem endpoint (%s) for delegated routing, got: %v", expectedNewSystemURL, routerURLs)
|
||||
|
||||
|
||||
@ -70,7 +70,7 @@ func TestAutoConfFuzz(t *testing.T) {
|
||||
func testFuzzAutoConfVersion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
version interface{}
|
||||
version any
|
||||
expectError bool
|
||||
}{
|
||||
{"valid version", 2025071801, false},
|
||||
@ -84,22 +84,22 @@ func testFuzzAutoConfVersion(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
config := map[string]interface{}{
|
||||
config := map[string]any{
|
||||
"AutoConfVersion": tc.version,
|
||||
"AutoConfSchema": 1,
|
||||
"AutoConfTTL": 86400,
|
||||
"SystemRegistry": map[string]interface{}{
|
||||
"AminoDHT": map[string]interface{}{
|
||||
"SystemRegistry": map[string]any{
|
||||
"AminoDHT": map[string]any{
|
||||
"Description": "Test AminoDHT system",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"DNSResolvers": map[string]interface{}{},
|
||||
"DelegatedEndpoints": map[string]interface{}{},
|
||||
"DNSResolvers": map[string]any{},
|
||||
"DelegatedEndpoints": map[string]any{},
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(config)
|
||||
@ -120,7 +120,7 @@ func testFuzzAutoConfVersion(t *testing.T) {
|
||||
func testFuzzBootstrapArrays(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
bootstrap interface{}
|
||||
bootstrap any
|
||||
expectError bool
|
||||
validate func(*testing.T, *autoconf.Response)
|
||||
}
|
||||
@ -177,7 +177,7 @@ func testFuzzBootstrapArrays(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "mixed types in array",
|
||||
bootstrap: []interface{}{"/dnsaddr/test", 123, nil},
|
||||
bootstrap: []any{"/dnsaddr/test", 123, nil},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
@ -199,20 +199,20 @@ func testFuzzBootstrapArrays(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
config := map[string]interface{}{
|
||||
config := map[string]any{
|
||||
"AutoConfVersion": 2025072301,
|
||||
"AutoConfSchema": 1,
|
||||
"AutoConfTTL": 86400,
|
||||
"SystemRegistry": map[string]interface{}{
|
||||
"AminoDHT": map[string]interface{}{
|
||||
"SystemRegistry": map[string]any{
|
||||
"AminoDHT": map[string]any{
|
||||
"Description": "Test AminoDHT system",
|
||||
"NativeConfig": map[string]interface{}{
|
||||
"NativeConfig": map[string]any{
|
||||
"Bootstrap": tc.bootstrap,
|
||||
},
|
||||
},
|
||||
},
|
||||
"DNSResolvers": map[string]interface{}{},
|
||||
"DelegatedEndpoints": map[string]interface{}{},
|
||||
"DNSResolvers": map[string]any{},
|
||||
"DelegatedEndpoints": map[string]any{},
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(config)
|
||||
@ -247,7 +247,7 @@ func testFuzzBootstrapArrays(t *testing.T) {
func testFuzzDNSResolvers(t *testing.T) {
type testCase struct {
name string
resolvers interface{}
resolvers any
expectError bool
validate func(*testing.T, *autoconf.Response)
}
@ -314,27 +314,27 @@ func testFuzzDNSResolvers(t *testing.T) {
},
{
name: "nested invalid structure",
resolvers: map[string]interface{}{".": map[string]string{"invalid": "structure"}},
resolvers: map[string]any{".": map[string]string{"invalid": "structure"}},
expectError: true,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config := map[string]interface{}{
config := map[string]any{
"AutoConfVersion": 2025072301,
"AutoConfSchema": 1,
"AutoConfTTL": 86400,
"SystemRegistry": map[string]interface{}{
"AminoDHT": map[string]interface{}{
"SystemRegistry": map[string]any{
"AminoDHT": map[string]any{
"Description": "Test AminoDHT system",
"NativeConfig": map[string]interface{}{
"NativeConfig": map[string]any{
"Bootstrap": []string{"/dnsaddr/test"},
},
},
},
"DNSResolvers": tc.resolvers,
"DelegatedEndpoints": map[string]interface{}{},
"DelegatedEndpoints": map[string]any{},
}

jsonData, err := json.Marshal(config)
@ -366,7 +366,7 @@ func testFuzzDelegatedRouters(t *testing.T) {
// Test various malformed delegated router configurations
type testCase struct {
name string
routers interface{}
routers any
expectError bool
validate func(*testing.T, *autoconf.Response)
}
@ -374,8 +374,8 @@ func testFuzzDelegatedRouters(t *testing.T) {
testCases := []testCase{
{
name: "valid endpoints",
routers: map[string]interface{}{
"https://ipni.example.com": map[string]interface{}{
routers: map[string]any{
"https://ipni.example.com": map[string]any{
"Systems": []string{"IPNI"},
"Read": []string{"/routing/v1/providers"},
"Write": []string{},
@ -392,7 +392,7 @@ func testFuzzDelegatedRouters(t *testing.T) {
},
{
name: "empty routers",
routers: map[string]interface{}{},
routers: map[string]any{},
validate: func(t *testing.T, resp *autoconf.Response) {
assert.Empty(t, resp.Config.DelegatedEndpoints, "Empty routers should result in empty endpoints")
},
@ -411,8 +411,8 @@ func testFuzzDelegatedRouters(t *testing.T) {
},
{
name: "invalid endpoint URLs",
routers: map[string]interface{}{
"not-a-url": map[string]interface{}{
routers: map[string]any{
"not-a-url": map[string]any{
"Systems": []string{"IPNI"},
"Read": []string{"/routing/v1/providers"},
"Write": []string{},
@ -424,19 +424,19 @@ func testFuzzDelegatedRouters(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config := map[string]interface{}{
config := map[string]any{
"AutoConfVersion": 2025072301,
"AutoConfSchema": 1,
"AutoConfTTL": 86400,
"SystemRegistry": map[string]interface{}{
"AminoDHT": map[string]interface{}{
"SystemRegistry": map[string]any{
"AminoDHT": map[string]any{
"Description": "Test AminoDHT system",
"NativeConfig": map[string]interface{}{
"NativeConfig": map[string]any{
"Bootstrap": []string{"/dnsaddr/test"},
},
},
},
"DNSResolvers": map[string]interface{}{},
"DNSResolvers": map[string]any{},
"DelegatedEndpoints": tc.routers,
}

@ -510,26 +510,26 @@ func testFuzzDelegatedPublishers(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
autoConfData := map[string]interface{}{
autoConfData := map[string]any{
"AutoConfVersion": 2025072301,
"AutoConfSchema": 1,
"AutoConfTTL": 86400,
"SystemRegistry": map[string]interface{}{
"TestSystem": map[string]interface{}{
"SystemRegistry": map[string]any{
"TestSystem": map[string]any{
"Description": "Test system for fuzz testing",
"DelegatedConfig": map[string]interface{}{
"DelegatedConfig": map[string]any{
"Read": []string{"/routing/v1/ipns"},
"Write": []string{"/routing/v1/ipns"},
},
},
},
"DNSResolvers": map[string]interface{}{},
"DelegatedEndpoints": map[string]interface{}{},
"DNSResolvers": map[string]any{},
"DelegatedEndpoints": map[string]any{},
}

// Add test URLs as delegated endpoints
for _, url := range tc.urls {
autoConfData["DelegatedEndpoints"].(map[string]interface{})[url] = map[string]interface{}{
autoConfData["DelegatedEndpoints"].(map[string]any)[url] = map[string]any{
"Systems": []string{"TestSystem"},
"Read": []string{"/routing/v1/ipns"},
"Write": []string{"/routing/v1/ipns"},
@ -598,27 +598,27 @@ func testFuzzLargePayloads(t *testing.T) {
}

largeDNSResolvers := make(map[string][]string)
for i := 0; i < 1000; i++ {
for i := range 1000 {
domain := fmt.Sprintf("domain%d.example.com", i)
largeDNSResolvers[domain] = []string{
fmt.Sprintf("https://resolver%d.example.com/dns-query", i),
}
}

config := map[string]interface{}{
config := map[string]any{
"AutoConfVersion": 2025072301,
"AutoConfSchema": 1,
"AutoConfTTL": 86400,
"SystemRegistry": map[string]interface{}{
"AminoDHT": map[string]interface{}{
"SystemRegistry": map[string]any{
"AminoDHT": map[string]any{
"Description": "Test AminoDHT system",
"NativeConfig": map[string]interface{}{
"NativeConfig": map[string]any{
"Bootstrap": largeBootstrap,
},
},
},
"DNSResolvers": largeDNSResolvers,
"DelegatedEndpoints": map[string]interface{}{},
"DelegatedEndpoints": map[string]any{},
}

jsonData, err := json.Marshal(config)
@ -644,7 +644,7 @@ func testFuzzLargePayloads(t *testing.T) {
// Helper function to generate many DNS resolvers for testing
func generateManyResolvers(count int) map[string][]string {
resolvers := make(map[string][]string)
for i := 0; i < count; i++ {
for i := range count {
domain := fmt.Sprintf("domain%d.example.com", i)
resolvers[domain] = []string{
fmt.Sprintf("https://resolver%d.example.com/dns-query", i),

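Note on the loop rewrites above: for i := range n is Go 1.22's range-over-int form and iterates i over 0 through n-1, matching the classic three-clause loop it replaces. A small standalone sketch (illustrative only, not code from this diff):

package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 5; i++ { sum += i }
	sum := 0
	for i := range 5 {
		sum += i
	}
	fmt.Println(sum) // prints 10
}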
@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"net/http/httptest"
"strings"
@ -330,9 +331,7 @@ func (m *mockIPNSPublisher) getPublishedKeys() map[string]string {
m.mu.Lock()
defer m.mu.Unlock()
result := make(map[string]string)
for k, v := range m.publishedKeys {
result[k] = v
}
maps.Copy(result, m.publishedKeys)
return result
}

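Note on the getPublishedKeys change above: maps.Copy from the standard maps package (Go 1.21) inserts every key/value pair of the source map into the destination, overwriting existing keys, which is what the removed hand-written loop did. A standalone sketch with hypothetical values (not repo code):

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"key1": "value1", "key2": "value2"}

	// Copy all entries from src into a freshly allocated dst.
	dst := make(map[string]string, len(src))
	maps.Copy(dst, src)

	fmt.Println(len(dst)) // prints 2
}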
@ -34,7 +34,7 @@ type mockRoutingServer struct {
server *httptest.Server
mu sync.Mutex
requests []string
providerFunc func(cid string) []map[string]interface{}
providerFunc func(cid string) []map[string]any
}

func newMockRoutingServer(t *testing.T) *mockRoutingServer {
@ -44,8 +44,8 @@ func newMockRoutingServer(t *testing.T) *mockRoutingServer {
}

// Default provider function returns mock provider records
m.providerFunc = func(cid string) []map[string]interface{} {
return []map[string]interface{}{
m.providerFunc = func(cid string) []map[string]any {
return []map[string]any{
{
"Protocol": "transport-bitswap",
"Schema": "bitswap",
@ -175,8 +175,8 @@ func testRoutingErrorHandling(t *testing.T) {
defer routingServer.close()

// Configure to return no providers (empty response)
routingServer.providerFunc = func(cid string) []map[string]interface{} {
return []map[string]interface{}{}
routingServer.providerFunc = func(cid string) []map[string]any {
return []map[string]any{}
}

// Create autoconf data

@ -65,8 +65,8 @@ func TestIPFSVersionDeps(t *testing.T) {
assert.True(t, strings.HasPrefix(lines[0], "github.com/ipfs/kubo@v"))

for _, depLine := range lines[1:] {
split := strings.Split(depLine, " => ")
for _, moduleVersion := range split {
split := strings.SplitSeq(depLine, " => ")
for moduleVersion := range split {
splitModVers := strings.Split(moduleVersion, "@")
modPath := splitModVers[0]
modVers := splitModVers[1]
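Note on the strings.SplitSeq rewrites above and below: strings.Split returns a []string, while strings.SplitSeq (Go 1.24) returns an iterator over the substrings, so the range loop binds a single value instead of an index/value pair and no intermediate slice is allocated. A standalone sketch (illustrative only):

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "modA@v1.0.0 => modB@v1.1.0"

	// SplitSeq yields each substring lazily; ranging over the iterator
	// produces one loop variable per element, with no index.
	for part := range strings.SplitSeq(line, " => ") {
		fmt.Println(part)
	}
}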
@ -92,7 +92,6 @@ func TestAllSubcommandsAcceptHelp(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode()
for _, cmd := range node.IPFSCommands() {
cmd := cmd
t.Run(fmt.Sprintf("command %q accepts help", cmd), func(t *testing.T) {
t.Parallel()
splitCmd := strings.Split(cmd, " ")[1:]

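Note on the removed cmd := cmd line above: copying the loop variable was the pre-Go 1.22 idiom for capturing it safely in the parallel subtest closure; since Go 1.22 each iteration declares a fresh variable, so the copy is redundant and the modernizer drops it. A standalone test-shaped sketch with hypothetical commands (not repo code):

package example_test

import (
	"fmt"
	"testing"
)

func TestLoopVariableCapture(t *testing.T) {
	cmds := []string{"add", "cat", "pin"}
	for _, cmd := range cmds {
		// Go 1.22+: cmd is a new variable each iteration, so the closure
		// below captures the intended value without an explicit copy.
		t.Run(fmt.Sprintf("command %q accepts help", cmd), func(t *testing.T) {
			t.Parallel()
			if cmd == "" {
				t.Fatal("unexpected empty command")
			}
		})
	}
}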
@ -141,10 +141,10 @@ func TestBitswapConfig(t *testing.T) {

// read libp2p identify from remote peer, and print protocols
res := requester.IPFS("id", "-f", "<protocols>", provider.PeerID().String())
protocols := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
protocols := strings.SplitSeq(strings.TrimSpace(res.Stdout.String()), "\n")

// No bitswap protocols should be present
for _, proto := range protocols {
for proto := range protocols {
assert.NotContains(t, proto, bsnet.ProtocolBitswap, "bitswap protocol %s should not be advertised when server is disabled", proto)
assert.NotContains(t, proto, bsnet.ProtocolBitswapNoVers, "bitswap protocol %s should not be advertised when server is disabled", proto)
assert.NotContains(t, proto, bsnet.ProtocolBitswapOneOne, "bitswap protocol %s should not be advertised when server is disabled", proto)

@ -60,7 +60,7 @@ func allBlockCIDs(t *testing.T, node *harness.Node, root string) []string {
t.Helper()
cids := []string{root}
res := node.IPFS("refs", "-r", "--unique", root)
for _, line := range strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") {
for line := range strings.SplitSeq(strings.TrimSpace(res.Stdout.String()), "\n") {
if line != "" {
cids = append(cids, line)
}

@ -76,7 +76,7 @@ func TestConfigSecrets(t *testing.T) {
var origPrivKey string
assert.Contains(t, originalConfig, "PrivKey")
// Simple extraction - find the PrivKey line
for _, line := range strings.Split(originalConfig, "\n") {
for line := range strings.SplitSeq(originalConfig, "\n") {
if strings.Contains(line, "\"PrivKey\":") {
origPrivKey = line
break
@ -98,7 +98,7 @@ func TestConfigSecrets(t *testing.T) {

// Verify the PrivKey line is the same
var newPrivKey string
for _, line := range strings.Split(newConfig, "\n") {
for line := range strings.SplitSeq(newConfig, "\n") {
if strings.Contains(line, "\"PrivKey\":") {
newPrivKey = line
break

@ -202,7 +202,6 @@ func TestContentBlocking(t *testing.T) {

// Confirm that denylist is active for every command in 'cliCmds' x 'testCases'
for _, cmd := range cliCmds {
cmd := cmd
cliTestName := fmt.Sprintf("CLI '%s' denies %s", strings.Join(cmd, " "), testCase.name)
t.Run(cliTestName, func(t *testing.T) {
t.Parallel()

Some files were not shown because too many files have changed in this diff.