kubo/test/cli/diag_datastore_test.go
Marcin Rataj 824a47ae11
Some checks are pending
CodeQL / codeql (push) Waiting to run
Docker Check / lint (push) Waiting to run
Docker Check / build (push) Waiting to run
Gateway Conformance / gateway-conformance (push) Waiting to run
Gateway Conformance / gateway-conformance-libp2p-experiment (push) Waiting to run
Go Build / go-build (push) Waiting to run
Go Check / go-check (push) Waiting to run
Go Lint / go-lint (push) Waiting to run
Go Test / unit-tests (push) Waiting to run
Go Test / cli-tests (push) Waiting to run
Go Test / example-tests (push) Waiting to run
Interop / interop-prep (push) Waiting to run
Interop / helia-interop (push) Blocked by required conditions
Interop / ipfs-webui (push) Blocked by required conditions
Sharness / sharness-test (push) Waiting to run
Spell Check / spellcheck (push) Waiting to run
feat(pubsub): persistent validation and diagnostic commands (#11110)
* feat(pubsub): persistent seqno validation and diagnostic commands

- upgrade go-libp2p-pubsub to v0.15.0
- add persistent seqno validator using BasicSeqnoValidator
  stores max seen seqno per peer at /pubsub/seqno/<peerid>
  survives daemon restarts, addresses message cycling in large networks (#9665)
- add `ipfs pubsub reset` command to clear validator state
- add `ipfs diag datastore get/count` commands for datastore inspection
  requires daemon to be stopped, useful for debugging
- change pubsub status from Deprecated to Experimental
- add CLI tests for pubsub and diag datastore commands
- remove flaky pubsub_msg_seen_cache_test.go (replaced by CLI tests)

* fix(pubsub): improve reset command and add deprecation warnings

- use batched delete for efficient bulk reset
- check key existence before reporting deleted count
- sync datastore after deletions to ensure persistence
- show "no validator state found" when resetting non-existent peer
- log deprecation warnings when using --enable-pubsub-experiment
  or --enable-namesys-pubsub CLI flags

* refactor(test): add datastore helpers to test harness

---------

Co-authored-by: Andrew Gillis <11790789+gammazero@users.noreply.github.com>
2026-01-16 00:27:09 +01:00

148 lines
4.5 KiB
Go

package cli
import (
"encoding/json"
"testing"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDiagDatastore(t *testing.T) {
t.Parallel()
t.Run("diag datastore get returns error for non-existent key", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Don't start daemon - these commands require daemon to be stopped
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent/key")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "key not found")
})
t.Run("diag datastore get returns raw bytes by default", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add some data to create a known datastore key
// We need daemon for add, then stop it
node.StartDaemon()
cid := node.IPFSAddStr("test data for diag datastore")
node.IPFS("pin", "add", cid)
node.StopDaemon()
// Test count to verify we have entries
count := node.DatastoreCount("/")
t.Logf("total datastore entries: %d", count)
assert.NotEqual(t, int64(0), count, "should have datastore entries after pinning")
})
t.Run("diag datastore get --hex returns hex dump", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add and pin some data
node.StartDaemon()
cid := node.IPFSAddStr("test data for hex dump")
node.IPFS("pin", "add", cid)
node.StopDaemon()
// Test with existing keys in pins namespace
count := node.DatastoreCount("/pins/")
t.Logf("pins datastore entries: %d", count)
if count != 0 {
t.Log("pins datastore has entries, hex dump format tested implicitly")
}
})
t.Run("diag datastore count returns 0 for empty prefix", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
count := node.DatastoreCount("/definitely/nonexistent/prefix/")
assert.Equal(t, int64(0), count)
})
t.Run("diag datastore count returns JSON with --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
res := node.IPFS("diag", "datastore", "count", "/pubsub/seqno/", "--enc=json")
assert.NoError(t, res.Err)
var result struct {
Prefix string `json:"prefix"`
Count int64 `json:"count"`
}
err := json.Unmarshal(res.Stdout.Bytes(), &result)
require.NoError(t, err)
assert.Equal(t, "/pubsub/seqno/", result.Prefix)
assert.Equal(t, int64(0), result.Count)
})
t.Run("diag datastore get returns JSON with --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Test error case with JSON encoding
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent", "--enc=json")
assert.Error(t, res.Err)
})
t.Run("diag datastore count counts entries correctly", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add multiple pins to create multiple entries
node.StartDaemon()
cid1 := node.IPFSAddStr("data 1")
cid2 := node.IPFSAddStr("data 2")
cid3 := node.IPFSAddStr("data 3")
node.IPFS("pin", "add", cid1)
node.IPFS("pin", "add", cid2)
node.IPFS("pin", "add", cid3)
node.StopDaemon()
// Count should reflect the pins (plus any system entries)
count := node.DatastoreCount("/")
t.Logf("total entries after adding 3 pins: %d", count)
// Should have more than 0 entries
assert.NotEqual(t, int64(0), count)
})
t.Run("diag datastore commands work offline", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Don't start daemon - these commands require daemon to be stopped
// Count should work offline
count := node.DatastoreCount("/pubsub/seqno/")
assert.Equal(t, int64(0), count)
// Get should return error for missing key (but command should work)
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent/key")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "key not found")
})
t.Run("diag datastore commands require daemon to be stopped", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Both get and count require repo lock, which is held by the running daemon
res := node.RunIPFS("diag", "datastore", "get", "/test")
assert.Error(t, res.Err, "get should fail when daemon is running")
assert.Contains(t, res.Stderr.String(), "ipfs daemon is running")
res = node.RunIPFS("diag", "datastore", "count", "/pubsub/seqno/")
assert.Error(t, res.Err, "count should fail when daemon is running")
assert.Contains(t, res.Stderr.String(), "ipfs daemon is running")
})
}