Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git, synced 2026-02-21 10:27:26 +08:00
wrapping up

Commit b0a87b2fe4, parent c2b9b1d460
@@ -121,7 +121,7 @@ func (e *DataClockConsensusEngine) processFrame(
    sel = selBI.FillBytes(sel)

    if bytes.Equal(
        trie.FindNearest(sel).External.Key,
        trie.FindNearest(sel).Key,
        e.provingKeyAddress,
    ) {
        var nextFrame *protobufs.ClockFrame
@@ -143,8 +143,6 @@ func (e *DataClockConsensusEngine) processFrame(
    if !e.IsInProverTrie(e.pubSub.GetPeerID()) {
        e.logger.Info("announcing prover join")
        for _, eng := range e.executionEngines {
            eng.AnnounceProverMerge()
            time.Sleep(10 * time.Second)
            eng.AnnounceProverJoin()
            break
        }

@@ -62,6 +62,7 @@ type DataTimeReel struct {
    badFrameCh chan *protobufs.ClockFrame
    done       chan bool
    alwaysSend bool
    restore    func() []*tries.RollingFrecencyCritbitTrie
}

func NewDataTimeReel(
@@ -82,6 +83,7 @@ func NewDataTimeReel(
    initialInclusionProof *crypto.InclusionAggregateProof,
    initialProverKeys [][]byte,
    alwaysSend bool,
    restore func() []*tries.RollingFrecencyCritbitTrie,
) *DataTimeReel {
    if filter == nil {
        panic("filter is nil")
@@ -131,6 +133,7 @@ func NewDataTimeReel(
        badFrameCh: make(chan *protobufs.ClockFrame),
        done:       make(chan bool),
        alwaysSend: alwaysSend,
        restore:    restore,
    }
}

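Editor's note: a minimal sketch of what a caller might pass for the new restore argument. It assumes proverKeys and the poseidon hash are in the caller's scope, as they are in the restore closure added to NewTokenExecutionEngine later in this diff; it is illustrative, not part of the commit.

    restore := func() []*tries.RollingFrecencyCritbitTrie {
        // Rebuild a single prover trie from the known prover keys.
        trie := &tries.RollingFrecencyCritbitTrie{}
        for _, key := range proverKeys {
            addr, _ := poseidon.HashBytes(key)
            trie.Add(addr.FillBytes(make([]byte, 32)), 0)
        }
        return []*tries.RollingFrecencyCritbitTrie{trie}
    }
    // The closure is then supplied as the final argument to NewDataTimeReel.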
@@ -145,6 +148,10 @@ func (d *DataTimeReel) Start() error {
        d.totalDistance = big.NewInt(0)
        d.headDistance = big.NewInt(0)
    } else {
        if len(tries[0].FindNearestAndApproximateNeighbors(make([]byte, 32))) == 0 {
            d.logger.Info("encountered trie corruption, invoking restoration")
            tries = d.restore()
        }
        d.head = frame
        if err != nil {
            panic(err)
@@ -748,7 +755,7 @@ func (d *DataTimeReel) GetDistance(frame *protobufs.ClockFrame) (

    discriminatorNode :=
        d.proverTries[0].FindNearest(prevSelector.FillBytes(make([]byte, 32)))
    discriminator := discriminatorNode.External.Key
    discriminator := discriminatorNode.Key
    addr, err := frame.GetAddress()
    if err != nil {
        return unknownDistance, errors.Wrap(err, "get distance")

@ -220,7 +220,7 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
optimalSigner, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelection.External.Key)],
|
||||
addrMap[string(proverSelection.Key)],
|
||||
)
|
||||
frame, err = prover.ProveDataClockFrame(
|
||||
frame,
|
||||
@ -244,7 +244,7 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
optimalSigner, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelection.External.Key)],
|
||||
addrMap[string(proverSelection.Key)],
|
||||
)
|
||||
frame, err = prover.ProveDataClockFrame(
|
||||
frame,
|
||||
@ -273,7 +273,7 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
optimalSigner, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelection.External.Key)],
|
||||
addrMap[string(proverSelection.Key)],
|
||||
)
|
||||
frame, err = prover.ProveDataClockFrame(
|
||||
frame,
|
||||
@ -293,7 +293,7 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
suboptimalSigner2, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelections[2].External.Key)],
|
||||
addrMap[string(proverSelections[2].Key)],
|
||||
)
|
||||
// What we're trying to simulate: consensus heads progressed on a slightly
|
||||
// less optimal prover.
|
||||
@ -315,7 +315,7 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
optimalSigner, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelection.External.Key)],
|
||||
addrMap[string(proverSelection.Key)],
|
||||
)
|
||||
frame, err = prover.ProveDataClockFrame(
|
||||
frame,
|
||||
@ -346,10 +346,10 @@ func TestDataTimeReel(t *testing.T) {
|
||||
prev,
|
||||
)
|
||||
optimalSigner, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelections[0].External.Key)],
|
||||
addrMap[string(proverSelections[0].Key)],
|
||||
)
|
||||
suboptimalSigner2, _ := keyManager.GetSigningKey(
|
||||
addrMap[string(proverSelections[2].External.Key)],
|
||||
addrMap[string(proverSelections[2].Key)],
|
||||
)
|
||||
optimalKeySet = append(optimalKeySet, []byte(
|
||||
(optimalSigner.Public()).(ed448.PublicKey),
|
||||
|
||||
@@ -19,6 +19,5 @@ type ExecutionEngine interface {
    GetFrame() *protobufs.ClockFrame
    GetSeniority() *big.Int
    GetRingPosition() int
    AnnounceProverMerge()
    AnnounceProverJoin()
}

@@ -20,6 +20,7 @@ import (

const PROOF_FRAME_CUTOFF = 46500
const PROOF_FRAME_RING_RESET = 52000
const PROOF_FRAME_RING_RESET_2 = 53028

func (a *TokenApplication) handleMint(
    currentFrameNumber uint64,

@ -117,6 +117,14 @@ func (a *TokenApplication) handleDataAnnounceProverJoin(
|
||||
return nil, errors.Wrap(ErrInvalidStateTransition, "handle join")
|
||||
}
|
||||
|
||||
outputs := []*protobufs.TokenOutput{}
|
||||
if t.Announce != nil {
|
||||
outputs, err = a.handleAnnounce(currentFrameNumber, lockMap, t.Announce)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrInvalidStateTransition, "handle join")
|
||||
}
|
||||
}
|
||||
|
||||
lockMap[string(t.PublicKeySignatureEd448.PublicKey.KeyValue)] = struct{}{}
|
||||
for _, t := range a.Tries {
|
||||
if t.Contains(address) {
|
||||
@ -124,11 +132,14 @@ func (a *TokenApplication) handleDataAnnounceProverJoin(
|
||||
}
|
||||
}
|
||||
|
||||
return []*protobufs.TokenOutput{
|
||||
outputs = append(
|
||||
outputs,
|
||||
&protobufs.TokenOutput{
|
||||
Output: &protobufs.TokenOutput_Join{
|
||||
Join: t,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
)
|
||||
|
||||
return outputs, nil
|
||||
}
|
||||
|
||||
@ -70,7 +70,6 @@ func TestHandleProverJoin(t *testing.T) {
|
||||
}
|
||||
|
||||
addr := addrBI.FillBytes(make([]byte, 32))
|
||||
|
||||
payload := []byte("join")
|
||||
payload = binary.BigEndian.AppendUint64(payload, 0)
|
||||
payload = append(payload, bytes.Repeat([]byte{0xff}, 32)...)
|
||||
|
||||
@@ -74,8 +74,8 @@ func ToSerializedMap(m *PeerSeniority) map[string]uint64 {
    return s
}

func (p PeerSeniorityItem) Priority() *big.Int {
    return big.NewInt(int64(p.seniority))
func (p PeerSeniorityItem) Priority() uint64 {
    return p.seniority
}

type TokenExecutionEngine struct {
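Editor's note: PeerSeniorityItem now satisfies the tries.Item interface with a plain uint64 priority instead of a *big.Int, so heap ordering becomes a direct integer comparison. A brief illustrative sketch of an Item under the new interface (hypothetical type, not part of the commit):

    // Hypothetical example type implementing tries.Item.
    type exampleItem struct {
        name      string
        seniority uint64
    }

    // Priority returns the raw uint64 that MinHeap now compares directly.
    func (e exampleItem) Priority() uint64 { return e.seniority }

    // Usage:
    //   h := tries.NewMinHeap[exampleItem]()
    //   h.Push(exampleItem{name: "a", seniority: 3})
    //   h.Push(exampleItem{name: "b", seniority: 1})
    //   lowest, _ := h.Pop() // lowest.seniority == 1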
@ -226,6 +226,46 @@ func NewTokenExecutionEngine(
|
||||
alwaysSend = true
|
||||
}
|
||||
|
||||
restore := func() []*tries.RollingFrecencyCritbitTrie {
|
||||
frame, _, err := clockStore.GetLatestDataClockFrame(intrinsicFilter)
|
||||
if err != nil && !errors.Is(err, store.ErrNotFound) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tries := []*tries.RollingFrecencyCritbitTrie{
|
||||
&tries.RollingFrecencyCritbitTrie{},
|
||||
}
|
||||
for _, key := range proverKeys {
|
||||
addr, _ := poseidon.HashBytes(key)
|
||||
tries[0].Add(addr.FillBytes(make([]byte, 32)), 0)
|
||||
if err = clockStore.SetProverTriesForFrame(frame, tries); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
peerSeniority, err = RebuildPeerSeniority(uint(cfg.P2P.Network))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
txn, err := clockStore.NewTransaction()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = clockStore.PutPeerSeniorityMap(txn, intrinsicFilter, peerSeniority)
|
||||
if err != nil {
|
||||
txn.Abort()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err = txn.Commit(); err != nil {
|
||||
txn.Abort()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return tries
|
||||
}
|
||||
|
||||
dataTimeReel := time.NewDataTimeReel(
|
||||
intrinsicFilter,
|
||||
logger,
|
||||
@ -256,6 +296,7 @@ func NewTokenExecutionEngine(
|
||||
inclusionProof,
|
||||
proverKeys,
|
||||
alwaysSend,
|
||||
restore,
|
||||
)
|
||||
|
||||
e.clock = data.NewDataClockConsensusEngine(
|
||||
@ -591,6 +632,10 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
peerIds = append(peerIds, peerId.String())
|
||||
}
|
||||
|
||||
logger := e.logger.Debug
|
||||
if peerIds[0] == peer.ID(e.pubSub.GetPeerID()).String() {
|
||||
logger = e.logger.Info
|
||||
}
|
||||
mergeable := true
|
||||
for i, peerId := range peerIds {
|
||||
addr, err := e.getAddressFromSignature(
|
||||
@ -603,7 +648,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
|
||||
sen, ok := (*e.peerSeniority)[string(addr)]
|
||||
if !ok {
|
||||
e.logger.Debug(
|
||||
logger(
|
||||
"peer announced with no seniority",
|
||||
zap.String("peer_id", peerId),
|
||||
)
|
||||
@ -612,7 +657,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
|
||||
peer := new(big.Int).SetUint64(sen.seniority)
|
||||
if peer.Cmp(GetAggregatedSeniority([]string{peerId})) != 0 {
|
||||
e.logger.Debug(
|
||||
logger(
|
||||
"peer announced but is already different seniority",
|
||||
zap.String("peer_id", peerIds[0]),
|
||||
)
|
||||
@ -637,6 +682,9 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
return nil, errors.Wrap(err, "process frame")
|
||||
}
|
||||
|
||||
aggregated := GetAggregatedSeniority(peerIds).Uint64()
|
||||
logger("peer has merge, aggregated seniority", zap.Uint64("seniority", aggregated))
|
||||
|
||||
for _, pr := range prfs {
|
||||
if pr.IndexProof == nil && pr.Difficulty == 0 && pr.Commitment == nil {
|
||||
// approximate average per interval:
|
||||
@ -646,11 +694,16 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
add = big.NewInt(4000000)
|
||||
}
|
||||
additional = add.Uint64()
|
||||
logger("1.4.19-21 seniority", zap.Uint64("seniority", additional))
|
||||
}
|
||||
}
|
||||
|
||||
total := aggregated + additional
|
||||
|
||||
logger("combined aggregate and 1.4.19-21 seniority", zap.Uint64("seniority", total))
|
||||
|
||||
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
|
||||
seniority: GetAggregatedSeniority(peerIds).Uint64() + additional,
|
||||
seniority: aggregated + additional,
|
||||
addr: string(addr),
|
||||
}
|
||||
|
||||
@ -668,6 +721,61 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
addr: string(addr),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
addr, err := e.getAddressFromSignature(
|
||||
o.Announce.PublicKeySignaturesEd448[0],
|
||||
)
|
||||
if err != nil {
|
||||
txn.Abort()
|
||||
return nil, errors.Wrap(err, "process frame")
|
||||
}
|
||||
|
||||
sen, ok := (*e.peerSeniority)[string(addr)]
|
||||
if !ok {
|
||||
logger(
|
||||
"peer announced with no seniority",
|
||||
zap.String("peer_id", peerIds[0]),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
peer := new(big.Int).SetUint64(sen.seniority)
|
||||
if peer.Cmp(GetAggregatedSeniority([]string{peerIds[0]})) != 0 {
|
||||
logger(
|
||||
"peer announced but is already different seniority",
|
||||
zap.String("peer_id", peerIds[0]),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
additional := uint64(0)
|
||||
_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
|
||||
if err != nil && !errors.Is(err, store.ErrNotFound) {
|
||||
txn.Abort()
|
||||
return nil, errors.Wrap(err, "process frame")
|
||||
}
|
||||
|
||||
aggregated := GetAggregatedSeniority(peerIds).Uint64()
|
||||
logger("peer does not have merge, pre-1.4.19 seniority", zap.Uint64("seniority", aggregated))
|
||||
|
||||
for _, pr := range prfs {
|
||||
if pr.IndexProof == nil && pr.Difficulty == 0 && pr.Commitment == nil {
|
||||
// approximate average per interval:
|
||||
add := new(big.Int).SetBytes(pr.Amount)
|
||||
add.Quo(add, big.NewInt(58800000))
|
||||
if add.Cmp(big.NewInt(4000000)) > 0 {
|
||||
add = big.NewInt(4000000)
|
||||
}
|
||||
additional = add.Uint64()
|
||||
logger("1.4.19-21 seniority", zap.Uint64("seniority", additional))
|
||||
}
|
||||
}
|
||||
total := GetAggregatedSeniority([]string{peerIds[0]}).Uint64() + additional
|
||||
logger("combined aggregate and 1.4.19-21 seniority", zap.Uint64("seniority", total))
|
||||
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
|
||||
seniority: total,
|
||||
addr: string(addr),
|
||||
}
|
||||
}
|
||||
case *protobufs.TokenOutput_Join:
|
||||
addr, err := e.getAddressFromSignature(o.Join.PublicKeySignatureEd448)
|
||||
@ -719,7 +827,8 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
if (*e.peerSeniority)[addr].seniority > o.Penalty.Quantity {
|
||||
for _, t := range app.Tries {
|
||||
if t.Contains([]byte(addr)) {
|
||||
_, latest, _ := t.Get([]byte(addr))
|
||||
v := t.Get([]byte(addr))
|
||||
latest := v.LatestFrame
|
||||
if frame.FrameNumber-latest > 100 {
|
||||
proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
|
||||
}
|
||||
@ -789,7 +898,8 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
return nil, errors.Wrap(err, "process frame")
|
||||
}
|
||||
|
||||
if frame.FrameNumber == application.PROOF_FRAME_RING_RESET {
|
||||
if frame.FrameNumber == application.PROOF_FRAME_RING_RESET ||
|
||||
frame.FrameNumber == application.PROOF_FRAME_RING_RESET_2 {
|
||||
e.logger.Info("performing ring reset")
|
||||
seniorityMap, err := RebuildPeerSeniority(e.pubSub.GetNetwork())
|
||||
if err != nil {
|
||||
@ -810,7 +920,6 @@ func (e *TokenExecutionEngine) ProcessFrame(
|
||||
txn.Abort()
|
||||
return nil, errors.Wrap(err, "process frame")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return app.Tries, nil
|
||||
@ -852,8 +961,8 @@ func ProcessJoinsAndLeaves(
|
||||
for _, t := range app.Tries[1:] {
|
||||
nodes := t.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
for _, n := range nodes {
|
||||
if n.External.LatestFrame < frame.FrameNumber-1000 {
|
||||
t.Remove(n.External.Key)
|
||||
if n.LatestFrame < frame.FrameNumber-1000 {
|
||||
t.Remove(n.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -867,7 +976,7 @@ func ProcessJoinsAndLeaves(
|
||||
nextSet := t.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
eligibilityOrder := tries.NewMinHeap[PeerSeniorityItem]()
|
||||
for _, n := range nextSet {
|
||||
eligibilityOrder.Push((*seniority)[string(n.External.Key)])
|
||||
eligibilityOrder.Push((*seniority)[string(n.Key)])
|
||||
}
|
||||
process := eligibilityOrder.All()
|
||||
slices.Reverse(process)
|
||||
@ -1033,7 +1142,7 @@ func (e *TokenExecutionEngine) GetSeniority() *big.Int {
|
||||
return big.NewInt(0)
|
||||
}
|
||||
|
||||
return sen.Priority()
|
||||
return new(big.Int).SetUint64(sen.Priority())
|
||||
}
|
||||
|
||||
func GetAggregatedSeniority(peerIds []string) *big.Int {
|
||||
@ -1144,11 +1253,11 @@ func GetAggregatedSeniority(peerIds []string) *big.Int {
|
||||
)
|
||||
}
|
||||
|
||||
func (e *TokenExecutionEngine) AnnounceProverMerge() {
|
||||
func (e *TokenExecutionEngine) AnnounceProverMerge() *protobufs.AnnounceProverRequest {
|
||||
currentHead := e.GetFrame()
|
||||
if currentHead == nil ||
|
||||
currentHead.FrameNumber < application.PROOF_FRAME_CUTOFF {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
keys := [][]byte{}
|
||||
ksigs := [][]byte{}
|
||||
@ -1197,14 +1306,12 @@ func (e *TokenExecutionEngine) AnnounceProverMerge() {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
announce := &protobufs.TokenRequest_Announce{
|
||||
Announce: &protobufs.AnnounceProverRequest{
|
||||
PublicKeySignaturesEd448: []*protobufs.Ed448Signature{},
|
||||
},
|
||||
announce := &protobufs.AnnounceProverRequest{
|
||||
PublicKeySignaturesEd448: []*protobufs.Ed448Signature{},
|
||||
}
|
||||
|
||||
announce.Announce.PublicKeySignaturesEd448 = append(
|
||||
announce.Announce.PublicKeySignaturesEd448,
|
||||
announce.PublicKeySignaturesEd448 = append(
|
||||
announce.PublicKeySignaturesEd448,
|
||||
&protobufs.Ed448Signature{
|
||||
PublicKey: &protobufs.Ed448PublicKey{
|
||||
KeyValue: e.pubSub.GetPublicKey(),
|
||||
@ -1214,8 +1321,8 @@ func (e *TokenExecutionEngine) AnnounceProverMerge() {
|
||||
)
|
||||
|
||||
for i := range keys {
|
||||
announce.Announce.PublicKeySignaturesEd448 = append(
|
||||
announce.Announce.PublicKeySignaturesEd448,
|
||||
announce.PublicKeySignaturesEd448 = append(
|
||||
announce.PublicKeySignaturesEd448,
|
||||
&protobufs.Ed448Signature{
|
||||
PublicKey: &protobufs.Ed448PublicKey{
|
||||
KeyValue: keys[i],
|
||||
@ -1225,11 +1332,7 @@ func (e *TokenExecutionEngine) AnnounceProverMerge() {
|
||||
)
|
||||
}
|
||||
|
||||
req := &protobufs.TokenRequest{
|
||||
Request: announce,
|
||||
}
|
||||
|
||||
e.publishMessage(append([]byte{0x00}, e.intrinsicFilter...), req)
|
||||
return announce
|
||||
}
|
||||
|
||||
func (e *TokenExecutionEngine) AnnounceProverJoin() {
|
||||
@ -1259,6 +1362,7 @@ func (e *TokenExecutionEngine) AnnounceProverJoin() {
|
||||
KeyValue: e.pubSub.GetPublicKey(),
|
||||
},
|
||||
},
|
||||
Announce: e.AnnounceProverMerge(),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@ -14,7 +14,7 @@ import (
|
||||
func TestProcessJoinsAndLeaves(t *testing.T) {
|
||||
set := [][]byte{}
|
||||
for i := 0; i < 6000; i++ {
|
||||
b := make([]byte, 32)
|
||||
b := make([]byte, 9999)
|
||||
rand.Read(b)
|
||||
set = append(set, b)
|
||||
}
|
||||
@ -32,7 +32,7 @@ func TestProcessJoinsAndLeaves(t *testing.T) {
|
||||
app := &application.TokenApplication{
|
||||
Tries: tr,
|
||||
}
|
||||
token.ProcessJoinsAndLeaves(joins, []token.PeerSeniorityItem{}, app, seniority, &protobufs.ClockFrame{FrameNumber: 20})
|
||||
token.ProcessJoinsAndLeaves(joins, []token.PeerSeniorityItem{}, app, seniority, &protobufs.ClockFrame{FrameNumber: 9999})
|
||||
|
||||
assert.Equal(t, len(app.Tries), 4)
|
||||
assert.Equal(t, len(app.Tries[1].FindNearestAndApproximateNeighbors(make([]byte, 32))), 2048)
|
||||
@ -40,13 +40,16 @@ func TestProcessJoinsAndLeaves(t *testing.T) {
|
||||
assert.Equal(t, len(app.Tries[3].FindNearestAndApproximateNeighbors(make([]byte, 32))), 1904)
|
||||
|
||||
leaves := []token.PeerSeniorityItem{}
|
||||
// Seniority works from highest to lowest, so we should have one removal in the bottom most, three in the middle, and one in the highest
|
||||
leaves = append(leaves, joins[30])
|
||||
leaves = append(leaves, joins[1907])
|
||||
leaves = append(leaves, joins[1955])
|
||||
leaves = append(leaves, joins[2047])
|
||||
leaves = append(leaves, joins[4095])
|
||||
token.ProcessJoinsAndLeaves([]token.PeerSeniorityItem{}, leaves, app, seniority, &protobufs.ClockFrame{FrameNumber: 20})
|
||||
token.ProcessJoinsAndLeaves([]token.PeerSeniorityItem{}, leaves, app, seniority, &protobufs.ClockFrame{FrameNumber: 10000})
|
||||
|
||||
assert.Equal(t, len(app.Tries), 4)
|
||||
assert.Equal(t, len(app.Tries[1].FindNearestAndApproximateNeighbors(make([]byte, 32))), 2048)
|
||||
assert.Equal(t, len(app.Tries[2].FindNearestAndApproximateNeighbors(make([]byte, 32))), 2048)
|
||||
assert.Equal(t, len(app.Tries[3].FindNearestAndApproximateNeighbors(make([]byte, 32))), 1901)
|
||||
assert.Equal(t, len(app.Tries[3].FindNearestAndApproximateNeighbors(make([]byte, 32))), 1899)
|
||||
}
|
||||
|
||||
@ -100,6 +100,14 @@ func LoadAggregatedSeniorityMap(network uint) {
|
||||
PeerId: "QmNtGTnGLpi35sLmrgwd2EaUJFNz99WBd7ZzzRaw8GYo9e",
|
||||
Reward: "78604",
|
||||
},
|
||||
{
|
||||
PeerId: "QmSjeYnJAbUEq3vdVP89PNbKuTfFgLXZ1cLKaWShbs2hvW",
|
||||
Reward: "78604",
|
||||
},
|
||||
{
|
||||
PeerId: "QmQrhv7bymSWPaJsatr3kdp14GP2JpTE128syPVj3eUjLy",
|
||||
Reward: "78604",
|
||||
},
|
||||
{
|
||||
PeerId: "QmNPx7PKUS6bz9MbJciWPDDRi6ufJ6vBgVqGrSXaUyUgb6",
|
||||
Reward: "39302",
|
||||
@ -123,6 +131,24 @@ func LoadAggregatedSeniorityMap(network uint) {
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmaPh3cY9Gi8CbBr4H7nTZUABu8cJwXxRnp2utgg1urGjp",
|
||||
Reward: "1000",
|
||||
JanPresence: true,
|
||||
FebPresence: true,
|
||||
MarPresence: false,
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmapvC4ApSxBz1J6Cdfra8375pJRo1FKp1bad5mLvn3KEK",
|
||||
Reward: "1000",
|
||||
JanPresence: true,
|
||||
FebPresence: true,
|
||||
MarPresence: false,
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmUbgmwR3Z8Vp9zHHeuGRxRrfh4YzLF5CbW48Ur8Kx9jAP",
|
||||
Reward: "1000",
|
||||
@ -208,10 +234,30 @@ func RebuildPeerSeniority(network uint) (map[string]uint64, error) {
|
||||
PeerId: "QmTG8UAmrYBdLi76CEkXK7equRcoRRKBjbkK44oT6TcEGU",
|
||||
Reward: "157208",
|
||||
},
|
||||
{
|
||||
PeerId: "QmVDhgHgpvFG2ZiCYhUPKXA8i5j8Fp9zoGE5Bc6SLwsiuA",
|
||||
Reward: "157208",
|
||||
},
|
||||
{
|
||||
PeerId: "QmRZMVG1VbBWMEensjqBS7XqBzNfCoA5HxdDwCuouUeY16",
|
||||
Reward: "157208",
|
||||
},
|
||||
{
|
||||
PeerId: "QmPpk2cbkpzAiadWDQVCL4XBukLNNY4BujT9LYq3DYE3ZR",
|
||||
Reward: "157208",
|
||||
},
|
||||
{
|
||||
PeerId: "QmR3Xuc3t7zbnUy5fcC4iY58fnHEsFmzYra6JgY9sRqE8Y",
|
||||
Reward: "157208",
|
||||
},
|
||||
{
|
||||
PeerId: "QmPjwYSn29VoYogxAzGbh5kgGYB5rZFauSS66c3J4KkK4j",
|
||||
Reward: "78604",
|
||||
},
|
||||
{
|
||||
PeerId: "QmayFGarM7BVPYWnjAF7rBQAczXniELHKPKHS5VY8URZBd",
|
||||
Reward: "78604",
|
||||
},
|
||||
{
|
||||
PeerId: "QmWwqsH3vwPkRufqtdS1sgxgWwg8i4sgsfpeDy9BbX259p",
|
||||
Reward: "78604",
|
||||
@ -232,12 +278,34 @@ func RebuildPeerSeniority(network uint) (map[string]uint64, error) {
|
||||
PeerId: "Qma3bMDgVjCNgvSd3uomekF4v7Pq4VkTyT5R31FfdrqSan",
|
||||
Reward: "39302",
|
||||
},
|
||||
{
|
||||
PeerId: "QmbQ9Bp4SvspysHLTAYQtFN7MY9Acae4AwVFjTy3rp7Q2A",
|
||||
Reward: "39302",
|
||||
},
|
||||
{
|
||||
PeerId: "QmUDWLhZMRoCoqkJAqvi815EJwjQAZoTm2oa9LkRwqeeAW",
|
||||
Reward: "78604",
|
||||
},
|
||||
}
|
||||
secondRetro = []*SecondRetroJson{
|
||||
{
|
||||
PeerId: "QmPpk2cbkpzAiadWDQVCL4XBukLNNY4BujT9LYq3DYE3ZR",
|
||||
Reward: "1000",
|
||||
JanPresence: true,
|
||||
FebPresence: true,
|
||||
MarPresence: false,
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "Qma3bMDgVjCNgvSd3uomekF4v7Pq4VkTyT5R31FfdrqSan",
|
||||
Reward: "1000",
|
||||
JanPresence: true,
|
||||
FebPresence: true,
|
||||
MarPresence: false,
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmeafLbKKfmRKQdF7LK1Z3ayNbzwRLmRpZCtjBXrGKZzht",
|
||||
Reward: "1000",
|
||||
@ -283,6 +351,15 @@ func RebuildPeerSeniority(network uint) (map[string]uint64, error) {
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmXDWA4f3J5WxmseBfuCEsZNv8aeAkUrJ7fqoxr894tFCi",
|
||||
Reward: "1000",
|
||||
JanPresence: true,
|
||||
FebPresence: true,
|
||||
MarPresence: false,
|
||||
AprPresence: false,
|
||||
MayPresence: false,
|
||||
},
|
||||
{
|
||||
PeerId: "QmYSwFqgVKUFGkNM8Ae4DrarCjGKPJ4u7oJvRhrmx3YPpB",
|
||||
Reward: "1000",
|
||||
@ -302,8 +379,21 @@ func RebuildPeerSeniority(network uint) (map[string]uint64, error) {
|
||||
PeerId: "QmaQuJGk6fGrYYTQiBFFasKLxSKkEkPaywEKoVbnXULEEG",
|
||||
Reward: "1000",
|
||||
},
|
||||
{
|
||||
PeerId: "QmYKSNoRkpL3ufKLhNUS77jirDJ5zWg9yGZmrBJhBcsaoE",
|
||||
Reward: "1000",
|
||||
},
|
||||
{
|
||||
PeerId: "QmZCMe29zbGkqceyzjjmzND9nDUMcWyMBUZSzMhns1sejH",
|
||||
Reward: "1000",
|
||||
},
|
||||
}
|
||||
fourthRetro = []*FourthRetroJson{
|
||||
{
|
||||
PeerId: "QmaQuJGk6fGrYYTQiBFFasKLxSKkEkPaywEKoVbnXULEEG",
|
||||
Reward: "1000",
|
||||
},
|
||||
}
|
||||
fourthRetro = []*FourthRetroJson{}
|
||||
} else {
|
||||
firstRetro = []*FirstRetroJson{}
|
||||
secondRetro = []*SecondRetroJson{}
|
||||
|
||||
@ -60,6 +60,7 @@ require (
|
||||
github.com/pion/turn/v2 v2.1.6 // indirect
|
||||
github.com/pion/webrtc/v3 v3.2.40 // indirect
|
||||
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f // indirect
|
||||
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1 // indirect
|
||||
go.opentelemetry.io/otel v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.16.0 // indirect
|
||||
|
||||
@ -522,6 +522,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1 h1:hCGjUxpUUmnZg0yt5aJPdRkDndH/1e8ptiV73urNUBQ=
|
||||
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1/go.mod h1:iM17aYTnMeqDSIETK30CkHnWIAeogWYHFBz9ceCGaks=
|
||||
github.com/txaty/go-merkletree v0.2.2 h1:K5bHDFK+Q3KK+gEJeyTOECKuIwl/LVo4CI+cm0/p34g=
|
||||
github.com/txaty/go-merkletree v0.2.2/go.mod h1:w5HPEu7ubNw5LzS+91m+1/GtuZcWHKiPU3vEGi+ThJM=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
|
||||
@ -981,7 +981,8 @@ func processFrame(
|
||||
if (*peerSeniority)[addr].GetSeniority() > o.Penalty.Quantity {
|
||||
for _, t := range app.Tries {
|
||||
if t.Contains([]byte(addr)) {
|
||||
_, latest, _ := t.Get([]byte(addr))
|
||||
v := t.Get([]byte(addr))
|
||||
latest := v.LatestFrame
|
||||
if frame.FrameNumber-latest > 100 {
|
||||
proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package protobufs

import (
    "encoding/binary"
    "math/big"

    "github.com/iden3/go-iden3-crypto/poseidon"
    pcrypto "github.com/libp2p/go-libp2p/core/crypto"
@@ -10,14 +9,14 @@ import (
    "github.com/pkg/errors"
)

func (t *TokenRequest) Priority() *big.Int {
func (t *TokenRequest) Priority() uint64 {
    switch p := t.Request.(type) {
    case *TokenRequest_Mint:
        if len(p.Mint.Proofs) >= 3 {
            return new(big.Int).SetBytes(p.Mint.Proofs[2])
            return binary.BigEndian.Uint64(p.Mint.Proofs[2])
        }
    }
    return big.NewInt(0)
    return 0
}

func (t *MintCoinRequest) RingAndParallelism(

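Editor's note: the mint priority is now read as a fixed-width big-endian uint64 from the third proof rather than as an arbitrary-length big integer. A small sketch of the sender-side encoding this implies (hypothetical helper, assuming the priority is packed as exactly 8 big-endian bytes; binary.BigEndian.Uint64 panics on shorter slices, and whether callers guarantee that is an assumption here):

    // encodePriority packs a uint64 priority into the 8-byte big-endian
    // form that TokenRequest.Priority() expects to find in Proofs[2].
    func encodePriority(priority uint64) []byte {
        buf := make([]byte, 8)
        binary.BigEndian.PutUint64(buf, priority)
        return buf
    }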
File diff suppressed because it is too large
@ -181,6 +181,7 @@ message AnnounceProverJoin {
|
||||
bytes filter = 1;
|
||||
uint64 frame_number = 2;
|
||||
quilibrium.node.keys.pb.Ed448Signature public_key_signature_ed448 = 3;
|
||||
AnnounceProverRequest announce = 4;
|
||||
}
|
||||
|
||||
message AnnounceProverLeave {
|
||||
|
||||
@ -94,6 +94,10 @@ type ClockStore interface {
|
||||
filter []byte,
|
||||
seniorityMap map[string]uint64,
|
||||
) error
|
||||
SetProverTriesForFrame(
|
||||
frame *protobufs.ClockFrame,
|
||||
tries []*tries.RollingFrecencyCritbitTrie,
|
||||
) error
|
||||
}
|
||||
|
||||
type PebbleClockStore struct {
|
||||
@ -1529,3 +1533,24 @@ func (p *PebbleClockStore) PutPeerSeniorityMap(
|
||||
"put peer seniority map",
|
||||
)
|
||||
}
|
||||
|
||||
func (p *PebbleClockStore) SetProverTriesForFrame(
|
||||
frame *protobufs.ClockFrame,
|
||||
tries []*tries.RollingFrecencyCritbitTrie,
|
||||
) error {
|
||||
for i, proverTrie := range tries {
|
||||
proverData, err := proverTrie.Serialize()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "set prover tries for frame")
|
||||
}
|
||||
|
||||
if err = p.db.Set(
|
||||
clockProverTrieKey(frame.Filter, uint16(i), frame.FrameNumber),
|
||||
proverData,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "set prover tries for frame")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
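Editor's note: SetProverTriesForFrame persists each prover trie in its gob-serialized form. A short sketch of the Serialize/Deserialize round trip those calls rely on, using only methods shown in this diff:

    trie := &tries.RollingFrecencyCritbitTrie{}
    trie.Add(make([]byte, 32), 1)

    buf, err := trie.Serialize()
    if err != nil {
        panic(err)
    }

    restored := &tries.RollingFrecencyCritbitTrie{}
    if err := restored.Deserialize(buf); err != nil {
        panic(err)
    }
    // restored.Contains(make([]byte, 32)) == true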
node/tries/critbit_trie.go (new file, 388 lines)
@@ -0,0 +1,388 @@
// Modified from https://github.com/tatsushid/go-critbit, MIT Licensed
// Exports fields for serialization and uses explicit value type
|
||||
//
|
||||
// Package critbit implements Crit-Bit tree for byte sequences.
|
||||
//
|
||||
// Crit-Bit tree [1] is fast, memory efficient and a variant of PATRICIA trie.
|
||||
// This implementation can be used for byte sequences if it includes a null
|
||||
// byte or not. This is based on [2] and extends it to support a null byte in a
|
||||
// byte sequence.
|
||||
//
|
||||
// [1]: http://cr.yp.to/critbit.html (definition)
|
||||
// [2]: https://github.com/agl/critbit (C implementation and document)
|
||||
package tries
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
type NodeType int
|
||||
|
||||
func init() {
|
||||
gob.Register(&INode{})
|
||||
gob.Register(&ENode{})
|
||||
}
|
||||
|
||||
type Value struct {
|
||||
Key []byte
|
||||
EarliestFrame uint64
|
||||
LatestFrame uint64
|
||||
Count uint64
|
||||
}
|
||||
|
||||
const (
|
||||
Internal NodeType = iota
|
||||
External
|
||||
)
|
||||
|
||||
type Node interface {
|
||||
kind() NodeType
|
||||
}
|
||||
|
||||
type INode struct {
|
||||
Children [2]Node
|
||||
Pos int
|
||||
Other uint8
|
||||
}
|
||||
|
||||
func (n *INode) kind() NodeType { return Internal }
|
||||
|
||||
type ENode struct {
|
||||
Key []byte
|
||||
Value Value
|
||||
}
|
||||
|
||||
func (n *ENode) kind() NodeType { return External }
|
||||
|
||||
// Tree represents a critbit tree.
|
||||
type Tree struct {
|
||||
Root Node
|
||||
Size int
|
||||
}
|
||||
|
||||
// New returns an empty tree.
|
||||
func New() *Tree {
|
||||
return &Tree{}
|
||||
}
|
||||
|
||||
// Len returns a number of elements in the tree.
|
||||
func (t *Tree) Len() int {
|
||||
return t.Size
|
||||
}
|
||||
|
||||
func (t *Tree) direction(k []byte, pos int, other uint8) int {
|
||||
var c uint8
|
||||
if pos < len(k) {
|
||||
c = k[pos]
|
||||
} else if other == 0xff {
|
||||
return 0
|
||||
}
|
||||
return (1 + int(other|c)) >> 8
|
||||
}
|
||||
|
||||
func (t *Tree) lookup(k []byte) (*ENode, *INode) {
|
||||
if t.Root == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var top *INode
|
||||
p := t.Root
|
||||
for {
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
return n, top
|
||||
case *INode:
|
||||
if top == nil || n.Pos < len(k) {
|
||||
top = n
|
||||
}
|
||||
p = n.Children[t.direction(k, n.Pos, n.Other)]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get searches a given key from the tree. If the key exists in the tree, it
|
||||
// returns its value and true. If not, it returns nil and false.
|
||||
func (t *Tree) Get(k []byte) (interface{}, bool) {
|
||||
n, _ := t.lookup(k)
|
||||
if n != nil && bytes.Equal(k, n.Key) {
|
||||
return n.Value, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (t *Tree) findFirstDiffByte(k []byte, n *ENode) (pos int, other uint8, match bool) {
|
||||
var byt, b byte
|
||||
for pos = 0; pos < len(k); pos++ {
|
||||
b = k[pos]
|
||||
byt = 0
|
||||
if pos < len(n.Key) {
|
||||
byt = n.Key[pos]
|
||||
}
|
||||
if byt != b {
|
||||
return pos, byt ^ b, false
|
||||
}
|
||||
}
|
||||
if pos < len(n.Key) {
|
||||
return pos, n.Key[pos], false
|
||||
} else if pos == len(n.Key) {
|
||||
return 0, 0, true
|
||||
}
|
||||
return pos - 1, 0, false
|
||||
}
|
||||
|
||||
func (t *Tree) findInsertPos(k []byte, pos int, other uint8) (*Node, Node) {
|
||||
p := &t.Root
|
||||
for {
|
||||
switch n := (*p).(type) {
|
||||
case *ENode:
|
||||
return p, n
|
||||
case *INode:
|
||||
if n.Pos > pos {
|
||||
return p, n
|
||||
}
|
||||
if n.Pos == pos && n.Other > other {
|
||||
return p, n
|
||||
}
|
||||
p = &n.Children[t.direction(k, n.Pos, n.Other)]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert adds or updates a given key to the tree and returns its previous
|
||||
// value and if anything was set or not. If there is the key in the tree, it
|
||||
// adds the key and the value to the tree and returns nil and true when it
|
||||
// succeeded while if not, it updates the key's value and returns its previous
|
||||
// value and true when it succeeded.
|
||||
func (t *Tree) Insert(k []byte, v Value) (interface{}, bool) {
|
||||
key := append([]byte{}, k...)
|
||||
|
||||
n, _ := t.lookup(k)
|
||||
if n == nil { // only happens when t.root is nil
|
||||
t.Root = &ENode{Key: key, Value: v}
|
||||
t.Size++
|
||||
return nil, true
|
||||
}
|
||||
|
||||
pos, other, match := t.findFirstDiffByte(k, n)
|
||||
if match {
|
||||
orig := n.Value
|
||||
n.Value = v
|
||||
return orig, true
|
||||
}
|
||||
|
||||
other |= other >> 1
|
||||
other |= other >> 2
|
||||
other |= other >> 4
|
||||
other = ^(other &^ (other >> 1))
|
||||
di := t.direction(n.Key, pos, other)
|
||||
|
||||
newn := &INode{Pos: pos, Other: other}
|
||||
newn.Children[1-di] = &ENode{Key: key, Value: v}
|
||||
|
||||
p, child := t.findInsertPos(k, pos, other)
|
||||
newn.Children[di] = child
|
||||
*p = newn
|
||||
|
||||
t.Size++
|
||||
return nil, true
|
||||
}
|
||||
|
||||
func (t *Tree) findDeletePos(k []byte) (*Node, *ENode, int) {
|
||||
if t.Root == nil {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
var di int
|
||||
var q *Node
|
||||
p := &t.Root
|
||||
for {
|
||||
switch n := (*p).(type) {
|
||||
case *ENode:
|
||||
return q, n, di
|
||||
case *INode:
|
||||
di = t.direction(k, n.Pos, n.Other)
|
||||
q = p
|
||||
p = &n.Children[di]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete removes a given key and its value from the tree. If it succeeded, it
|
||||
// returns the key's previous value and true while if not, it returns nil and
|
||||
// false. On an empty tree, it always fails.
|
||||
func (t *Tree) Delete(k []byte) (interface{}, bool) {
|
||||
q, n, di := t.findDeletePos(k)
|
||||
if n == nil || !bytes.Equal(k, n.Key) {
|
||||
return nil, false
|
||||
}
|
||||
t.Size--
|
||||
if q == nil {
|
||||
t.Root = nil
|
||||
return n.Value, true
|
||||
}
|
||||
tmp := (*q).(*INode)
|
||||
*q = tmp.Children[1-di]
|
||||
return n.Value, true
|
||||
}
|
||||
|
||||
// Clear removes all elements in the tree. If it removes something, it returns
|
||||
// true while the tree is empty and there is nothing to remove, it returns
|
||||
// false.
|
||||
func (t *Tree) Clear() bool {
|
||||
if t.Root != nil {
|
||||
t.Root = nil
|
||||
t.Size = 0
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Minimum searches a key from the tree in lexicographic order and returns the
|
||||
// first one and its value. If it found such a key, it also returns true as the
|
||||
// bool value while if not, it returns false as it.
|
||||
func (t *Tree) Minimum() ([]byte, interface{}, bool) {
|
||||
if t.Root == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
p := t.Root
|
||||
for {
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
return n.Key, n.Value, true
|
||||
case *INode:
|
||||
p = n.Children[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Maximum searches a key from the tree in lexicographic order and returns the
|
||||
// last one and its value. If it found such a key, it also returns true as the
|
||||
// bool value while if not, it returns false as it.
|
||||
func (t *Tree) Maximum() ([]byte, interface{}, bool) {
|
||||
if t.Root == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
p := t.Root
|
||||
for {
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
return n.Key, n.Value, true
|
||||
case *INode:
|
||||
p = n.Children[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) longestPrefix(p Node, prefix []byte) ([]byte, interface{}, bool) {
|
||||
if p == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
var di int
|
||||
var c uint8
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
if bytes.HasPrefix(prefix, n.Key) {
|
||||
return n.Key, n.Value, true
|
||||
}
|
||||
case *INode:
|
||||
c = 0
|
||||
if n.Pos < len(prefix) {
|
||||
c = prefix[n.Pos]
|
||||
}
|
||||
di = (1 + int(n.Other|c)) >> 8
|
||||
|
||||
if k, v, ok := t.longestPrefix(n.Children[di], prefix); ok {
|
||||
return k, v, ok
|
||||
} else if di == 1 {
|
||||
return t.longestPrefix(n.Children[0], prefix)
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// LongestPrefix searches the longest key which is included in a given key and
|
||||
// returns the found key and its value. For example, if there are "f", "fo",
|
||||
// "foobar" in the tree and "foo" is given, it returns "fo". If it found such a
|
||||
// key, it returns true as the bool value while if not, it returns false as it.
|
||||
func (t *Tree) LongestPrefix(prefix []byte) ([]byte, interface{}, bool) {
|
||||
return t.longestPrefix(t.Root, prefix)
|
||||
}
|
||||
|
||||
// WalkFn is used at walking a tree. It receives a key and its value of each
|
||||
// elements which a walk function gives. If it returns true, a walk function
|
||||
// should be terminated at there.
|
||||
type WalkFn func(k []byte, v interface{}) bool
|
||||
|
||||
func (t *Tree) walk(p Node, fn WalkFn) bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
return fn(n.Key, n.Value)
|
||||
case *INode:
|
||||
for i := 0; i < 2; i++ {
|
||||
if t.walk(n.Children[i], fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Walk walks whole the tree and call a given function with each element's key
|
||||
// and value. If the function returns true, the walk is terminated at there.
|
||||
func (t *Tree) Walk(fn WalkFn) {
|
||||
t.walk(t.Root, fn)
|
||||
}
|
||||
|
||||
// WalkPrefix walks the tree under a given prefix and call a given function
|
||||
// with each element's key and value. For example, the tree has "f", "fo",
|
||||
// "foob", "foobar" and "foo" is given, it visits "foob" and "foobar" elements.
|
||||
// If the function returns true, the walk is terminated at there.
|
||||
func (t *Tree) WalkPrefix(prefix []byte, fn WalkFn) {
|
||||
n, top := t.lookup(prefix)
|
||||
if n == nil || !bytes.HasPrefix(n.Key, prefix) {
|
||||
return
|
||||
}
|
||||
wrapper := func(k []byte, v interface{}) bool {
|
||||
if bytes.HasPrefix(k, prefix) {
|
||||
return fn(k, v)
|
||||
}
|
||||
return false
|
||||
}
|
||||
t.walk(top, wrapper)
|
||||
}
|
||||
|
||||
func (t *Tree) walkPath(p Node, path []byte, fn WalkFn) bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
var di int
|
||||
switch n := p.(type) {
|
||||
case *ENode:
|
||||
if bytes.HasPrefix(path, n.Key) {
|
||||
return fn(n.Key, n.Value)
|
||||
}
|
||||
case *INode:
|
||||
di = t.direction(path, n.Pos, n.Other)
|
||||
if di == 1 {
|
||||
if t.walkPath(n.Children[0], path, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return t.walkPath(n.Children[di], path, fn)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WalkPath walks the tree from the root up to a given key and call a given
|
||||
// function with each element's key and value. For example, the tree has "f",
|
||||
// "fo", "foob", "foobar" and "foo" is given, it visits "f" and "fo" elements.
|
||||
// If the function returns true, the walk is terminated at there.
|
||||
func (t *Tree) WalkPath(path []byte, fn WalkFn) {
|
||||
t.walkPath(t.Root, path, fn)
|
||||
}
|
||||
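Editor's note: the new tries.Tree above stores an explicit Value per key (Key, EarliestFrame, LatestFrame, Count). A brief, illustrative usage sketch of its exported API:

    tr := tries.New()
    key := []byte("prover-address")

    tr.Insert(key, tries.Value{
        Key:           key,
        EarliestFrame: 10,
        LatestFrame:   10,
        Count:         1,
    })

    if v, ok := tr.Get(key); ok {
        val := v.(tries.Value) // Get returns interface{}, assert back to Value
        _ = val.LatestFrame
    }

    // Walk visits every element; returning true stops the walk early.
    tr.Walk(func(k []byte, v interface{}) bool {
        return false
    })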
@@ -1,11 +1,7 @@
package tries

import (
    "math/big"
)

type Item interface {
    Priority() *big.Int
    Priority() uint64
}

type MinHeap[I Item] struct {
@ -51,7 +47,7 @@ func (h *MinHeap[I]) Size() int {
|
||||
func (h *MinHeap[I]) upheap(i int) {
|
||||
for i > 0 {
|
||||
parent := (i - 1) / 2
|
||||
if h.items[i].Priority().Cmp(h.items[parent].Priority()) >= 0 {
|
||||
if h.items[i].Priority() >= h.items[parent].Priority() {
|
||||
break
|
||||
}
|
||||
h.items[i], h.items[parent] = h.items[parent], h.items[i]
|
||||
@ -66,11 +62,11 @@ func (h *MinHeap[I]) downheap(i int) {
|
||||
smallest := i
|
||||
|
||||
if left < len(h.items) &&
|
||||
h.items[left].Priority().Cmp(h.items[smallest].Priority()) < 0 {
|
||||
h.items[left].Priority() < h.items[smallest].Priority() {
|
||||
smallest = left
|
||||
}
|
||||
if right < len(h.items) &&
|
||||
h.items[right].Priority().Cmp(h.items[smallest].Priority()) < 0 {
|
||||
h.items[right].Priority() < h.items[smallest].Priority() {
|
||||
smallest = right
|
||||
}
|
||||
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package tries_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"source.quilibrium.com/quilibrium/monorepo/node/tries"
|
||||
@ -9,15 +8,15 @@ import (
|
||||
|
||||
type TestItem struct {
|
||||
value string
|
||||
priority *big.Int
|
||||
priority uint64
|
||||
}
|
||||
|
||||
func (t TestItem) Priority() *big.Int {
|
||||
func (t TestItem) Priority() uint64 {
|
||||
return t.priority
|
||||
}
|
||||
|
||||
func newTestItem(value string, priority int64) TestItem {
|
||||
return TestItem{value: value, priority: big.NewInt(priority)}
|
||||
func newTestItem(value string, priority uint64) TestItem {
|
||||
return TestItem{value: value, priority: priority}
|
||||
}
|
||||
|
||||
func TestNewMinHeap(t *testing.T) {
|
||||
@ -61,7 +60,7 @@ func TestPeek(t *testing.T) {
|
||||
if !ok {
|
||||
t.Error("Peek on non-empty heap should return true")
|
||||
}
|
||||
if item.value != "test" || item.priority.Cmp(big.NewInt(1)) != 0 {
|
||||
if item.value != "test" || item.priority != 1 {
|
||||
t.Errorf("Peek returned unexpected item: %v", item)
|
||||
}
|
||||
}
|
||||
@ -82,7 +81,7 @@ func TestPop(t *testing.T) {
|
||||
if !ok {
|
||||
t.Error("Pop on non-empty heap should return true")
|
||||
}
|
||||
if item.value != "test1" || item.priority.Cmp(big.NewInt(1)) != 0 {
|
||||
if item.value != "test1" || item.priority != 1 {
|
||||
t.Errorf("Pop returned unexpected item: %v", item)
|
||||
}
|
||||
if heap.Size() != 1 {
|
||||
@ -96,13 +95,13 @@ func TestHeapOrder(t *testing.T) {
|
||||
heap.Push(newTestItem("test1", 1))
|
||||
heap.Push(newTestItem("test2", 2))
|
||||
|
||||
expected := []int64{1, 2, 3}
|
||||
expected := []uint64{1, 2, 3}
|
||||
for i, exp := range expected {
|
||||
item, ok := heap.Pop()
|
||||
if !ok {
|
||||
t.Fatalf("Failed to pop item %d", i)
|
||||
}
|
||||
if item.priority.Cmp(big.NewInt(exp)) != 0 {
|
||||
if item.priority != exp {
|
||||
t.Errorf("Item %d: expected priority %d, got %v", i, exp, item.priority)
|
||||
}
|
||||
}
|
||||
|
||||
@ -3,49 +3,28 @@ package tries
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type Node struct {
|
||||
Internal *InternalNode
|
||||
External *ExternalNode
|
||||
}
|
||||
|
||||
type InternalNode struct {
|
||||
Child [2]Node
|
||||
ByteNumber uint32
|
||||
Bits byte
|
||||
}
|
||||
|
||||
type ExternalNode struct {
|
||||
Key []byte
|
||||
EarliestFrame uint64
|
||||
LatestFrame uint64
|
||||
Count uint64
|
||||
}
|
||||
|
||||
type RollingFrecencyCritbitTrie struct {
|
||||
Root *Node
|
||||
Trie *Tree
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) Serialize() ([]byte, error) {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
if t.Root == nil {
|
||||
return []byte{}, nil
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
enc := gob.NewEncoder(&b)
|
||||
|
||||
if err := enc.Encode(t.Root); err != nil {
|
||||
if err := enc.Encode(t.Trie); err != nil {
|
||||
return nil, errors.Wrap(err, "serialize")
|
||||
}
|
||||
|
||||
@ -63,8 +42,10 @@ func (t *RollingFrecencyCritbitTrie) Deserialize(buf []byte) error {
|
||||
b.Write(buf)
|
||||
dec := gob.NewDecoder(&b)
|
||||
|
||||
if err := dec.Decode(&t.Root); err != nil {
|
||||
return errors.Wrap(err, "deserialize")
|
||||
if err := dec.Decode(&t.Trie); err != nil {
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -73,110 +54,106 @@ func (t *RollingFrecencyCritbitTrie) Deserialize(buf []byte) error {
|
||||
func (t *RollingFrecencyCritbitTrie) Contains(address []byte) bool {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
p := t.findNearest(address)
|
||||
return p != nil &&
|
||||
p.External != nil &&
|
||||
bytes.Equal(p.External.Key, address)
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
_, ok := t.Trie.Get(address)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) Get(
|
||||
address []byte,
|
||||
) (earliestFrame uint64, latestFrame uint64, count uint64) {
|
||||
) Value {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
p := t.findNearest(address)
|
||||
|
||||
if p != nil &&
|
||||
p.External != nil &&
|
||||
bytes.Equal(p.External.Key, address) {
|
||||
return p.External.EarliestFrame, p.External.LatestFrame, p.External.Count
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
p, ok := t.Trie.Get(address)
|
||||
if !ok {
|
||||
return Value{
|
||||
EarliestFrame: 0,
|
||||
LatestFrame: 0,
|
||||
Count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
return 0, 0, 0
|
||||
return p.(Value)
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) FindNearest(
|
||||
address []byte,
|
||||
) *Node {
|
||||
) Value {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
return t.findNearest(address)
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) findNearest(
|
||||
address []byte,
|
||||
) *Node {
|
||||
blen := uint32(len(address))
|
||||
p := t.Root
|
||||
|
||||
if p == nil {
|
||||
return nil
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
|
||||
for p.Internal != nil {
|
||||
right := p.Internal.ByteNumber < blen &&
|
||||
address[p.Internal.ByteNumber]&p.Internal.Bits != 0
|
||||
if right {
|
||||
p = &p.Internal.Child[1]
|
||||
} else {
|
||||
p = &p.Internal.Child[0]
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
return t.FindNearestAndApproximateNeighbors(address)[0]
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) FindNearestAndApproximateNeighbors(
|
||||
address []byte,
|
||||
) []*Node {
|
||||
) []Value {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
blen := uint32(len(address))
|
||||
if t.Root == nil {
|
||||
return nil
|
||||
ret := []Value{}
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
|
||||
ret := []*Node{}
|
||||
|
||||
var traverse func(p *Node, address []byte) bool
|
||||
traverse = func(p *Node, address []byte) bool {
|
||||
if len(ret) > 2048 {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.Internal != nil {
|
||||
right := p.Internal.ByteNumber < blen &&
|
||||
address[p.Internal.ByteNumber]&p.Internal.Bits != 0
|
||||
|
||||
if right && !traverse(&p.Internal.Child[1], address) ||
|
||||
!traverse(&p.Internal.Child[0], address) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !right {
|
||||
return traverse(&p.Internal.Child[1], address)
|
||||
}
|
||||
|
||||
return true
|
||||
} else {
|
||||
ret = append(ret, p)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
traverse(t.Root, address)
|
||||
base := new(big.Int)
|
||||
base.SetBytes(address)
|
||||
t.Trie.Walk(func(k []byte, v interface{}) bool {
|
||||
ret = append(ret, v.(Value))
|
||||
return false
|
||||
})
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
bi, bj := new(big.Int), new(big.Int)
|
||||
bi.SetBytes(ret[i].External.Key)
|
||||
bj.SetBytes(ret[j].External.Key)
|
||||
targetLen := len(address)
|
||||
a := ret[i].Key
|
||||
b := ret[j].Key
|
||||
aLen := len(a)
|
||||
bLen := len(b)
|
||||
|
||||
bi.Sub(base, bi)
|
||||
bj.Sub(base, bj)
|
||||
maxLen := targetLen
|
||||
if aLen > maxLen {
|
||||
maxLen = aLen
|
||||
}
|
||||
if bLen > maxLen {
|
||||
maxLen = bLen
|
||||
}
|
||||
|
||||
return bi.CmpAbs(bj) <= 0
|
||||
var aDiff, bDiff byte
|
||||
for i := 0; i < maxLen; i++ {
|
||||
var targetByte, aByte, bByte byte
|
||||
|
||||
if i < targetLen {
|
||||
targetByte = address[i]
|
||||
}
|
||||
if i < aLen {
|
||||
aByte = a[i]
|
||||
}
|
||||
if i < bLen {
|
||||
bByte = b[i]
|
||||
}
|
||||
|
||||
if targetByte >= aByte {
|
||||
aDiff = targetByte - aByte
|
||||
} else {
|
||||
aDiff = aByte - targetByte
|
||||
}
|
||||
|
||||
if targetByte >= bByte {
|
||||
bDiff = targetByte - bByte
|
||||
} else {
|
||||
bDiff = bByte - targetByte
|
||||
}
|
||||
|
||||
if aDiff != bDiff {
|
||||
return aDiff < bDiff
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return ret
|
||||
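Editor's note: the replacement FindNearestAndApproximateNeighbors sorts candidates by byte-wise distance from the target address instead of by big-integer subtraction. An illustrative restatement of that comparison as a standalone helper (hypothetical function, not part of the commit), preferring the key whose first differing byte is numerically closer to the target:

    // closerTo reports whether a is at least as close to target as b,
    // comparing absolute per-byte differences from the most significant
    // byte down, mirroring the sort.Slice comparator above.
    func closerTo(target, a, b []byte) bool {
        maxLen := len(target)
        if len(a) > maxLen {
            maxLen = len(a)
        }
        if len(b) > maxLen {
            maxLen = len(b)
        }
        absDiff := func(x, y byte) byte {
            if x >= y {
                return x - y
            }
            return y - x
        }
        for i := 0; i < maxLen; i++ {
            var t, x, y byte
            if i < len(target) {
                t = target[i]
            }
            if i < len(a) {
                x = a[i]
            }
            if i < len(b) {
                y = b[i]
            }
            if d1, d2 := absDiff(t, x), absDiff(t, y); d1 != d2 {
                return d1 < d2
            }
        }
        return true
    }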
@ -188,192 +165,31 @@ func (t *RollingFrecencyCritbitTrie) Add(
|
||||
) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.Root == nil {
|
||||
t.Root = &Node{
|
||||
External: &ExternalNode{
|
||||
Key: address,
|
||||
EarliestFrame: latestFrame,
|
||||
LatestFrame: latestFrame,
|
||||
Count: 1,
|
||||
},
|
||||
}
|
||||
return
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
|
||||
p := t.findNearest(address)
|
||||
byteNumber, bits := p.critBit(address)
|
||||
if byteNumber < 0 {
|
||||
if p.External.LatestFrame < latestFrame {
|
||||
p.External.LatestFrame = latestFrame
|
||||
}
|
||||
if p.External.EarliestFrame > latestFrame {
|
||||
p.External.EarliestFrame = latestFrame
|
||||
}
|
||||
p.External.Count++
|
||||
return
|
||||
}
|
||||
|
||||
node := &InternalNode{
|
||||
ByteNumber: uint32(byteNumber),
|
||||
Bits: bits,
|
||||
}
|
||||
|
||||
blen := uint32(len(address))
|
||||
right := node.ByteNumber < blen &&
|
||||
address[node.ByteNumber]&node.Bits != 0
|
||||
e := &ExternalNode{
|
||||
Key: address,
|
||||
EarliestFrame: latestFrame,
|
||||
LatestFrame: latestFrame,
|
||||
Count: 1,
|
||||
}
|
||||
if right {
|
||||
node.Child[1].External = e
|
||||
} else {
|
||||
node.Child[0].External = e
|
||||
}
|
||||
|
||||
p = t.Root
|
||||
for m := p.Internal; m != nil; m = p.Internal {
|
||||
if m.ByteNumber > uint32(byteNumber) ||
|
||||
m.ByteNumber == uint32(byteNumber) && m.Bits < bits {
|
||||
break
|
||||
}
|
||||
|
||||
if m.ByteNumber < blen && address[m.ByteNumber]&m.Bits != 0 {
|
||||
p = &m.Child[1]
|
||||
} else {
|
||||
p = &m.Child[0]
|
||||
}
|
||||
}
|
||||
|
||||
if p.Internal != nil {
|
||||
// inverse the direction
|
||||
if right {
|
||||
node.Child[0].Internal = p.Internal
|
||||
} else {
|
||||
node.Child[1].Internal = p.Internal
|
||||
i, ok := t.Trie.Get(address)
|
||||
var v Value
|
||||
if !ok {
|
||||
v = Value{
|
||||
Key: address,
|
||||
EarliestFrame: latestFrame,
|
||||
LatestFrame: latestFrame,
|
||||
Count: 0,
|
||||
}
|
||||
} else {
|
||||
if right {
|
||||
node.Child[0].External = p.External
|
||||
} else {
|
||||
node.Child[1].External = p.External
|
||||
p.External = nil
|
||||
}
|
||||
v = i.(Value)
|
||||
}
|
||||
|
||||
p.Internal = node
|
||||
v.LatestFrame = latestFrame
|
||||
t.Trie.Insert(address, v)
|
||||
}
|
||||
|
||||
func (t *RollingFrecencyCritbitTrie) Remove(address []byte) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.Root == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if t.Root.External != nil {
|
||||
if bytes.Equal(t.Root.External.Key, address) {
|
||||
t.Root = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
blen := uint32(len(address))
|
||||
var gp *Node
|
||||
p := t.Root
|
||||
var right bool
|
||||
|
||||
for m := p.Internal; m != nil; m = p.Internal {
|
||||
right = p.Internal.ByteNumber < blen &&
|
||||
address[p.Internal.ByteNumber]&p.Internal.Bits != 0
|
||||
if right {
|
||||
gp, p = p, &m.Child[1]
|
||||
} else {
|
||||
gp, p = p, &m.Child[0]
|
||||
}
|
||||
}
|
||||
|
||||
if !bytes.Equal(p.External.Key, address) {
|
||||
return
|
||||
}
|
||||
|
||||
if gp == nil {
|
||||
p.External = nil
|
||||
} else {
|
||||
if right {
|
||||
gp.External, gp.Internal = gp.Internal.Child[0].External,
|
||||
gp.Internal.Child[0].Internal
|
||||
} else {
|
||||
gp.External, gp.Internal = gp.Internal.Child[1].External,
|
||||
gp.Internal.Child[1].Internal
|
||||
}
|
||||
if t.Trie == nil {
|
||||
t.Trie = New()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) String() string {
|
||||
if n.External != nil {
|
||||
return hex.EncodeToString(n.External.Key)
|
||||
} else {
|
||||
nodes := []string{}
|
||||
for i := range n.Internal.Child {
|
||||
nodes = append(nodes, n.Internal.Child[i].String())
|
||||
}
|
||||
return strings.Join(nodes, ",")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) Bits() []byte {
|
||||
if n.External != nil {
|
||||
return n.External.Key
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) Info() (latestFrame uint64, count uint64) {
|
||||
if n.External != nil {
|
||||
return n.External.LatestFrame, n.External.Count
|
||||
} else {
|
||||
return 0, 0
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) critBit(
|
||||
address []byte,
|
||||
) (byteNumber int, bits byte) {
|
||||
smallestLen := len(n.External.Key)
|
||||
if len(address) < smallestLen {
|
||||
smallestLen = len(address)
|
||||
}
|
||||
|
||||
for byteNumber = 0; byteNumber < smallestLen; byteNumber++ {
|
||||
if l, r := address[byteNumber], n.External.Key[byteNumber]; l != r {
|
||||
b := l ^ r
|
||||
b |= b >> 1
|
||||
b |= b >> 2
|
||||
b |= b >> 4
|
||||
bits = b &^ (b >> 1)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(n.External.Key) < len(address) {
|
||||
b := address[byteNumber]
|
||||
b |= b >> 1
|
||||
b |= b >> 2
|
||||
b |= b >> 4
|
||||
bits = b &^ (b >> 1)
|
||||
} else if len(n.External.Key) > len(address) {
|
||||
b := n.External.Key[byteNumber]
|
||||
b |= b >> 1
|
||||
b |= b >> 2
|
||||
b |= b >> 4
|
||||
bits = b &^ (b >> 1)
|
||||
} else {
|
||||
byteNumber = -1
|
||||
}
|
||||
return
|
||||
t.Trie.Delete(address)
|
||||
}
|
||||
|
||||
@ -2,8 +2,6 @@ package tries_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/cloudflare/circl/sign/ed448"
|
||||
@ -14,7 +12,7 @@ import (
|
||||
|
||||
func TestSerializers(t *testing.T) {
|
||||
tree := &tries.RollingFrecencyCritbitTrie{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := 0; i < 100; i++ {
|
||||
seed := make([]byte, 57)
|
||||
rand.Read(seed)
|
||||
|
||||
@ -36,21 +34,6 @@ func TestSerializers(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
err = newTree.Deserialize(buf)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
seed := make([]byte, 57)
|
||||
rand.Read(seed)
|
||||
|
||||
priv := ed448.NewKeyFromSeed(seed)
|
||||
pubkey := (priv.Public()).(ed448.PublicKey)
|
||||
disc, err := poseidon.HashBytes(pubkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
newTreeNeighbors := newTree.FindNearestAndApproximateNeighbors(disc.Bytes())
|
||||
for i, n := range tree.FindNearestAndApproximateNeighbors(disc.Bytes()) {
|
||||
assert.Equal(t, n.Bits(), newTreeNeighbors[i].Bits())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCritbitReinit(t *testing.T) {
|
||||
@ -61,54 +44,21 @@ func TestCritbitReinit(t *testing.T) {
|
||||
rand.Read(seed)
|
||||
set = append(set, seed)
|
||||
tree.Add(seed, 14)
|
||||
assert.True(t, tree.Contains(seed))
|
||||
tree.Remove(seed)
|
||||
assert.False(t, tree.Contains(seed))
|
||||
}
|
||||
for i := 0; i < 1024; i++ {
|
||||
tree.Add(set[i], 14)
|
||||
}
|
||||
near := tree.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
assert.Equal(t, 1024, len(near))
|
||||
for i := 0; i < 1024; i++ {
|
||||
tree.Remove(set[i])
|
||||
assert.False(t, tree.Contains(set[i]))
|
||||
near = tree.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
assert.Equal(t, 1024-i-1, len(near))
|
||||
}
|
||||
near := tree.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
for _, n := range near {
|
||||
fmt.Println(n.External.Key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCritbit(t *testing.T) {
|
||||
tree := &tries.RollingFrecencyCritbitTrie{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
seed := make([]byte, 57)
|
||||
rand.Read(seed)
|
||||
|
||||
priv := ed448.NewKeyFromSeed(seed)
|
||||
pubkey := (priv.Public()).(ed448.PublicKey)
|
||||
addr, err := poseidon.HashBytes(pubkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
v := uint64(i)
|
||||
a := addr.Bytes()
|
||||
b := make([]byte, 32)
|
||||
copy(b[32-len(a):], addr.Bytes())
|
||||
|
||||
tree.Add(b, v)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
seed := make([]byte, 57)
|
||||
rand.Read(seed)
|
||||
|
||||
priv := ed448.NewKeyFromSeed(seed)
|
||||
pubkey := (priv.Public()).(ed448.PublicKey)
|
||||
disc, err := poseidon.HashBytes(pubkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for _, n := range tree.FindNearestAndApproximateNeighbors(disc.Bytes()) {
|
||||
diff := new(big.Int)
|
||||
diff.SetBytes(n.Bits())
|
||||
diff.Sub(diff, disc)
|
||||
diff.Abs(diff)
|
||||
}
|
||||
}
|
||||
near = tree.FindNearestAndApproximateNeighbors(make([]byte, 32))
|
||||
assert.Equal(t, 0, len(near))
|
||||
}
|
||||
|
||||