Merge branch 'v2.0.6-p3' into develop-2.1

This commit is contained in:
Cassandra Heart 2025-01-21 01:26:17 -06:00
commit debd7b54d4
No known key found for this signature in database
GPG Key ID: 6352152859385958
8 changed files with 62 additions and 348 deletions

View File

@ -19,7 +19,7 @@ case "$os_type" in
# Check if the architecture is ARM
if [[ "$(uname -m)" == "arm64" ]]; then
# MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -L/opt/homebrew/Cellar/mpfr/4.2.1/lib -I/opt/homebrew/Cellar/mpfr/4.2.1/include -L/opt/homebrew/Cellar/gmp/6.3.0/lib -I/opt/homebrew/Cellar/gmp/6.3.0/include -L/opt/homebrew/Cellar/flint/3.1.3-p1/lib -I/opt/homebrew/Cellar/flint/3.1.3-p1/include -lbls48581 -lstdc++ -lvdf -ldl -lm -lflint -lgmp -lmpfr'" "$@"
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -L/opt/homebrew/Cellar/mpfr/4.2.1/lib -I/opt/homebrew/Cellar/mpfr/4.2.1/include -L/opt/homebrew/Cellar/gmp/6.3.0/lib -I/opt/homebrew/Cellar/gmp/6.3.0/include -lbls48581 -lvdf -ldl -lm -lflint -lgmp -lmpfr'" "$@"
else
echo "Unsupported platform"
exit 1

View File

@ -566,8 +566,6 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
errChan <- nil
}()
go e.runPreMidnightProofWorker()
e.wg.Add(1)
go func() {
defer e.wg.Done()

View File

@ -666,3 +666,25 @@ func (e *DataClockConsensusEngine) GetPublicChannel(
) error {
return errors.New("not supported")
}
// GetAddressOfPreCoinProof computes the canonical 32-byte address for a
// pre-coin proof. The address is the Poseidon hash of a preimage built
// from the token address followed by the proof's amount, index, index
// proof, commitment, proof bytes, parallelism, difficulty, a reserved
// zero word, and the owner's implicit account address.
func GetAddressOfPreCoinProof(
	proof *protobufs.PreCoinProof,
) ([]byte, error) {
	var preimage []byte
	preimage = append(preimage, application.TOKEN_ADDRESS...)
	preimage = append(preimage, proof.Amount...)
	preimage = binary.BigEndian.AppendUint32(preimage, proof.Index)
	preimage = append(preimage, proof.IndexProof...)
	preimage = append(preimage, proof.Commitment...)
	preimage = append(preimage, proof.Proof...)
	// Fixed-width trailer: parallelism, difficulty, then a reserved zero
	// word, all big-endian.
	for _, word := range []uint32{proof.Parallelism, proof.Difficulty, 0} {
		preimage = binary.BigEndian.AppendUint32(preimage, word)
	}
	preimage = append(preimage, proof.Owner.GetImplicitAccount().Address...)

	digest, err := poseidon.HashBytes(preimage)
	if err != nil {
		return nil, err
	}

	// Left-pad the hash into a fixed 32-byte address.
	return digest.FillBytes(make([]byte, 32)), nil
}

View File

@ -1,300 +0,0 @@
package data
import (
"bytes"
"encoding/binary"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
// runPreMidnightProofWorker migrates this node's pre-2.0 ("pre-midnight")
// data time proofs to the beacon node over a direct channel, requesting a
// mint for them in batches.
//
// The worker is best-effort and long-running: it blocks until the engine
// reaches at least the collecting state and enough peer info is available,
// then retries channel or RPC failures with a 10s backoff. It returns early
// when there are no pre-2.0 proofs, when a pre-coin proof already exists for
// this owner (mint already completed), or when the engine is stopping.
func (e *DataClockConsensusEngine) runPreMidnightProofWorker() {
	e.logger.Info("checking for pre-2.0 proofs")

	// Highest recorded increment for this peer's pre-2.0 proofs; a
	// not-found error means there is nothing to migrate.
	increment, _, _, err := e.dataProofStore.GetLatestDataTimeProof(
		e.pubSub.GetPeerID(),
	)
	if err != nil {
		if errors.Is(err, store.ErrNotFound) {
			e.logger.Info("could not find pre-2.0 proofs")
			return
		}
		panic(err)
	}

	// Wait for the engine to finish starting before doing network work.
	for {
		if e.GetState() < consensus.EngineStateCollecting {
			e.logger.Info("waiting for node to finish starting")
			time.Sleep(10 * time.Second)
			continue
		}
		break
	}

	// Derive this node's implicit account address from its peer id.
	addrBI, err := poseidon.HashBytes(e.pubSub.GetPeerID())
	if err != nil {
		panic(err)
	}
	addr := addrBI.FillBytes(make([]byte, 32))

	// Resolve the beacon's peer id from the genesis config; proofs are
	// submitted directly to the beacon.
	genesis := config.GetGenesis()
	pub, err := crypto.UnmarshalEd448PublicKey(genesis.Beacon)
	if err != nil {
		panic(err)
	}

	peerId, err := peer.IDFromPublicKey(pub)
	if err != nil {
		panic(errors.Wrap(err, "error getting peer id"))
	}

	// Wait for prover tries and a minimal peer set; bail out early if a
	// pre-coin proof already exists for this owner.
	for {
		tries := e.GetFrameProverTries()
		if len(tries) == 0 || e.pubSub.GetNetworkPeersCount() < 3 {
			e.logger.Info("waiting for more peer info to appear")
			time.Sleep(10 * time.Second)
			continue
		}

		_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
		if err != nil && !errors.Is(err, store.ErrNotFound) {
			e.logger.Error("error while fetching pre-coin proofs", zap.Error(err))
			return
		}

		if len(prfs) != 0 {
			e.logger.Info("already completed pre-midnight mint")
			return
		}
		break
	}

	// resume holds the server-issued continuation address; all zeros means
	// "start fresh" and triggers a status query in the loop below.
	resume := make([]byte, 32)

	cc, err := e.pubSub.GetDirectChannel(e.ctx, []byte(peerId), "worker")
	if err != nil {
		e.logger.Info(
			"could not establish direct channel, waiting...",
			zap.Error(err),
		)
		// cc remains nil on failure; the loop below retries the dial.
		time.Sleep(10 * time.Second)
	}

	for {
		// Stop cleanly once the engine begins shutting down.
		state := e.GetState()
		if state >= consensus.EngineStateStopping || state == consensus.EngineStateStopped {
			break
		}

		// Re-check: the mint may have completed since the last iteration.
		_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
		if err != nil && !errors.Is(err, store.ErrNotFound) {
			e.logger.Error("error while fetching pre-coin proofs", zap.Error(err))
			return
		}

		if len(prfs) != 0 {
			e.logger.Info("already completed pre-midnight mint")
			return
		}

		// Re-dial the direct channel if it was torn down on a prior error.
		if cc == nil {
			cc, err = e.pubSub.GetDirectChannel(e.ctx, []byte(peerId), "worker")
			if err != nil {
				e.logger.Info(
					"could not establish direct channel, waiting...",
					zap.Error(err),
				)
				cc = nil
				time.Sleep(10 * time.Second)
				continue
			}
		}

		client := protobufs.NewDataServiceClient(cc)

		// No resume point yet: ask the beacon where this owner left off.
		if bytes.Equal(resume, make([]byte, 32)) {
			status, err := client.GetPreMidnightMintStatus(
				e.ctx,
				&protobufs.PreMidnightMintStatusRequest{
					Owner: addr,
				},
				grpc.MaxCallSendMsgSize(1*1024*1024),
				grpc.MaxCallRecvMsgSize(1*1024*1024),
			)
			if err != nil || status == nil {
				// Tear down the channel and reconnect before retrying.
				e.logger.Error(
					"got error response, waiting...",
					zap.Error(err),
				)
				time.Sleep(10 * time.Second)
				cc.Close()
				cc = nil

				err = e.pubSub.Reconnect([]byte(peerId))
				if err != nil {
					e.logger.Error(
						"got error response, waiting...",
						zap.Error(err),
					)
					time.Sleep(10 * time.Second)
				}
				continue
			}

			resume = status.Address

			// The server reports the next expected increment; step back one
			// so the descending loop below resumes from the right proof. A
			// zero increment with a non-zero address means only increment 0
			// remains.
			if status.Increment != 0 {
				increment = status.Increment - 1
			} else if !bytes.Equal(status.Address, make([]byte, 32)) {
				increment = 0
			}
		}

		// Batch payload: a magic prefix and the resume address, followed by
		// serialized proofs appended below.
		proofs := [][]byte{
			[]byte("pre-dusk"),
			resume,
		}

		batchCount := 0
		// the cast is important, it underflows without:
		for i := int(increment); i >= 0; i-- {
			_, parallelism, input, output, err := e.dataProofStore.GetDataTimeProof(
				e.pubSub.GetPeerID(),
				uint32(i),
			)
			if err == nil {
				// Serialize as: increment | parallelism | len(input) |
				// input | len(output) | output, all big-endian.
				p := []byte{}
				p = binary.BigEndian.AppendUint32(p, uint32(i))
				p = binary.BigEndian.AppendUint32(p, parallelism)
				p = binary.BigEndian.AppendUint64(p, uint64(len(input)))
				p = append(p, input...)
				p = binary.BigEndian.AppendUint64(p, uint64(len(output)))
				p = append(p, output...)

				proofs = append(proofs, p)
			} else {
				// A gap in the local proof store is unrecoverable here.
				e.logger.Error(
					"could not find data time proof for peer and increment, stopping worker",
					zap.String("peer_id", peer.ID(e.pubSub.GetPeerID()).String()),
					zap.Int("increment", i),
				)
				cc.Close()
				cc = nil
				return
			}

			batchCount++
			// Flush every 200 proofs, or once the final increment (0) has
			// been collected.
			if batchCount == 200 || i == 0 {
				e.logger.Info("publishing proof batch", zap.Int("increment", i))

				// Sign the concatenated batch under a "mint" prefix.
				payload := []byte("mint")
				for _, i := range proofs {
					payload = append(payload, i...)
				}
				sig, err := e.pubSub.SignMessage(payload)
				if err != nil {
					cc.Close()
					panic(err)
				}

				resp, err := client.HandlePreMidnightMint(
					e.ctx,
					&protobufs.MintCoinRequest{
						Proofs: proofs,
						Signature: &protobufs.Ed448Signature{
							PublicKey: &protobufs.Ed448PublicKey{
								KeyValue: e.pubSub.GetPublicKey(),
							},
							Signature: sig,
						},
					},
					grpc.MaxCallSendMsgSize(1*1024*1024),
					grpc.MaxCallRecvMsgSize(1*1024*1024),
				)
				if err != nil {
					// An invalid-state-transition error on the final batch
					// means the proofs were already accepted: we're done.
					if strings.Contains(
						err.Error(),
						application.ErrInvalidStateTransition.Error(),
					) && i == 0 {
						resume = make([]byte, 32)
						e.logger.Info("pre-midnight proofs submitted, returning")
						cc.Close()
						cc = nil
						return
					}

					// Otherwise reset the resume point, tear down the
					// channel, reconnect, and restart from the status query.
					e.logger.Error(
						"got error response, waiting...",
						zap.Error(err),
					)
					resume = make([]byte, 32)
					cc.Close()
					cc = nil
					time.Sleep(10 * time.Second)

					err = e.pubSub.Reconnect([]byte(peerId))
					if err != nil {
						e.logger.Error(
							"got error response, waiting...",
							zap.Error(err),
						)
						time.Sleep(10 * time.Second)
					}
					break
				}

				// Batch accepted: advance the resume point and start a new
				// batch from the next lower increment.
				resume = resp.Address
				batchCount = 0
				proofs = [][]byte{
					[]byte("pre-dusk"),
					resume,
				}

				if i == 0 {
					e.logger.Info("pre-midnight proofs submitted, returning")
					cc.Close()
					cc = nil
					return
				} else {
					increment = uint32(i) - 1
				}
				break
			}
		}
	}
}
// GetAddressOfPreCoinProof derives the 32-byte address of a pre-coin proof.
// The address is the Poseidon hash of the token address concatenated with
// the proof's amount, index, index proof, commitment, proof bytes,
// parallelism, difficulty, a reserved zero word, and the owner's implicit
// account address.
func GetAddressOfPreCoinProof(
	proof *protobufs.PreCoinProof,
) ([]byte, error) {
	eval := []byte{}
	eval = append(eval, application.TOKEN_ADDRESS...)
	eval = append(eval, proof.Amount...)
	eval = binary.BigEndian.AppendUint32(eval, proof.Index)
	eval = append(eval, proof.IndexProof...)
	eval = append(eval, proof.Commitment...)
	eval = append(eval, proof.Proof...)
	eval = binary.BigEndian.AppendUint32(eval, proof.Parallelism)
	eval = binary.BigEndian.AppendUint32(eval, proof.Difficulty)
	// Reserved zero word, kept for preimage layout compatibility.
	eval = binary.BigEndian.AppendUint32(eval, 0)
	eval = append(eval, proof.Owner.GetImplicitAccount().Address...)
	addressBI, err := poseidon.HashBytes(eval)
	if err != nil {
		return nil, err
	}
	// Left-pad the hash into a fixed 32-byte address.
	return addressBI.FillBytes(make([]byte, 32)), nil
}

View File

@ -17,8 +17,8 @@ func init() {
}
const (
BranchNodes = 1024
BranchBits = 10 // log2(1024)
BranchNodes = 64
BranchBits = 6 // log2(64)
BranchMask = BranchNodes - 1
)
@ -73,7 +73,7 @@ func (n *VectorCommitmentBranchNode) Commit() []byte {
}
}
n.Commitment = rbls48581.CommitRaw(data, 1024)
n.Commitment = rbls48581.CommitRaw(data, 64)
}
return n.Commitment
@ -103,7 +103,7 @@ func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
}
}
n.Commitment = rbls48581.CommitRaw(data, 1024)
n.Commitment = rbls48581.CommitRaw(data, 64)
data = data[64*index : 64*(index+1)]
} else {
child := n.Children[index]
@ -127,7 +127,7 @@ func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
}
}
return rbls48581.VerifyRaw(data, n.Commitment, uint64(index), proof, 1024)
return rbls48581.VerifyRaw(data, n.Commitment, uint64(index), proof, 64)
}
func (n *VectorCommitmentBranchNode) Prove(index int) []byte {
@ -153,7 +153,7 @@ func (n *VectorCommitmentBranchNode) Prove(index int) []byte {
}
}
return rbls48581.ProveRaw(data, uint64(index), 1024)
return rbls48581.ProveRaw(data, uint64(index), 64)
}
type VectorCommitmentTree struct {

View File

@ -1,7 +1,6 @@
package application
import (
"encoding/binary"
"errors"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
@ -16,31 +15,28 @@ var ErrMissingExtrinsics = errors.New("missing extrinsics")
var ErrIsExtrinsic = errors.New("is extrinsic")
type Vertex struct {
AppAddress [32]byte
DataAddress [32]byte
SegmentOrder uint16
AppAddress [32]byte
DataAddress [32]byte
}
type Hyperedge struct {
AppAddress [32]byte
DataAddress [32]byte
Index uint16
Extrinsics map[[66]byte]Atom
Extrinsics map[[64]byte]Atom
}
type Atom interface {
GetID() [66]byte
GetID() [64]byte
GetAtomType() AtomType
GetLocation() Location
GetAppAddress() [32]byte
GetDataAddress() [32]byte
}
func (v *Vertex) GetID() [66]byte {
id := [66]byte{}
func (v *Vertex) GetID() [64]byte {
id := [64]byte{}
copy(id[:32], v.AppAddress[:])
copy(id[32:64], v.DataAddress[:])
binary.BigEndian.PutUint16(id[64:], v.SegmentOrder)
return id
}
@ -63,11 +59,10 @@ func (v *Vertex) GetDataAddress() [32]byte {
return v.DataAddress
}
func (h *Hyperedge) GetID() [66]byte {
id := [66]byte{}
func (h *Hyperedge) GetID() [64]byte {
id := [64]byte{}
copy(id[:32], h.AppAddress[:])
copy(id[32:], h.DataAddress[:])
binary.BigEndian.PutUint16(id[64:], h.Index)
return id
}
@ -92,7 +87,7 @@ func (h *Hyperedge) GetDataAddress() [32]byte {
type ShardAddress struct {
L1 [3]byte
L2 [48]byte
L2 [64]byte
}
func GetShardAddress(a Atom) ShardAddress {
@ -101,17 +96,17 @@ func GetShardAddress(a Atom) ShardAddress {
return ShardAddress{
L1: [3]byte(p2p.GetBloomFilterIndices(appAddress[:], 256, 3)),
L2: [48]byte(p2p.GetBloomFilterIndices(append(append([]byte{}, appAddress[:]...), dataAddress[:]...), 65536, 24)),
L2: [64]byte(append(append([]byte{}, appAddress[:]...), dataAddress[:]...)),
}
}
type IdSet struct {
atomType AtomType
atoms map[[66]byte]Atom
atoms map[[64]byte]Atom
}
func NewIdSet(atomType AtomType) *IdSet {
return &IdSet{atomType: atomType, atoms: make(map[[66]byte]Atom)}
return &IdSet{atomType: atomType, atoms: make(map[[64]byte]Atom)}
}
func (set *IdSet) Add(atom Atom) error {
@ -243,7 +238,7 @@ func (hg *Hypergraph) LookupAtom(a Atom) bool {
}
}
func (hg *Hypergraph) LookupAtomSet(atomSet map[[66]byte]Atom) bool {
func (hg *Hypergraph) LookupAtomSet(atomSet map[[64]byte]Atom) bool {
for _, atom := range atomSet {
if !hg.LookupAtom(atom) {
return false

View File

@ -22,9 +22,8 @@ func TestConvergence(t *testing.T) {
vertices := make([]*application.Vertex, numOperations)
for i := 0; i < numOperations; i++ {
vertices[i] = &application.Vertex{
AppAddress: [32]byte{byte(i % 256)},
DataAddress: [32]byte{byte(i / 256)},
SegmentOrder: uint16(i),
AppAddress: [32]byte{byte(i % 256)},
DataAddress: [32]byte{byte(i / 256)},
}
}
@ -33,7 +32,7 @@ func TestConvergence(t *testing.T) {
hyperedges[i] = &application.Hyperedge{
AppAddress: [32]byte{byte(i % 256)},
DataAddress: [32]byte{byte(i / 256)},
Extrinsics: make(map[[66]byte]application.Atom),
Extrinsics: make(map[[64]byte]application.Atom),
}
// Add some random vertices as extrinsics
for j := 0; j < 3; j++ {

View File

@ -12,8 +12,8 @@ func TestHypergraph(t *testing.T) {
// Test vertex operations
t.Run("Vertex Operations", func(t *testing.T) {
v1 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v2 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{2}, SegmentOrder: 1}
v1 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{1}}
v2 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{2}}
// Add vertices
err := hg.AddVertex(v1)
@ -48,15 +48,15 @@ func TestHypergraph(t *testing.T) {
// Test hyperedge operations
t.Run("Hyperedge Operations", func(t *testing.T) {
v3 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v4 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{2}, SegmentOrder: 1}
v3 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{1}}
v4 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{2}}
hg.AddVertex(v3)
hg.AddVertex(v4)
h1 := &application.Hyperedge{
AppAddress: [32]byte{3},
DataAddress: [32]byte{1},
Extrinsics: map[[66]byte]application.Atom{v3.GetID(): v3, v4.GetID(): v4},
Extrinsics: map[[64]byte]application.Atom{v3.GetID(): v3, v4.GetID(): v4},
}
// Add hyperedge
@ -82,15 +82,15 @@ func TestHypergraph(t *testing.T) {
// Test "within" relationship
t.Run("Within Relationship", func(t *testing.T) {
v5 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v6 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{2}, SegmentOrder: 1}
v5 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{1}}
v6 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{2}}
hg.AddVertex(v5)
hg.AddVertex(v6)
h2 := &application.Hyperedge{
AppAddress: [32]byte{5},
DataAddress: [32]byte{1},
Extrinsics: map[[66]byte]application.Atom{v5.GetID(): v5, v6.GetID(): v6},
Extrinsics: map[[64]byte]application.Atom{v5.GetID(): v5, v6.GetID(): v6},
}
hg.AddHyperedge(h2)
@ -101,7 +101,7 @@ func TestHypergraph(t *testing.T) {
t.Error("v6 should be within h2")
}
v7 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{3}, SegmentOrder: 1}
v7 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{3}}
hg.AddVertex(v7)
if hg.Within(v7, h2) {
t.Error("v7 should not be within h2")
@ -110,20 +110,20 @@ func TestHypergraph(t *testing.T) {
// Test nested hyperedges
t.Run("Nested Hyperedges", func(t *testing.T) {
v8 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v9 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{2}, SegmentOrder: 1}
v8 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{1}}
v9 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{2}}
hg.AddVertex(v8)
hg.AddVertex(v9)
h3 := &application.Hyperedge{
AppAddress: [32]byte{7},
DataAddress: [32]byte{1},
Extrinsics: map[[66]byte]application.Atom{v8.GetID(): v8},
Extrinsics: map[[64]byte]application.Atom{v8.GetID(): v8},
}
h4 := &application.Hyperedge{
AppAddress: [32]byte{7},
DataAddress: [32]byte{2},
Extrinsics: map[[66]byte]application.Atom{h3.GetID(): h3, v9.GetID(): v9},
Extrinsics: map[[64]byte]application.Atom{h3.GetID(): h3, v9.GetID(): v9},
}
hg.AddHyperedge(h3)
hg.AddHyperedge(h4)
@ -138,11 +138,11 @@ func TestHypergraph(t *testing.T) {
// Test error cases
t.Run("Error Cases", func(t *testing.T) {
v10 := &application.Vertex{AppAddress: [32]byte{8}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v10 := &application.Vertex{AppAddress: [32]byte{8}, DataAddress: [32]byte{1}}
h5 := &application.Hyperedge{
AppAddress: [32]byte{8},
DataAddress: [32]byte{2},
Extrinsics: map[[66]byte]application.Atom{v10.GetID(): v10},
Extrinsics: map[[64]byte]application.Atom{v10.GetID(): v10},
}
// Try to add hyperedge with non-existent vertex
@ -164,8 +164,8 @@ func TestHypergraph(t *testing.T) {
// Test sharding
t.Run("Sharding", func(t *testing.T) {
v11 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{1}, SegmentOrder: 1}
v12 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{2}, SegmentOrder: 1}
v11 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{1}}
v12 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{2}}
hg.AddVertex(v11)
hg.AddVertex(v12)