ceremonyclient/node/store/data_proof.go
package store

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"github.com/cockroachdb/pebble"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"golang.org/x/crypto/sha3"
	"google.golang.org/protobuf/proto"

	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)

type DataProofStore interface {
	NewTransaction() (Transaction, error)
	GetAggregateProof(
		filter []byte,
		commitment []byte,
		frameNumber uint64,
	) (
		*protobufs.InclusionAggregateProof,
		error,
	)
	PutAggregateProof(
		txn Transaction,
		aggregateProof *protobufs.InclusionAggregateProof,
		commitment []byte,
	) error
	GetDataTimeProof(
		peerId []byte,
		increment uint32,
	) (difficulty, parallelism uint32, input, output []byte, err error)
	GetTotalReward(
		peerId []byte,
	) (*big.Int, error)
	PutDataTimeProof(
		txn Transaction,
		parallelism uint32,
		peerId []byte,
		increment uint32,
		input []byte,
		output []byte,
	) error
	GetLatestDataTimeProof(peerId []byte) (
		increment uint32,
		parallelism uint32,
		output []byte,
		err error,
	)
}

var _ DataProofStore = (*PebbleDataProofStore)(nil)

type PebbleDataProofStore struct {
	db     KVDB
	logger *zap.Logger
}

func NewPebbleDataProofStore(
	db KVDB,
	logger *zap.Logger,
) *PebbleDataProofStore {
	return &PebbleDataProofStore{
		db,
		logger,
	}
}

const (
	DATA_PROOF             = 0x04
	DATA_PROOF_METADATA    = 0x00
	DATA_PROOF_INCLUSION   = 0x01
	DATA_PROOF_SEGMENT     = 0x02
	DATA_TIME_PROOF        = 0x05
	DATA_TIME_PROOF_DATA   = 0x00
	DATA_TIME_PROOF_LATEST = 0x01
)
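
// Key layout, as constructed by the helpers below (a descriptive summary,
// not part of the original source):
//
//	metadata:   DATA_PROOF | DATA_PROOF_METADATA | commitment | filter
//	inclusion:  DATA_PROOF | DATA_PROOF_INCLUSION | commitment | seqNo (8-byte BE) | filter
//	segment:    DATA_PROOF | DATA_PROOF_SEGMENT | segment hash | filter
//	time proof: DATA_TIME_PROOF | DATA_TIME_PROOF_DATA | peerId | increment (4-byte BE)
//	latest:     DATA_TIME_PROOF | DATA_TIME_PROOF_LATEST | peerId
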
func dataProofMetadataKey(filter []byte, commitment []byte) []byte {
	key := []byte{DATA_PROOF, DATA_PROOF_METADATA}
	key = append(key, commitment...)
	key = append(key, filter...)
	return key
}

func dataProofInclusionKey(
	filter []byte,
	commitment []byte,
	seqNo uint64,
) []byte {
	key := []byte{DATA_PROOF, DATA_PROOF_INCLUSION}
	key = append(key, commitment...)
	key = binary.BigEndian.AppendUint64(key, seqNo)
	key = append(key, filter...)
	return key
}

func dataProofSegmentKey(
	filter []byte,
	hash []byte,
) []byte {
	key := []byte{DATA_PROOF, DATA_PROOF_SEGMENT}
	key = append(key, hash...)
	key = append(key, filter...)
	return key
}

func dataTimeProofKey(peerId []byte, increment uint32) []byte {
	key := []byte{DATA_TIME_PROOF, DATA_TIME_PROOF_DATA}
	key = append(key, peerId...)
	key = binary.BigEndian.AppendUint32(key, increment)
	return key
}

func dataTimeProofLatestKey(peerId []byte) []byte {
	key := []byte{DATA_TIME_PROOF, DATA_TIME_PROOF_LATEST}
	key = append(key, peerId...)
	return key
}

func (p *PebbleDataProofStore) NewTransaction() (Transaction, error) {
	return p.db.NewBatch(), nil
}

func internalGetAggregateProof(
	db KVDB,
	filter []byte,
	commitment []byte,
	frameNumber uint64,
) (*protobufs.InclusionAggregateProof, error) {
	value, closer, err := db.Get(dataProofMetadataKey(filter, commitment))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			return nil, ErrNotFound
		}

		return nil, errors.Wrap(err, "get aggregate proof")
	}
	defer closer.Close()

	copied := make([]byte, len(value[8:]))
	limit := binary.BigEndian.Uint64(value[0:8])
	copy(copied, value[8:])

	aggregate := &protobufs.InclusionAggregateProof{
		Filter:               filter,
		FrameNumber:          frameNumber,
		InclusionCommitments: []*protobufs.InclusionCommitment{},
		Proof:                copied,
	}

	iter, err := db.NewIter(
		dataProofInclusionKey(filter, commitment, 0),
		dataProofInclusionKey(filter, commitment, limit+1),
	)
	if err != nil {
		return nil, errors.Wrap(err, "get aggregate proof")
	}

	i := uint32(0)
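
	// Each inclusion record decoded below is encoded as (a descriptive
	// summary, mirrored on the write side by internalPutAggregateProof):
	//
	//	urlLength (2-byte BE) | commitLength (2-byte BE) | type URL |
	//	commitment | zero or more 32-byte segment hashes
	//
	// Segments are fetched individually by hash to reassemble the data.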
	for iter.First(); iter.Valid(); iter.Next() {
		incCommit := iter.Value()

		urlLength := binary.BigEndian.Uint16(incCommit[:2])
		commitLength := binary.BigEndian.Uint16(incCommit[2:4])

		url := make([]byte, urlLength)
		copy(url, incCommit[4:urlLength+4])

		commit := make([]byte, commitLength)
		copy(commit, incCommit[urlLength+4:urlLength+4+commitLength])

		remainder := int(urlLength + 4 + commitLength)

		inclusionCommitment := &protobufs.InclusionCommitment{
			Filter:      filter,
			FrameNumber: frameNumber,
			Position:    i,
			TypeUrl:     string(url),
			Commitment:  commit,
		}

		chunks := [][]byte{}
		for j := 0; j < (len(incCommit)-remainder)/32; j++ {
			start := remainder + (j * 32)
			end := remainder + ((j + 1) * 32)
			segValue, dataCloser, err := db.Get(
				dataProofSegmentKey(filter, incCommit[start:end]),
			)
			if err != nil {
				if errors.Is(err, pebble.ErrNotFound) {
					// If we've lost this key it means we're in a corrupted state
					return nil, ErrInvalidData
				}

				return nil, errors.Wrap(err, "get aggregate proof")
			}

			segCopy := make([]byte, len(segValue))
			copy(segCopy, segValue)
			chunks = append(chunks, segCopy)

			if err = dataCloser.Close(); err != nil {
				return nil, errors.Wrap(err, "get aggregate proof")
			}
		}

		if string(url) == protobufs.IntrinsicExecutionOutputType {
			o := &protobufs.IntrinsicExecutionOutput{}
			copiedLeft := make([]byte, len(chunks[0]))
			copiedRight := make([]byte, len(chunks[1]))
			copy(copiedLeft, chunks[0])
			copy(copiedRight, chunks[1])

			o.Address = copiedLeft[:32]
			o.Output = copiedLeft[32:]
			o.Proof = copiedRight

			inclusionCommitment.Data, err = proto.Marshal(o)
			if err != nil {
				return nil, errors.Wrap(err, "get aggregate proof")
			}
		} else {
			copied := make([]byte, len(chunks[0]))
			copy(copied, chunks[0])
			inclusionCommitment.Data = copied
		}

		aggregate.InclusionCommitments = append(
			aggregate.InclusionCommitments,
			inclusionCommitment,
		)
		i++
	}

	if err = iter.Close(); err != nil {
		return nil, errors.Wrap(err, "get aggregate proof")
	}

	return aggregate, nil
}
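
// internalListAggregateProofKeys walks the same layout as
// internalGetAggregateProof but collects the raw metadata, inclusion, and
// segment keys instead of materializing the proof; presumably for bulk
// operations such as deletion or migration (an inference, not stated in the
// original source).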
func internalListAggregateProofKeys(
	db KVDB,
	filter []byte,
	commitment []byte,
	frameNumber uint64,
) ([][]byte, [][]byte, [][]byte, error) {
	proofs := [][]byte{dataProofMetadataKey(filter, commitment)}
	commits := [][]byte{}
	data := [][]byte{}

	value, closer, err := db.Get(dataProofMetadataKey(filter, commitment))
	if err != nil {
		fmt.Println("proof lookup failed")

		if errors.Is(err, pebble.ErrNotFound) {
			return nil, nil, nil, ErrNotFound
		}

		return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
	}
	defer closer.Close()

	copied := make([]byte, len(value[8:]))
	limit := binary.BigEndian.Uint64(value[0:8])
	copy(copied, value[8:])

	iter, err := db.NewIter(
		dataProofInclusionKey(filter, commitment, 0),
		dataProofInclusionKey(filter, commitment, limit+1),
	)
	if err != nil {
		fmt.Println("inclusion lookup failed")
		return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
	}

	i := uint32(0)
	commits = append(commits, dataProofInclusionKey(filter, commitment, 0))
	for iter.First(); iter.Valid(); iter.Next() {
		incCommit := iter.Value()

		urlLength := binary.BigEndian.Uint16(incCommit[:2])
		commitLength := binary.BigEndian.Uint16(incCommit[2:4])

		url := make([]byte, urlLength)
		copy(url, incCommit[4:urlLength+4])

		commit := make([]byte, commitLength)
		copy(commit, incCommit[urlLength+4:urlLength+4+commitLength])

		remainder := int(urlLength + 4 + commitLength)

		for j := 0; j < (len(incCommit)-remainder)/32; j++ {
			start := remainder + (j * 32)
			end := remainder + ((j + 1) * 32)
			data = append(data, dataProofSegmentKey(filter, incCommit[start:end]))
		}

		i++
	}

	if err = iter.Close(); err != nil {
		return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
	}

	return proofs, commits, data, nil
}
func (p *PebbleDataProofStore) GetAggregateProof(
	filter []byte,
	commitment []byte,
	frameNumber uint64,
) (*protobufs.InclusionAggregateProof, error) {
	return internalGetAggregateProof(
		p.db,
		filter,
		commitment,
		frameNumber,
	)
}
func internalPutAggregateProof(
	db KVDB,
	txn Transaction,
	aggregateProof *protobufs.InclusionAggregateProof,
	commitment []byte,
) error {
	buf := binary.BigEndian.AppendUint64(
		nil,
		uint64(len(aggregateProof.InclusionCommitments)),
	)
	buf = append(buf, aggregateProof.Proof...)

	for i, inc := range aggregateProof.InclusionCommitments {
		var segments [][]byte
		if inc.TypeUrl == protobufs.IntrinsicExecutionOutputType {
			o := &protobufs.IntrinsicExecutionOutput{}
			if err := proto.Unmarshal(inc.Data, o); err != nil {
return errors.Wrap(err, "get aggregate proof")
			}

			leftBits := append([]byte{}, o.Address...)
			leftBits = append(leftBits, o.Output...)
			rightBits := o.Proof
			segments = [][]byte{leftBits, rightBits}
		} else {
			segments = [][]byte{inc.Data}
		}

		urlLength := len(inc.TypeUrl)
		commitLength := len(inc.Commitment)
		encoded := binary.BigEndian.AppendUint16(nil, uint16(urlLength))
		encoded = binary.BigEndian.AppendUint16(encoded, uint16(commitLength))

		encoded = append(encoded, []byte(inc.TypeUrl)...)
		encoded = append(encoded, inc.Commitment...)

		for _, segment := range segments {
			hash := sha3.Sum256(segment)
			if err := txn.Set(
				dataProofSegmentKey(aggregateProof.Filter, hash[:]),
				segment,
			); err != nil {
				return errors.Wrap(err, "put aggregate proof")
			}

			encoded = append(encoded, hash[:]...)
		}

		if err := txn.Set(
			dataProofInclusionKey(aggregateProof.Filter, commitment, uint64(i)),
			encoded,
		); err != nil {
			return errors.Wrap(err, "put aggregate proof")
		}
	}

	if err := txn.Set(
		dataProofMetadataKey(aggregateProof.Filter, commitment),
		buf,
	); err != nil {
		return errors.Wrap(err, "put aggregate proof")
	}

	return nil
}
func (p *PebbleDataProofStore) PutAggregateProof(
	txn Transaction,
	aggregateProof *protobufs.InclusionAggregateProof,
	commitment []byte,
) error {
	return internalPutAggregateProof(
		p.db,
		txn,
		aggregateProof,
		commitment,
	)
}
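
// A minimal usage sketch (illustrative, not part of the original source):
// write an aggregate proof inside a transaction. It assumes the package's
// Transaction type exposes Commit and Abort, as a pebble batch wrapper
// conventionally would; proof and commitment are supplied by the caller.
func examplePutAggregateProof(
	store DataProofStore,
	proof *protobufs.InclusionAggregateProof,
	commitment []byte,
) error {
	txn, err := store.NewTransaction()
	if err != nil {
		return err
	}

	if err := store.PutAggregateProof(txn, proof, commitment); err != nil {
		// Discard the half-written batch on failure.
		txn.Abort()
		return err
	}

	return txn.Commit()
}
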
func (p *PebbleDataProofStore) GetDataTimeProof(
	peerId []byte,
	increment uint32,
) (difficulty, parallelism uint32, input, output []byte, err error) {
	data, closer, err := p.db.Get(dataTimeProofKey(peerId, increment))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			err = ErrNotFound
			return
		}

		err = errors.Wrap(err, "get data time proof")
		return
	}
	defer closer.Close()
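
	// The stored value is laid out as follows (a descriptive summary of the
	// parsing below, mirrored on the write side by PutDataTimeProof):
	//
	//	difficulty (4-byte BE) | parallelism (4-byte BE) |
	//	inputLen (8-byte BE) | input | outputLen (8-byte BE) | output
	//
	// hence the 24-byte minimum enforced next.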
	if len(data) < 24 {
		err = ErrInvalidData
		return
	}

	difficulty = binary.BigEndian.Uint32(data[:4])
	parallelism = binary.BigEndian.Uint32(data[4:8])
	inputLen := binary.BigEndian.Uint64(data[8:16])

	// Verify that the remaining data is at least as long as the input plus
	// the next length prefix
	if uint64(len(data[16:])) < inputLen+8 {
		err = ErrInvalidData
		return
	}

	input = make([]byte, inputLen)
	copy(input[:], data[16:16+inputLen])

	outputLen := binary.BigEndian.Uint64(data[16+inputLen : 16+inputLen+8])

	// Verify that the remaining data is at least as long as the declared output
	if uint64(len(data[16+inputLen+8:])) < outputLen {
		err = ErrInvalidData
		return
	}

	output = make([]byte, outputLen)
	copy(output[:], data[16+inputLen+8:])

	return difficulty, parallelism, input, output, nil
}
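
// The "latest" record read below is laid out as: increment (4-byte BE) |
// cumulative reward (32-byte big-endian integer). PutDataTimeProof writes it
// in the same form (a descriptive summary, not part of the original source).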
func (p *PebbleDataProofStore) GetTotalReward(
	peerId []byte,
) (*big.Int, error) {
	reward := big.NewInt(0)
	prev, closer, err := p.db.Get(dataTimeProofLatestKey(peerId))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			return big.NewInt(0), nil
		}

		return nil, errors.Wrap(err, "get total reward")
	}

	if len(prev) != 0 {
		reward.SetBytes(prev[4:])
	}

	// Close unconditionally so the handle is not leaked when prev is empty.
	if err = closer.Close(); err != nil {
		return nil, errors.Wrap(err, "get total reward")
	}

	return reward, nil
}
func (p *PebbleDataProofStore) PutDataTimeProof(
	txn Transaction,
	parallelism uint32,
	peerId []byte,
	increment uint32,
	input []byte,
	output []byte,
) error {
	// Now, for the assumptions. Rewards are calculated from the current
	// average rate of growth, assuming it continues at that rate for the
	// course of the next month and carrying that back to now, so that the
	// greatest advantage is gleaned from upgrading on time: akin to a
	// "difficulty bomb" in reverse, but locally calculated.
	difficulty := 200000 - (increment / 4)

	// Basis split on the estimated shard level for growth rate (in terms of
	// units): 240 (QUIL) * 8000000000 (conversion factor) / 1600000 (shards)
	// = 1200000 units per reward interval per core
	pomwBasis := big.NewInt(1200000)
	reward := new(big.Int)
	reward = reward.Mul(pomwBasis, big.NewInt(int64(parallelism)))
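
	// Worked example (added for illustration, using the conversion factor in
	// the comment above): at parallelism = 4 the per-increment reward is
	// 1200000 * 4 = 4800000 units, i.e. 4800000 / 8000000000 = 0.0006 QUIL.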
	priorSum := big.NewInt(0)
	prev, closer, err := p.db.Get(dataTimeProofLatestKey(peerId))
	if err != nil && (!errors.Is(err, pebble.ErrNotFound) || increment != 0) {
		return errors.Wrap(err, "put data time proof")
	}

	if len(prev) != 0 {
		priorSum.SetBytes(prev[4:])
		prevIncrement := binary.BigEndian.Uint32(prev[:4])

		if err = closer.Close(); err != nil {
			return errors.Wrap(err, "put data time proof")
		}

		if prevIncrement != increment-1 {
			return errors.Wrap(errors.New("invalid increment"), "put data time proof")
		}
	}

	data := []byte{}
	data = binary.BigEndian.AppendUint32(data, difficulty)
	data = binary.BigEndian.AppendUint32(data, parallelism)
	data = binary.BigEndian.AppendUint64(data, uint64(len(input)))
	data = append(data, input...)
	data = binary.BigEndian.AppendUint64(data, uint64(len(output)))
	data = append(data, output...)

	err = txn.Set(dataTimeProofKey(peerId, increment), data)
	if err != nil {
		return errors.Wrap(err, "put data time proof")
	}

	latest := []byte{}
	latest = binary.BigEndian.AppendUint32(latest, increment)

	priorSum.Add(priorSum, reward)
	latest = append(latest, priorSum.FillBytes(make([]byte, 32))...)

	if err = txn.Set(dataTimeProofLatestKey(peerId), latest); err != nil {
		return errors.Wrap(err, "put data time proof")
	}

	return nil
}
func (p *PebbleDataProofStore) GetLatestDataTimeProof(peerId []byte) (
	increment uint32,
	parallelism uint32,
	output []byte,
	err error,
) {
	prev, closer, err := p.db.Get(dataTimeProofLatestKey(peerId))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			return 0, 0, nil, ErrNotFound
		}

		return 0, 0, nil, errors.Wrap(err, "get latest data time proof")
	}

	if len(prev) < 4 {
		return 0, 0, nil, ErrInvalidData
	}

	increment = binary.BigEndian.Uint32(prev[:4])
	if err = closer.Close(); err != nil {
		return 0, 0, nil, errors.Wrap(err, "get latest data time proof")
	}

	_, parallelism, _, output, err = p.GetDataTimeProof(peerId, increment)
	return increment, parallelism, output, err
}
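
// A minimal end-to-end sketch (illustrative, not part of the original
// source): append the next data time proof for a peer and commit it. It
// assumes the package's Transaction exposes Commit, as noted above; peerId,
// input, and output are placeholder values supplied by the caller.
func exampleAppendDataTimeProof(
	store DataProofStore,
	peerId []byte,
	parallelism uint32,
	input, output []byte,
) error {
	// Chain from the last recorded increment; PutDataTimeProof enforces
	// that each new increment is exactly the prior one plus 1 (or 0 for
	// the first proof).
	increment := uint32(0)
	latest, _, _, err := store.GetLatestDataTimeProof(peerId)
	if err == nil {
		increment = latest + 1
	} else if !errors.Is(err, ErrNotFound) {
		return err
	}

	txn, err := store.NewTransaction()
	if err != nil {
		return err
	}

	if err := store.PutDataTimeProof(
		txn, parallelism, peerId, increment, input, output,
	); err != nil {
		return err
	}

	return txn.Commit()
}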