fix: blocker (3)

This commit is contained in:
Cassandra Heart 2025-10-23 00:59:34 -05:00
parent d5a094116c
commit 264efde9de
No known key found for this signature in database
GPG Key ID: 371083BFA6C240AA
138 changed files with 11788 additions and 5634 deletions

85
Cargo.lock generated
View File

@ -576,7 +576,7 @@ version = "4.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
dependencies = [
"heck 0.5.0",
"heck",
"proc-macro2",
"quote",
"syn 2.0.100",
@ -1111,9 +1111,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "goblin"
version = "0.6.1"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d6b4de4a8eb6c46a8c77e1d3be942cb9a8bf073c22374578e5ba4b08ed0ff68"
checksum = "1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47"
dependencies = [
"log",
"plain",
@ -1153,12 +1153,6 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "heck"
version = "0.5.0"
@ -1457,12 +1451,6 @@ version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oneshot-uniffi"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c548d5c78976f6955d72d0ced18c48ca07030f7a1d4024529fedd7c1c01b29c"
[[package]]
name = "oorandom"
version = "11.1.3"
@ -1766,18 +1754,18 @@ dependencies = [
[[package]]
name = "scroll"
version = "0.11.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da"
checksum = "6ab8598aa408498679922eff7fa985c25d58a90771bd6be794434c5277eab1a6"
dependencies = [
"scroll_derive",
]
[[package]]
name = "scroll_derive"
version = "0.11.1"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae"
checksum = "1783eabc414609e28a5ba76aee5ddd52199f7107a0b24c2e9746a1ecc34a683d"
dependencies = [
"proc-macro2",
"quote",
@ -1962,6 +1950,12 @@ version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
[[package]]
name = "smawk"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c"
[[package]]
name = "spki"
version = "0.7.3"
@ -2032,6 +2026,9 @@ name = "textwrap"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
dependencies = [
"smawk",
]
[[package]]
name = "thiserror"
@ -2121,12 +2118,13 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "uniffi"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21345172d31092fd48c47fd56c53d4ae9e41c4b1f559fb8c38c1ab1685fd919f"
checksum = "4cb08c58c7ed7033150132febe696bef553f891b1ede57424b40d87a89e3c170"
dependencies = [
"anyhow",
"camino",
"cargo_metadata",
"clap 4.5.4",
"uniffi_bindgen",
"uniffi_build",
@ -2136,33 +2134,32 @@ dependencies = [
[[package]]
name = "uniffi_bindgen"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd992f2929a053829d5875af1eff2ee3d7a7001cb3b9a46cc7895f2caede6940"
checksum = "cade167af943e189a55020eda2c314681e223f1e42aca7c4e52614c2b627698f"
dependencies = [
"anyhow",
"askama",
"camino",
"cargo_metadata",
"clap 4.5.4",
"fs-err",
"glob",
"goblin",
"heck 0.4.1",
"heck",
"once_cell",
"paste",
"serde",
"textwrap 0.16.1",
"toml",
"uniffi_meta",
"uniffi_testing",
"uniffi_udl",
]
[[package]]
name = "uniffi_build"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "001964dd3682d600084b3aaf75acf9c3426699bc27b65e96bb32d175a31c74e9"
checksum = "4c7cf32576e08104b7dc2a6a5d815f37616e66c6866c2a639fe16e6d2286b75b"
dependencies = [
"anyhow",
"camino",
@ -2171,9 +2168,9 @@ dependencies = [
[[package]]
name = "uniffi_checksum_derive"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55137c122f712d9330fd985d66fa61bdc381752e89c35708c13ce63049a3002c"
checksum = "802d2051a700e3ec894c79f80d2705b69d85844dafbbe5d1a92776f8f48b563a"
dependencies = [
"quote",
"syn 2.0.100",
@ -2181,25 +2178,23 @@ dependencies = [
[[package]]
name = "uniffi_core"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6121a127a3af1665cd90d12dd2b3683c2643c5103281d0fed5838324ca1fad5b"
checksum = "bc7687007d2546c454d8ae609b105daceb88175477dac280707ad6d95bcd6f1f"
dependencies = [
"anyhow",
"bytes",
"camino",
"log",
"once_cell",
"oneshot-uniffi",
"paste",
"static_assertions",
]
[[package]]
name = "uniffi_macros"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11cf7a58f101fcedafa5b77ea037999b88748607f0ef3a33eaa0efc5392e92e4"
checksum = "12c65a5b12ec544ef136693af8759fb9d11aefce740fb76916721e876639033b"
dependencies = [
"bincode",
"camino",
@ -2210,15 +2205,14 @@ dependencies = [
"serde",
"syn 2.0.100",
"toml",
"uniffi_build",
"uniffi_meta",
]
[[package]]
name = "uniffi_meta"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71dc8573a7b1ac4b71643d6da34888273ebfc03440c525121f1b3634ad3417a2"
checksum = "4a74ed96c26882dac1ca9b93ca23c827e284bacbd7ec23c6f0b0372f747d59e4"
dependencies = [
"anyhow",
"bytes",
@ -2228,9 +2222,9 @@ dependencies = [
[[package]]
name = "uniffi_testing"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "118448debffcb676ddbe8c5305fb933ab7e0123753e659a71dc4a693f8d9f23c"
checksum = "6a6f984f0781f892cc864a62c3a5c60361b1ccbd68e538e6c9fbced5d82268ac"
dependencies = [
"anyhow",
"camino",
@ -2241,11 +2235,12 @@ dependencies = [
[[package]]
name = "uniffi_udl"
version = "0.25.3"
version = "0.28.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "889edb7109c6078abe0e53e9b4070cf74a6b3468d141bdf5ef1bd4d1dc24a1c3"
checksum = "037820a4cfc4422db1eaa82f291a3863c92c7d1789dc513489c36223f9b4cdfc"
dependencies = [
"anyhow",
"textwrap 0.16.1",
"uniffi_meta",
"uniffi_testing",
"weedle2",
@ -2408,9 +2403,9 @@ dependencies = [
[[package]]
name = "weedle2"
version = "4.0.0"
version = "5.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e79c5206e1f43a2306fd64bdb95025ee4228960f2e6c5a8b173f3caaf807741"
checksum = "998d2c24ec099a87daf9467808859f9d82b61f1d9c9701251aea037f514eae0e"
dependencies = [
"nom",
]

View File

@ -65,7 +65,7 @@ COPY docker/rustup-init.sh /opt/rustup-init.sh
RUN /opt/rustup-init.sh -y --profile minimal
# Install uniffi-bindgen-go
RUN cargo install uniffi-bindgen-go --git https://github.com/NordSecurity/uniffi-bindgen-go --tag v0.2.1+v0.25.0
RUN cargo install uniffi-bindgen-go --git https://github.com/NordSecurity/uniffi-bindgen-go --tag v0.4.0+v0.28.3
FROM base AS build

View File

@ -67,7 +67,7 @@ COPY docker/rustup-init.sh /opt/rustup-init.sh
RUN /opt/rustup-init.sh -y --profile minimal
# Install uniffi-bindgen-go
RUN cargo install uniffi-bindgen-go --git https://github.com/NordSecurity/uniffi-bindgen-go --tag v0.2.1+v0.25.0
RUN cargo install uniffi-bindgen-go --git https://github.com/NordSecurity/uniffi-bindgen-go --tag v0.4.0+v0.28.3
FROM base-avx512 AS build-avx512

View File

@ -1,8 +0,0 @@
#include <bls48581.h>
// This file exists beacause of
// https://github.com/golang/go/issues/11263
void cgo_rust_task_callback_bridge_bls48581(RustTaskCallback cb, const void * taskData, int8_t status) {
cb(taskData, status);
}

View File

@ -12,60 +12,67 @@ import (
"unsafe"
)
type RustBuffer = C.RustBuffer
// This is needed, because as of go 1.24
// type RustBuffer C.RustBuffer cannot have methods,
// RustBuffer is treated as non-local type
type GoRustBuffer struct {
inner C.RustBuffer
}
type RustBufferI interface {
AsReader() *bytes.Reader
Free()
ToGoBytes() []byte
Data() unsafe.Pointer
Len() int
Capacity() int
Len() uint64
Capacity() uint64
}
func RustBufferFromExternal(b RustBufferI) RustBuffer {
return RustBuffer{
capacity: C.int(b.Capacity()),
len: C.int(b.Len()),
data: (*C.uchar)(b.Data()),
func RustBufferFromExternal(b RustBufferI) GoRustBuffer {
return GoRustBuffer{
inner: C.RustBuffer{
capacity: C.uint64_t(b.Capacity()),
len: C.uint64_t(b.Len()),
data: (*C.uchar)(b.Data()),
},
}
}
func (cb RustBuffer) Capacity() int {
return int(cb.capacity)
func (cb GoRustBuffer) Capacity() uint64 {
return uint64(cb.inner.capacity)
}
func (cb RustBuffer) Len() int {
return int(cb.len)
func (cb GoRustBuffer) Len() uint64 {
return uint64(cb.inner.len)
}
func (cb RustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.data)
func (cb GoRustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.inner.data)
}
func (cb RustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.data), C.int(cb.len))
func (cb GoRustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.inner.data), C.uint64_t(cb.inner.len))
return bytes.NewReader(b)
}
func (cb RustBuffer) Free() {
func (cb GoRustBuffer) Free() {
rustCall(func(status *C.RustCallStatus) bool {
C.ffi_bls48581_rustbuffer_free(cb, status)
C.ffi_bls48581_rustbuffer_free(cb.inner, status)
return false
})
}
func (cb RustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.data), C.int(cb.len))
func (cb GoRustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.inner.data), C.int(cb.inner.len))
}
func stringToRustBuffer(str string) RustBuffer {
func stringToRustBuffer(str string) C.RustBuffer {
return bytesToRustBuffer([]byte(str))
}
func bytesToRustBuffer(b []byte) RustBuffer {
func bytesToRustBuffer(b []byte) C.RustBuffer {
if len(b) == 0 {
return RustBuffer{}
return C.RustBuffer{}
}
// We can pass the pointer along here, as it is pinned
// for the duration of this call
@ -74,7 +81,7 @@ func bytesToRustBuffer(b []byte) RustBuffer {
data: (*C.uchar)(unsafe.Pointer(&b[0])),
}
return rustCall(func(status *C.RustCallStatus) RustBuffer {
return rustCall(func(status *C.RustCallStatus) C.RustBuffer {
return C.ffi_bls48581_rustbuffer_from_bytes(foreign, status)
})
}
@ -84,12 +91,7 @@ type BufLifter[GoType any] interface {
}
type BufLowerer[GoType any] interface {
Lower(value GoType) RustBuffer
}
type FfiConverter[GoType any, FfiType any] interface {
Lift(value FfiType) GoType
Lower(value GoType) FfiType
Lower(value GoType) C.RustBuffer
}
type BufReader[GoType any] interface {
@ -100,12 +102,7 @@ type BufWriter[GoType any] interface {
Write(writer io.Writer, value GoType)
}
type FfiRustBufConverter[GoType any, FfiType any] interface {
FfiConverter[GoType, FfiType]
BufReader[GoType]
}
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) RustBuffer {
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) C.RustBuffer {
// This might be not the most efficient way but it does not require knowing allocation size
// beforehand
var buffer bytes.Buffer
@ -130,31 +127,30 @@ func LiftFromRustBuffer[GoType any](bufReader BufReader[GoType], rbuf RustBuffer
return item
}
func rustCallWithError[U any](converter BufLifter[error], callback func(*C.RustCallStatus) U) (U, error) {
func rustCallWithError[E any, U any](converter BufReader[*E], callback func(*C.RustCallStatus) U) (U, *E) {
var status C.RustCallStatus
returnValue := callback(&status)
err := checkCallStatus(converter, status)
return returnValue, err
}
func checkCallStatus(converter BufLifter[error], status C.RustCallStatus) error {
func checkCallStatus[E any](converter BufReader[*E], status C.RustCallStatus) *E {
switch status.code {
case 0:
return nil
case 1:
return converter.Lift(status.errorBuf)
return LiftFromRustBuffer(converter, GoRustBuffer{inner: status.errorBuf})
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a rustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{inner: status.errorBuf})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
panic(fmt.Errorf("unknown status code: %d", status.code))
}
}
@ -165,11 +161,13 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
case 1:
panic(fmt.Errorf("function not returning an error returned an error"))
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a C.RustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{
inner: status.errorBuf,
})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
@ -179,13 +177,17 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
}
func rustCall[U any](callback func(*C.RustCallStatus) U) U {
returnValue, err := rustCallWithError(nil, callback)
returnValue, err := rustCallWithError[error](nil, callback)
if err != nil {
panic(err)
}
return returnValue
}
type NativeError interface {
AsError() error
}
func writeInt8(writer io.Writer, value int8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
@ -333,63 +335,63 @@ func init() {
func uniffiCheckChecksums() {
// Get the bindings contract version from our ComponentInterface
bindingsContractVersion := 24
bindingsContractVersion := 26
// Get the scaffolding contract version by calling the into the dylib
scaffoldingContractVersion := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_bls48581_uniffi_contract_version(uniffiStatus)
scaffoldingContractVersion := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_bls48581_uniffi_contract_version()
})
if bindingsContractVersion != int(scaffoldingContractVersion) {
// If this happens try cleaning and rebuilding your project
panic("bls48581: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_aggregate(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_aggregate()
})
if checksum != 25405 {
if checksum != 54030 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_bls_aggregate: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_keygen(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_keygen()
})
if checksum != 58096 {
if checksum != 55807 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_bls_keygen: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_sign(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_sign()
})
if checksum != 44903 {
if checksum != 27146 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_bls_sign: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_verify(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_bls_verify()
})
if checksum != 59437 {
if checksum != 23721 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_bls_verify: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_commit_raw(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_commit_raw()
})
if checksum != 20099 {
if checksum != 14479 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_commit_raw: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_init(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_init()
})
if checksum != 11227 {
// If this happens try cleaning and rebuilding your project
@ -397,37 +399,37 @@ func uniffiCheckChecksums() {
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_prove_multiple(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_prove_multiple()
})
if checksum != 15323 {
if checksum != 38907 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_prove_multiple: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_prove_raw(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_prove_raw()
})
if checksum != 64858 {
if checksum != 54704 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_prove_raw: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_verify_multiple(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_verify_multiple()
})
if checksum != 33757 {
if checksum != 8610 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_verify_multiple: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_verify_raw(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_verify_raw()
})
if checksum != 52165 {
if checksum != 15303 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_verify_raw: UniFFI API checksum mismatch")
}
@ -531,7 +533,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
length := readInt32(reader)
buffer := make([]byte, length)
read_length, err := reader.Read(buffer)
if err != nil {
if err != nil && err != io.EOF {
panic(err)
}
if read_length != int(length) {
@ -540,7 +542,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
return string(buffer)
}
func (FfiConverterString) Lower(value string) RustBuffer {
func (FfiConverterString) Lower(value string) C.RustBuffer {
return stringToRustBuffer(value)
}
@ -573,33 +575,33 @@ func (r *BlsAggregateOutput) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.AggregateSignature)
}
type FfiConverterTypeBlsAggregateOutput struct{}
type FfiConverterBlsAggregateOutput struct{}
var FfiConverterTypeBlsAggregateOutputINSTANCE = FfiConverterTypeBlsAggregateOutput{}
var FfiConverterBlsAggregateOutputINSTANCE = FfiConverterBlsAggregateOutput{}
func (c FfiConverterTypeBlsAggregateOutput) Lift(rb RustBufferI) BlsAggregateOutput {
func (c FfiConverterBlsAggregateOutput) Lift(rb RustBufferI) BlsAggregateOutput {
return LiftFromRustBuffer[BlsAggregateOutput](c, rb)
}
func (c FfiConverterTypeBlsAggregateOutput) Read(reader io.Reader) BlsAggregateOutput {
func (c FfiConverterBlsAggregateOutput) Read(reader io.Reader) BlsAggregateOutput {
return BlsAggregateOutput{
FfiConverterSequenceUint8INSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
}
}
func (c FfiConverterTypeBlsAggregateOutput) Lower(value BlsAggregateOutput) RustBuffer {
func (c FfiConverterBlsAggregateOutput) Lower(value BlsAggregateOutput) C.RustBuffer {
return LowerIntoRustBuffer[BlsAggregateOutput](c, value)
}
func (c FfiConverterTypeBlsAggregateOutput) Write(writer io.Writer, value BlsAggregateOutput) {
func (c FfiConverterBlsAggregateOutput) Write(writer io.Writer, value BlsAggregateOutput) {
FfiConverterSequenceUint8INSTANCE.Write(writer, value.AggregatePublicKey)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.AggregateSignature)
}
type FfiDestroyerTypeBlsAggregateOutput struct{}
type FfiDestroyerBlsAggregateOutput struct{}
func (_ FfiDestroyerTypeBlsAggregateOutput) Destroy(value BlsAggregateOutput) {
func (_ FfiDestroyerBlsAggregateOutput) Destroy(value BlsAggregateOutput) {
value.Destroy()
}
@ -615,15 +617,15 @@ func (r *BlsKeygenOutput) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.ProofOfPossessionSig)
}
type FfiConverterTypeBlsKeygenOutput struct{}
type FfiConverterBlsKeygenOutput struct{}
var FfiConverterTypeBlsKeygenOutputINSTANCE = FfiConverterTypeBlsKeygenOutput{}
var FfiConverterBlsKeygenOutputINSTANCE = FfiConverterBlsKeygenOutput{}
func (c FfiConverterTypeBlsKeygenOutput) Lift(rb RustBufferI) BlsKeygenOutput {
func (c FfiConverterBlsKeygenOutput) Lift(rb RustBufferI) BlsKeygenOutput {
return LiftFromRustBuffer[BlsKeygenOutput](c, rb)
}
func (c FfiConverterTypeBlsKeygenOutput) Read(reader io.Reader) BlsKeygenOutput {
func (c FfiConverterBlsKeygenOutput) Read(reader io.Reader) BlsKeygenOutput {
return BlsKeygenOutput{
FfiConverterSequenceUint8INSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
@ -631,19 +633,19 @@ func (c FfiConverterTypeBlsKeygenOutput) Read(reader io.Reader) BlsKeygenOutput
}
}
func (c FfiConverterTypeBlsKeygenOutput) Lower(value BlsKeygenOutput) RustBuffer {
func (c FfiConverterBlsKeygenOutput) Lower(value BlsKeygenOutput) C.RustBuffer {
return LowerIntoRustBuffer[BlsKeygenOutput](c, value)
}
func (c FfiConverterTypeBlsKeygenOutput) Write(writer io.Writer, value BlsKeygenOutput) {
func (c FfiConverterBlsKeygenOutput) Write(writer io.Writer, value BlsKeygenOutput) {
FfiConverterSequenceUint8INSTANCE.Write(writer, value.SecretKey)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.PublicKey)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.ProofOfPossessionSig)
}
type FfiDestroyerTypeBlsKeygenOutput struct{}
type FfiDestroyerBlsKeygenOutput struct{}
func (_ FfiDestroyerTypeBlsKeygenOutput) Destroy(value BlsKeygenOutput) {
func (_ FfiDestroyerBlsKeygenOutput) Destroy(value BlsKeygenOutput) {
value.Destroy()
}
@ -657,33 +659,33 @@ func (r *Multiproof) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.Proof)
}
type FfiConverterTypeMultiproof struct{}
type FfiConverterMultiproof struct{}
var FfiConverterTypeMultiproofINSTANCE = FfiConverterTypeMultiproof{}
var FfiConverterMultiproofINSTANCE = FfiConverterMultiproof{}
func (c FfiConverterTypeMultiproof) Lift(rb RustBufferI) Multiproof {
func (c FfiConverterMultiproof) Lift(rb RustBufferI) Multiproof {
return LiftFromRustBuffer[Multiproof](c, rb)
}
func (c FfiConverterTypeMultiproof) Read(reader io.Reader) Multiproof {
func (c FfiConverterMultiproof) Read(reader io.Reader) Multiproof {
return Multiproof{
FfiConverterSequenceUint8INSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
}
}
func (c FfiConverterTypeMultiproof) Lower(value Multiproof) RustBuffer {
func (c FfiConverterMultiproof) Lower(value Multiproof) C.RustBuffer {
return LowerIntoRustBuffer[Multiproof](c, value)
}
func (c FfiConverterTypeMultiproof) Write(writer io.Writer, value Multiproof) {
func (c FfiConverterMultiproof) Write(writer io.Writer, value Multiproof) {
FfiConverterSequenceUint8INSTANCE.Write(writer, value.D)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Proof)
}
type FfiDestroyerTypeMultiproof struct{}
type FfiDestroyerMultiproof struct{}
func (_ FfiDestroyerTypeMultiproof) Destroy(value Multiproof) {
func (_ FfiDestroyerMultiproof) Destroy(value Multiproof) {
value.Destroy()
}
@ -707,7 +709,7 @@ func (c FfiConverterSequenceUint8) Read(reader io.Reader) []uint8 {
return result
}
func (c FfiConverterSequenceUint8) Lower(value []uint8) RustBuffer {
func (c FfiConverterSequenceUint8) Lower(value []uint8) C.RustBuffer {
return LowerIntoRustBuffer[[]uint8](c, value)
}
@ -750,7 +752,7 @@ func (c FfiConverterSequenceUint64) Read(reader io.Reader) []uint64 {
return result
}
func (c FfiConverterSequenceUint64) Lower(value []uint64) RustBuffer {
func (c FfiConverterSequenceUint64) Lower(value []uint64) C.RustBuffer {
return LowerIntoRustBuffer[[]uint64](c, value)
}
@ -793,7 +795,7 @@ func (c FfiConverterSequenceSequenceUint8) Read(reader io.Reader) [][]uint8 {
return result
}
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) RustBuffer {
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) C.RustBuffer {
return LowerIntoRustBuffer[[][]uint8](c, value)
}
@ -817,20 +819,26 @@ func (FfiDestroyerSequenceSequenceUint8) Destroy(sequence [][]uint8) {
}
func BlsAggregate(pks [][]uint8, sigs [][]uint8) BlsAggregateOutput {
return FfiConverterTypeBlsAggregateOutputINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_bls_aggregate(FfiConverterSequenceSequenceUint8INSTANCE.Lower(pks), FfiConverterSequenceSequenceUint8INSTANCE.Lower(sigs), _uniffiStatus)
return FfiConverterBlsAggregateOutputINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_bls_aggregate(FfiConverterSequenceSequenceUint8INSTANCE.Lower(pks), FfiConverterSequenceSequenceUint8INSTANCE.Lower(sigs), _uniffiStatus),
}
}))
}
func BlsKeygen() BlsKeygenOutput {
return FfiConverterTypeBlsKeygenOutputINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_bls_keygen(_uniffiStatus)
return FfiConverterBlsKeygenOutputINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_bls_keygen(_uniffiStatus),
}
}))
}
func BlsSign(sk []uint8, msg []uint8, domain []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_bls_sign(FfiConverterSequenceUint8INSTANCE.Lower(sk), FfiConverterSequenceUint8INSTANCE.Lower(msg), FfiConverterSequenceUint8INSTANCE.Lower(domain), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_bls_sign(FfiConverterSequenceUint8INSTANCE.Lower(sk), FfiConverterSequenceUint8INSTANCE.Lower(msg), FfiConverterSequenceUint8INSTANCE.Lower(domain), _uniffiStatus),
}
}))
}
@ -842,7 +850,9 @@ func BlsVerify(pk []uint8, sig []uint8, msg []uint8, domain []uint8) bool {
func CommitRaw(data []uint8, polySize uint64) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_commit_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_commit_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus),
}
}))
}
@ -854,14 +864,18 @@ func Init() {
}
func ProveMultiple(commitments [][]uint8, polys [][]uint8, indices []uint64, polySize uint64) Multiproof {
return FfiConverterTypeMultiproofINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_prove_multiple(FfiConverterSequenceSequenceUint8INSTANCE.Lower(commitments), FfiConverterSequenceSequenceUint8INSTANCE.Lower(polys), FfiConverterSequenceUint64INSTANCE.Lower(indices), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
return FfiConverterMultiproofINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_prove_multiple(FfiConverterSequenceSequenceUint8INSTANCE.Lower(commitments), FfiConverterSequenceSequenceUint8INSTANCE.Lower(polys), FfiConverterSequenceUint64INSTANCE.Lower(indices), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus),
}
}))
}
func ProveRaw(data []uint8, index uint64, polySize uint64) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_prove_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(index), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bls48581_fn_func_prove_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(index), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus),
}
}))
}

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +0,0 @@
#include <bulletproofs.h>
// This file exists beacause of
// https://github.com/golang/go/issues/11263
void cgo_rust_task_callback_bridge_bulletproofs(RustTaskCallback cb, const void * taskData, int8_t status) {
cb(taskData, status);
}

View File

@ -12,60 +12,67 @@ import (
"unsafe"
)
type RustBuffer = C.RustBuffer
// This is needed, because as of go 1.24
// type RustBuffer C.RustBuffer cannot have methods,
// RustBuffer is treated as non-local type
type GoRustBuffer struct {
inner C.RustBuffer
}
type RustBufferI interface {
AsReader() *bytes.Reader
Free()
ToGoBytes() []byte
Data() unsafe.Pointer
Len() int
Capacity() int
Len() uint64
Capacity() uint64
}
func RustBufferFromExternal(b RustBufferI) RustBuffer {
return RustBuffer{
capacity: C.int(b.Capacity()),
len: C.int(b.Len()),
data: (*C.uchar)(b.Data()),
func RustBufferFromExternal(b RustBufferI) GoRustBuffer {
return GoRustBuffer{
inner: C.RustBuffer{
capacity: C.uint64_t(b.Capacity()),
len: C.uint64_t(b.Len()),
data: (*C.uchar)(b.Data()),
},
}
}
func (cb RustBuffer) Capacity() int {
return int(cb.capacity)
func (cb GoRustBuffer) Capacity() uint64 {
return uint64(cb.inner.capacity)
}
func (cb RustBuffer) Len() int {
return int(cb.len)
func (cb GoRustBuffer) Len() uint64 {
return uint64(cb.inner.len)
}
func (cb RustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.data)
func (cb GoRustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.inner.data)
}
func (cb RustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.data), C.int(cb.len))
func (cb GoRustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.inner.data), C.uint64_t(cb.inner.len))
return bytes.NewReader(b)
}
func (cb RustBuffer) Free() {
func (cb GoRustBuffer) Free() {
rustCall(func(status *C.RustCallStatus) bool {
C.ffi_bulletproofs_rustbuffer_free(cb, status)
C.ffi_bulletproofs_rustbuffer_free(cb.inner, status)
return false
})
}
func (cb RustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.data), C.int(cb.len))
func (cb GoRustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.inner.data), C.int(cb.inner.len))
}
func stringToRustBuffer(str string) RustBuffer {
func stringToRustBuffer(str string) C.RustBuffer {
return bytesToRustBuffer([]byte(str))
}
func bytesToRustBuffer(b []byte) RustBuffer {
func bytesToRustBuffer(b []byte) C.RustBuffer {
if len(b) == 0 {
return RustBuffer{}
return C.RustBuffer{}
}
// We can pass the pointer along here, as it is pinned
// for the duration of this call
@ -74,7 +81,7 @@ func bytesToRustBuffer(b []byte) RustBuffer {
data: (*C.uchar)(unsafe.Pointer(&b[0])),
}
return rustCall(func(status *C.RustCallStatus) RustBuffer {
return rustCall(func(status *C.RustCallStatus) C.RustBuffer {
return C.ffi_bulletproofs_rustbuffer_from_bytes(foreign, status)
})
}
@ -84,12 +91,7 @@ type BufLifter[GoType any] interface {
}
type BufLowerer[GoType any] interface {
Lower(value GoType) RustBuffer
}
type FfiConverter[GoType any, FfiType any] interface {
Lift(value FfiType) GoType
Lower(value GoType) FfiType
Lower(value GoType) C.RustBuffer
}
type BufReader[GoType any] interface {
@ -100,12 +102,7 @@ type BufWriter[GoType any] interface {
Write(writer io.Writer, value GoType)
}
type FfiRustBufConverter[GoType any, FfiType any] interface {
FfiConverter[GoType, FfiType]
BufReader[GoType]
}
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) RustBuffer {
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) C.RustBuffer {
// This might be not the most efficient way but it does not require knowing allocation size
// beforehand
var buffer bytes.Buffer
@ -130,31 +127,30 @@ func LiftFromRustBuffer[GoType any](bufReader BufReader[GoType], rbuf RustBuffer
return item
}
func rustCallWithError[U any](converter BufLifter[error], callback func(*C.RustCallStatus) U) (U, error) {
func rustCallWithError[E any, U any](converter BufReader[*E], callback func(*C.RustCallStatus) U) (U, *E) {
var status C.RustCallStatus
returnValue := callback(&status)
err := checkCallStatus(converter, status)
return returnValue, err
}
func checkCallStatus(converter BufLifter[error], status C.RustCallStatus) error {
func checkCallStatus[E any](converter BufReader[*E], status C.RustCallStatus) *E {
switch status.code {
case 0:
return nil
case 1:
return converter.Lift(status.errorBuf)
return LiftFromRustBuffer(converter, GoRustBuffer{inner: status.errorBuf})
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a rustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{inner: status.errorBuf})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
panic(fmt.Errorf("unknown status code: %d", status.code))
}
}
@ -165,11 +161,13 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
case 1:
panic(fmt.Errorf("function not returning an error returned an error"))
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a C.RustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{
inner: status.errorBuf,
})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
@ -179,13 +177,17 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
}
func rustCall[U any](callback func(*C.RustCallStatus) U) U {
returnValue, err := rustCallWithError(nil, callback)
returnValue, err := rustCallWithError[error](nil, callback)
if err != nil {
panic(err)
}
return returnValue
}
type NativeError interface {
AsError() error
}
func writeInt8(writer io.Writer, value int8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
@ -333,191 +335,191 @@ func init() {
func uniffiCheckChecksums() {
// Get the bindings contract version from our ComponentInterface
bindingsContractVersion := 24
bindingsContractVersion := 26
// Get the scaffolding contract version by calling the into the dylib
scaffoldingContractVersion := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_bulletproofs_uniffi_contract_version(uniffiStatus)
scaffoldingContractVersion := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_bulletproofs_uniffi_contract_version()
})
if bindingsContractVersion != int(scaffoldingContractVersion) {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_alt_generator(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_alt_generator()
})
if checksum != 26422 {
if checksum != 26339 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_alt_generator: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_generate_input_commitments(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_generate_input_commitments()
})
if checksum != 65001 {
if checksum != 19822 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_generate_input_commitments: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_generate_range_proof(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_generate_range_proof()
})
if checksum != 40322 {
if checksum != 985 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_generate_range_proof: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_hash_to_scalar(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_hash_to_scalar()
})
if checksum != 19176 {
if checksum != 13632 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_hash_to_scalar: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_keygen(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_keygen()
})
if checksum != 46171 {
if checksum != 9609 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_keygen: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_point_addition(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_point_addition()
})
if checksum != 6828 {
if checksum != 32221 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_point_addition: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_point_subtraction(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_point_subtraction()
})
if checksum != 48479 {
if checksum != 38806 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_point_subtraction: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_addition(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_addition()
})
if checksum != 29576 {
if checksum != 60180 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_addition: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_inverse(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_inverse()
})
if checksum != 11499 {
if checksum != 37774 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_inverse: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult()
})
if checksum != 6075 {
if checksum != 45102 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_mult: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult_hash_to_scalar(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult_hash_to_scalar()
})
if checksum != 53652 {
if checksum != 53592 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_mult_hash_to_scalar: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult_point(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_mult_point()
})
if checksum != 46237 {
if checksum != 61743 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_mult_point: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_subtraction(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_subtraction()
})
if checksum != 13728 {
if checksum != 7250 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_subtraction: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_to_point(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_scalar_to_point()
})
if checksum != 61077 {
if checksum != 51818 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_scalar_to_point: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sign_hidden(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sign_hidden()
})
if checksum != 57560 {
if checksum != 32104 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_sign_hidden: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sign_simple(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sign_simple()
})
if checksum != 5535 {
if checksum != 35259 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_sign_simple: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sum_check(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_sum_check()
})
if checksum != 18164 {
if checksum != 47141 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_sum_check: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_hidden(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_hidden()
})
if checksum != 55266 {
if checksum != 64726 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_verify_hidden: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_range_proof(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_range_proof()
})
if checksum != 37611 {
if checksum != 62924 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_verify_range_proof: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_simple(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bulletproofs_checksum_func_verify_simple()
})
if checksum != 32821 {
if checksum != 27860 {
// If this happens try cleaning and rebuilding your project
panic("bulletproofs: uniffi_bulletproofs_checksum_func_verify_simple: UniFFI API checksum mismatch")
}
@ -621,7 +623,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
length := readInt32(reader)
buffer := make([]byte, length)
read_length, err := reader.Read(buffer)
if err != nil {
if err != nil && err != io.EOF {
panic(err)
}
if read_length != int(length) {
@ -630,7 +632,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
return string(buffer)
}
func (FfiConverterString) Lower(value string) RustBuffer {
func (FfiConverterString) Lower(value string) C.RustBuffer {
return stringToRustBuffer(value)
}
@ -665,15 +667,15 @@ func (r *RangeProofResult) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.Blinding)
}
type FfiConverterTypeRangeProofResult struct{}
type FfiConverterRangeProofResult struct{}
var FfiConverterTypeRangeProofResultINSTANCE = FfiConverterTypeRangeProofResult{}
var FfiConverterRangeProofResultINSTANCE = FfiConverterRangeProofResult{}
func (c FfiConverterTypeRangeProofResult) Lift(rb RustBufferI) RangeProofResult {
func (c FfiConverterRangeProofResult) Lift(rb RustBufferI) RangeProofResult {
return LiftFromRustBuffer[RangeProofResult](c, rb)
}
func (c FfiConverterTypeRangeProofResult) Read(reader io.Reader) RangeProofResult {
func (c FfiConverterRangeProofResult) Read(reader io.Reader) RangeProofResult {
return RangeProofResult{
FfiConverterSequenceUint8INSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
@ -681,19 +683,19 @@ func (c FfiConverterTypeRangeProofResult) Read(reader io.Reader) RangeProofResul
}
}
func (c FfiConverterTypeRangeProofResult) Lower(value RangeProofResult) RustBuffer {
func (c FfiConverterRangeProofResult) Lower(value RangeProofResult) C.RustBuffer {
return LowerIntoRustBuffer[RangeProofResult](c, value)
}
func (c FfiConverterTypeRangeProofResult) Write(writer io.Writer, value RangeProofResult) {
func (c FfiConverterRangeProofResult) Write(writer io.Writer, value RangeProofResult) {
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Proof)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Commitment)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Blinding)
}
type FfiDestroyerTypeRangeProofResult struct{}
type FfiDestroyerRangeProofResult struct{}
func (_ FfiDestroyerTypeRangeProofResult) Destroy(value RangeProofResult) {
func (_ FfiDestroyerRangeProofResult) Destroy(value RangeProofResult) {
value.Destroy()
}
@ -717,7 +719,7 @@ func (c FfiConverterSequenceUint8) Read(reader io.Reader) []uint8 {
return result
}
func (c FfiConverterSequenceUint8) Lower(value []uint8) RustBuffer {
func (c FfiConverterSequenceUint8) Lower(value []uint8) C.RustBuffer {
return LowerIntoRustBuffer[[]uint8](c, value)
}
@ -760,7 +762,7 @@ func (c FfiConverterSequenceSequenceUint8) Read(reader io.Reader) [][]uint8 {
return result
}
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) RustBuffer {
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) C.RustBuffer {
return LowerIntoRustBuffer[[][]uint8](c, value)
}
@ -785,97 +787,129 @@ func (FfiDestroyerSequenceSequenceUint8) Destroy(sequence [][]uint8) {
func AltGenerator() []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_alt_generator(_uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_alt_generator(_uniffiStatus),
}
}))
}
func GenerateInputCommitments(values [][]uint8, blinding []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_generate_input_commitments(FfiConverterSequenceSequenceUint8INSTANCE.Lower(values), FfiConverterSequenceUint8INSTANCE.Lower(blinding), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_generate_input_commitments(FfiConverterSequenceSequenceUint8INSTANCE.Lower(values), FfiConverterSequenceUint8INSTANCE.Lower(blinding), _uniffiStatus),
}
}))
}
func GenerateRangeProof(values [][]uint8, blinding []uint8, bitSize uint64) RangeProofResult {
return FfiConverterTypeRangeProofResultINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_generate_range_proof(FfiConverterSequenceSequenceUint8INSTANCE.Lower(values), FfiConverterSequenceUint8INSTANCE.Lower(blinding), FfiConverterUint64INSTANCE.Lower(bitSize), _uniffiStatus)
return FfiConverterRangeProofResultINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_generate_range_proof(FfiConverterSequenceSequenceUint8INSTANCE.Lower(values), FfiConverterSequenceUint8INSTANCE.Lower(blinding), FfiConverterUint64INSTANCE.Lower(bitSize), _uniffiStatus),
}
}))
}
func HashToScalar(input []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_hash_to_scalar(FfiConverterSequenceUint8INSTANCE.Lower(input), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_hash_to_scalar(FfiConverterSequenceUint8INSTANCE.Lower(input), _uniffiStatus),
}
}))
}
func Keygen() []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_keygen(_uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_keygen(_uniffiStatus),
}
}))
}
func PointAddition(inputPoint []uint8, publicPoint []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_point_addition(FfiConverterSequenceUint8INSTANCE.Lower(inputPoint), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_point_addition(FfiConverterSequenceUint8INSTANCE.Lower(inputPoint), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus),
}
}))
}
func PointSubtraction(inputPoint []uint8, publicPoint []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_point_subtraction(FfiConverterSequenceUint8INSTANCE.Lower(inputPoint), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_point_subtraction(FfiConverterSequenceUint8INSTANCE.Lower(inputPoint), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus),
}
}))
}
func ScalarAddition(lhs []uint8, rhs []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_addition(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_addition(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus),
}
}))
}
func ScalarInverse(inputScalar []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_inverse(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_inverse(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), _uniffiStatus),
}
}))
}
func ScalarMult(lhs []uint8, rhs []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_mult(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_mult(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus),
}
}))
}
func ScalarMultHashToScalar(inputScalar []uint8, publicPoint []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_mult_hash_to_scalar(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_mult_hash_to_scalar(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus),
}
}))
}
func ScalarMultPoint(inputScalar []uint8, publicPoint []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_mult_point(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_mult_point(FfiConverterSequenceUint8INSTANCE.Lower(inputScalar), FfiConverterSequenceUint8INSTANCE.Lower(publicPoint), _uniffiStatus),
}
}))
}
func ScalarSubtraction(lhs []uint8, rhs []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_subtraction(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_subtraction(FfiConverterSequenceUint8INSTANCE.Lower(lhs), FfiConverterSequenceUint8INSTANCE.Lower(rhs), _uniffiStatus),
}
}))
}
func ScalarToPoint(input []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_scalar_to_point(FfiConverterSequenceUint8INSTANCE.Lower(input), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_scalar_to_point(FfiConverterSequenceUint8INSTANCE.Lower(input), _uniffiStatus),
}
}))
}
func SignHidden(x []uint8, t []uint8, a []uint8, r []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_sign_hidden(FfiConverterSequenceUint8INSTANCE.Lower(x), FfiConverterSequenceUint8INSTANCE.Lower(t), FfiConverterSequenceUint8INSTANCE.Lower(a), FfiConverterSequenceUint8INSTANCE.Lower(r), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_sign_hidden(FfiConverterSequenceUint8INSTANCE.Lower(x), FfiConverterSequenceUint8INSTANCE.Lower(t), FfiConverterSequenceUint8INSTANCE.Lower(a), FfiConverterSequenceUint8INSTANCE.Lower(r), _uniffiStatus),
}
}))
}
func SignSimple(secret []uint8, message []uint8) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bulletproofs_fn_func_sign_simple(FfiConverterSequenceUint8INSTANCE.Lower(secret), FfiConverterSequenceUint8INSTANCE.Lower(message), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_bulletproofs_fn_func_sign_simple(FfiConverterSequenceUint8INSTANCE.Lower(secret), FfiConverterSequenceUint8INSTANCE.Lower(message), _uniffiStatus),
}
}))
}

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +0,0 @@
#include <channel.h>
// This file exists beacause of
// https://github.com/golang/go/issues/11263
void cgo_rust_task_callback_bridge_channel(RustTaskCallback cb, const void * taskData, int8_t status) {
cb(taskData, status);
}

View File

@ -12,60 +12,67 @@ import (
"unsafe"
)
type RustBuffer = C.RustBuffer
// This is needed, because as of go 1.24
// type RustBuffer C.RustBuffer cannot have methods,
// RustBuffer is treated as non-local type
type GoRustBuffer struct {
inner C.RustBuffer
}
type RustBufferI interface {
AsReader() *bytes.Reader
Free()
ToGoBytes() []byte
Data() unsafe.Pointer
Len() int
Capacity() int
Len() uint64
Capacity() uint64
}
func RustBufferFromExternal(b RustBufferI) RustBuffer {
return RustBuffer{
capacity: C.int(b.Capacity()),
len: C.int(b.Len()),
data: (*C.uchar)(b.Data()),
func RustBufferFromExternal(b RustBufferI) GoRustBuffer {
return GoRustBuffer{
inner: C.RustBuffer{
capacity: C.uint64_t(b.Capacity()),
len: C.uint64_t(b.Len()),
data: (*C.uchar)(b.Data()),
},
}
}
func (cb RustBuffer) Capacity() int {
return int(cb.capacity)
func (cb GoRustBuffer) Capacity() uint64 {
return uint64(cb.inner.capacity)
}
func (cb RustBuffer) Len() int {
return int(cb.len)
func (cb GoRustBuffer) Len() uint64 {
return uint64(cb.inner.len)
}
func (cb RustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.data)
func (cb GoRustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.inner.data)
}
func (cb RustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.data), C.int(cb.len))
func (cb GoRustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.inner.data), C.uint64_t(cb.inner.len))
return bytes.NewReader(b)
}
func (cb RustBuffer) Free() {
func (cb GoRustBuffer) Free() {
rustCall(func(status *C.RustCallStatus) bool {
C.ffi_channel_rustbuffer_free(cb, status)
C.ffi_channel_rustbuffer_free(cb.inner, status)
return false
})
}
func (cb RustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.data), C.int(cb.len))
func (cb GoRustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.inner.data), C.int(cb.inner.len))
}
func stringToRustBuffer(str string) RustBuffer {
func stringToRustBuffer(str string) C.RustBuffer {
return bytesToRustBuffer([]byte(str))
}
func bytesToRustBuffer(b []byte) RustBuffer {
func bytesToRustBuffer(b []byte) C.RustBuffer {
if len(b) == 0 {
return RustBuffer{}
return C.RustBuffer{}
}
// We can pass the pointer along here, as it is pinned
// for the duration of this call
@ -74,7 +81,7 @@ func bytesToRustBuffer(b []byte) RustBuffer {
data: (*C.uchar)(unsafe.Pointer(&b[0])),
}
return rustCall(func(status *C.RustCallStatus) RustBuffer {
return rustCall(func(status *C.RustCallStatus) C.RustBuffer {
return C.ffi_channel_rustbuffer_from_bytes(foreign, status)
})
}
@ -84,12 +91,7 @@ type BufLifter[GoType any] interface {
}
type BufLowerer[GoType any] interface {
Lower(value GoType) RustBuffer
}
type FfiConverter[GoType any, FfiType any] interface {
Lift(value FfiType) GoType
Lower(value GoType) FfiType
Lower(value GoType) C.RustBuffer
}
type BufReader[GoType any] interface {
@ -100,12 +102,7 @@ type BufWriter[GoType any] interface {
Write(writer io.Writer, value GoType)
}
type FfiRustBufConverter[GoType any, FfiType any] interface {
FfiConverter[GoType, FfiType]
BufReader[GoType]
}
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) RustBuffer {
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) C.RustBuffer {
// This might be not the most efficient way but it does not require knowing allocation size
// beforehand
var buffer bytes.Buffer
@ -130,31 +127,30 @@ func LiftFromRustBuffer[GoType any](bufReader BufReader[GoType], rbuf RustBuffer
return item
}
func rustCallWithError[U any](converter BufLifter[error], callback func(*C.RustCallStatus) U) (U, error) {
func rustCallWithError[E any, U any](converter BufReader[*E], callback func(*C.RustCallStatus) U) (U, *E) {
var status C.RustCallStatus
returnValue := callback(&status)
err := checkCallStatus(converter, status)
return returnValue, err
}
func checkCallStatus(converter BufLifter[error], status C.RustCallStatus) error {
func checkCallStatus[E any](converter BufReader[*E], status C.RustCallStatus) *E {
switch status.code {
case 0:
return nil
case 1:
return converter.Lift(status.errorBuf)
return LiftFromRustBuffer(converter, GoRustBuffer{inner: status.errorBuf})
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a rustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{inner: status.errorBuf})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
panic(fmt.Errorf("unknown status code: %d", status.code))
}
}
@ -165,11 +161,13 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
case 1:
panic(fmt.Errorf("function not returning an error returned an error"))
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a C.RustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{
inner: status.errorBuf,
})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
@ -179,13 +177,17 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
}
// rustCall invokes a scaffolding function that is not declared to return an
// error; if Rust nonetheless reports one, this panics.
func rustCall[U any](callback func(*C.RustCallStatus) U) U {
	returnValue, err := rustCallWithError[error](nil, callback)
	if err != nil {
		panic(err)
	}
	return returnValue
}
// NativeError describes generated FFI error values that can be converted to a
// standard Go error via AsError.
type NativeError interface {
	AsError() error
}
func writeInt8(writer io.Writer, value int8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
@ -333,119 +335,119 @@ func init() {
func uniffiCheckChecksums() {
// Get the bindings contract version from our ComponentInterface
bindingsContractVersion := 24
bindingsContractVersion := 26
// Get the scaffolding contract version by calling the into the dylib
scaffoldingContractVersion := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_channel_uniffi_contract_version(uniffiStatus)
scaffoldingContractVersion := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_channel_uniffi_contract_version()
})
if bindingsContractVersion != int(scaffoldingContractVersion) {
// If this happens try cleaning and rebuilding your project
panic("channel: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_decrypt(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_decrypt()
})
if checksum != 57128 {
if checksum != 13335 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_double_ratchet_decrypt: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_encrypt(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_encrypt()
})
if checksum != 10167 {
if checksum != 59209 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_double_ratchet_encrypt: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_new_double_ratchet(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_new_double_ratchet()
})
if checksum != 21249 {
if checksum != 16925 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_new_double_ratchet: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_new_triple_ratchet(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_new_triple_ratchet()
})
if checksum != 11118 {
if checksum != 20275 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_new_triple_ratchet: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_receiver_x3dh(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_receiver_x3dh()
})
if checksum != 53802 {
if checksum != 19343 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_receiver_x3dh: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_sender_x3dh(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_sender_x3dh()
})
if checksum != 2887 {
if checksum != 41646 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_sender_x3dh: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_decrypt(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_decrypt()
})
if checksum != 56417 {
if checksum != 42324 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_decrypt: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_encrypt(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_encrypt()
})
if checksum != 63768 {
if checksum != 61617 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_encrypt: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_1(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_1()
})
if checksum != 48593 {
if checksum != 42612 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_1: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_2(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_2()
})
if checksum != 55359 {
if checksum != 11875 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_2: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_3(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_3()
})
if checksum != 50330 {
if checksum != 50331 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_3: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_4(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_4()
})
if checksum != 58513 {
if checksum != 14779 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_4: UniFFI API checksum mismatch")
}
@ -549,7 +551,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
length := readInt32(reader)
buffer := make([]byte, length)
read_length, err := reader.Read(buffer)
if err != nil {
if err != nil && err != io.EOF {
panic(err)
}
if read_length != int(length) {
@ -558,7 +560,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
return string(buffer)
}
func (FfiConverterString) Lower(value string) RustBuffer {
func (FfiConverterString) Lower(value string) C.RustBuffer {
return stringToRustBuffer(value)
}
@ -591,33 +593,33 @@ func (r *DoubleRatchetStateAndEnvelope) Destroy() {
FfiDestroyerString{}.Destroy(r.Envelope)
}
type FfiConverterTypeDoubleRatchetStateAndEnvelope struct{}
type FfiConverterDoubleRatchetStateAndEnvelope struct{}
var FfiConverterTypeDoubleRatchetStateAndEnvelopeINSTANCE = FfiConverterTypeDoubleRatchetStateAndEnvelope{}
var FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE = FfiConverterDoubleRatchetStateAndEnvelope{}
func (c FfiConverterTypeDoubleRatchetStateAndEnvelope) Lift(rb RustBufferI) DoubleRatchetStateAndEnvelope {
func (c FfiConverterDoubleRatchetStateAndEnvelope) Lift(rb RustBufferI) DoubleRatchetStateAndEnvelope {
return LiftFromRustBuffer[DoubleRatchetStateAndEnvelope](c, rb)
}
func (c FfiConverterTypeDoubleRatchetStateAndEnvelope) Read(reader io.Reader) DoubleRatchetStateAndEnvelope {
func (c FfiConverterDoubleRatchetStateAndEnvelope) Read(reader io.Reader) DoubleRatchetStateAndEnvelope {
return DoubleRatchetStateAndEnvelope{
FfiConverterStringINSTANCE.Read(reader),
FfiConverterStringINSTANCE.Read(reader),
}
}
func (c FfiConverterTypeDoubleRatchetStateAndEnvelope) Lower(value DoubleRatchetStateAndEnvelope) RustBuffer {
func (c FfiConverterDoubleRatchetStateAndEnvelope) Lower(value DoubleRatchetStateAndEnvelope) C.RustBuffer {
return LowerIntoRustBuffer[DoubleRatchetStateAndEnvelope](c, value)
}
func (c FfiConverterTypeDoubleRatchetStateAndEnvelope) Write(writer io.Writer, value DoubleRatchetStateAndEnvelope) {
func (c FfiConverterDoubleRatchetStateAndEnvelope) Write(writer io.Writer, value DoubleRatchetStateAndEnvelope) {
FfiConverterStringINSTANCE.Write(writer, value.RatchetState)
FfiConverterStringINSTANCE.Write(writer, value.Envelope)
}
type FfiDestroyerTypeDoubleRatchetStateAndEnvelope struct{}
type FfiDestroyerDoubleRatchetStateAndEnvelope struct{}
func (_ FfiDestroyerTypeDoubleRatchetStateAndEnvelope) Destroy(value DoubleRatchetStateAndEnvelope) {
func (_ FfiDestroyerDoubleRatchetStateAndEnvelope) Destroy(value DoubleRatchetStateAndEnvelope) {
value.Destroy()
}
@ -631,33 +633,33 @@ func (r *DoubleRatchetStateAndMessage) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.Message)
}
type FfiConverterTypeDoubleRatchetStateAndMessage struct{}
type FfiConverterDoubleRatchetStateAndMessage struct{}
var FfiConverterTypeDoubleRatchetStateAndMessageINSTANCE = FfiConverterTypeDoubleRatchetStateAndMessage{}
var FfiConverterDoubleRatchetStateAndMessageINSTANCE = FfiConverterDoubleRatchetStateAndMessage{}
func (c FfiConverterTypeDoubleRatchetStateAndMessage) Lift(rb RustBufferI) DoubleRatchetStateAndMessage {
func (c FfiConverterDoubleRatchetStateAndMessage) Lift(rb RustBufferI) DoubleRatchetStateAndMessage {
return LiftFromRustBuffer[DoubleRatchetStateAndMessage](c, rb)
}
func (c FfiConverterTypeDoubleRatchetStateAndMessage) Read(reader io.Reader) DoubleRatchetStateAndMessage {
func (c FfiConverterDoubleRatchetStateAndMessage) Read(reader io.Reader) DoubleRatchetStateAndMessage {
return DoubleRatchetStateAndMessage{
FfiConverterStringINSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
}
}
func (c FfiConverterTypeDoubleRatchetStateAndMessage) Lower(value DoubleRatchetStateAndMessage) RustBuffer {
func (c FfiConverterDoubleRatchetStateAndMessage) Lower(value DoubleRatchetStateAndMessage) C.RustBuffer {
return LowerIntoRustBuffer[DoubleRatchetStateAndMessage](c, value)
}
func (c FfiConverterTypeDoubleRatchetStateAndMessage) Write(writer io.Writer, value DoubleRatchetStateAndMessage) {
func (c FfiConverterDoubleRatchetStateAndMessage) Write(writer io.Writer, value DoubleRatchetStateAndMessage) {
FfiConverterStringINSTANCE.Write(writer, value.RatchetState)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Message)
}
type FfiDestroyerTypeDoubleRatchetStateAndMessage struct{}
type FfiDestroyerDoubleRatchetStateAndMessage struct{}
func (_ FfiDestroyerTypeDoubleRatchetStateAndMessage) Destroy(value DoubleRatchetStateAndMessage) {
func (_ FfiDestroyerDoubleRatchetStateAndMessage) Destroy(value DoubleRatchetStateAndMessage) {
value.Destroy()
}
@ -671,33 +673,33 @@ func (r *TripleRatchetStateAndEnvelope) Destroy() {
FfiDestroyerString{}.Destroy(r.Envelope)
}
type FfiConverterTypeTripleRatchetStateAndEnvelope struct{}
type FfiConverterTripleRatchetStateAndEnvelope struct{}
var FfiConverterTypeTripleRatchetStateAndEnvelopeINSTANCE = FfiConverterTypeTripleRatchetStateAndEnvelope{}
var FfiConverterTripleRatchetStateAndEnvelopeINSTANCE = FfiConverterTripleRatchetStateAndEnvelope{}
func (c FfiConverterTypeTripleRatchetStateAndEnvelope) Lift(rb RustBufferI) TripleRatchetStateAndEnvelope {
func (c FfiConverterTripleRatchetStateAndEnvelope) Lift(rb RustBufferI) TripleRatchetStateAndEnvelope {
return LiftFromRustBuffer[TripleRatchetStateAndEnvelope](c, rb)
}
func (c FfiConverterTypeTripleRatchetStateAndEnvelope) Read(reader io.Reader) TripleRatchetStateAndEnvelope {
func (c FfiConverterTripleRatchetStateAndEnvelope) Read(reader io.Reader) TripleRatchetStateAndEnvelope {
return TripleRatchetStateAndEnvelope{
FfiConverterStringINSTANCE.Read(reader),
FfiConverterStringINSTANCE.Read(reader),
}
}
func (c FfiConverterTypeTripleRatchetStateAndEnvelope) Lower(value TripleRatchetStateAndEnvelope) RustBuffer {
func (c FfiConverterTripleRatchetStateAndEnvelope) Lower(value TripleRatchetStateAndEnvelope) C.RustBuffer {
return LowerIntoRustBuffer[TripleRatchetStateAndEnvelope](c, value)
}
func (c FfiConverterTypeTripleRatchetStateAndEnvelope) Write(writer io.Writer, value TripleRatchetStateAndEnvelope) {
func (c FfiConverterTripleRatchetStateAndEnvelope) Write(writer io.Writer, value TripleRatchetStateAndEnvelope) {
FfiConverterStringINSTANCE.Write(writer, value.RatchetState)
FfiConverterStringINSTANCE.Write(writer, value.Envelope)
}
type FfiDestroyerTypeTripleRatchetStateAndEnvelope struct{}
type FfiDestroyerTripleRatchetStateAndEnvelope struct{}
func (_ FfiDestroyerTypeTripleRatchetStateAndEnvelope) Destroy(value TripleRatchetStateAndEnvelope) {
func (_ FfiDestroyerTripleRatchetStateAndEnvelope) Destroy(value TripleRatchetStateAndEnvelope) {
value.Destroy()
}
@ -711,33 +713,33 @@ func (r *TripleRatchetStateAndMessage) Destroy() {
FfiDestroyerSequenceUint8{}.Destroy(r.Message)
}
type FfiConverterTypeTripleRatchetStateAndMessage struct{}
type FfiConverterTripleRatchetStateAndMessage struct{}
var FfiConverterTypeTripleRatchetStateAndMessageINSTANCE = FfiConverterTypeTripleRatchetStateAndMessage{}
var FfiConverterTripleRatchetStateAndMessageINSTANCE = FfiConverterTripleRatchetStateAndMessage{}
func (c FfiConverterTypeTripleRatchetStateAndMessage) Lift(rb RustBufferI) TripleRatchetStateAndMessage {
func (c FfiConverterTripleRatchetStateAndMessage) Lift(rb RustBufferI) TripleRatchetStateAndMessage {
return LiftFromRustBuffer[TripleRatchetStateAndMessage](c, rb)
}
func (c FfiConverterTypeTripleRatchetStateAndMessage) Read(reader io.Reader) TripleRatchetStateAndMessage {
func (c FfiConverterTripleRatchetStateAndMessage) Read(reader io.Reader) TripleRatchetStateAndMessage {
return TripleRatchetStateAndMessage{
FfiConverterStringINSTANCE.Read(reader),
FfiConverterSequenceUint8INSTANCE.Read(reader),
}
}
func (c FfiConverterTypeTripleRatchetStateAndMessage) Lower(value TripleRatchetStateAndMessage) RustBuffer {
func (c FfiConverterTripleRatchetStateAndMessage) Lower(value TripleRatchetStateAndMessage) C.RustBuffer {
return LowerIntoRustBuffer[TripleRatchetStateAndMessage](c, value)
}
func (c FfiConverterTypeTripleRatchetStateAndMessage) Write(writer io.Writer, value TripleRatchetStateAndMessage) {
func (c FfiConverterTripleRatchetStateAndMessage) Write(writer io.Writer, value TripleRatchetStateAndMessage) {
FfiConverterStringINSTANCE.Write(writer, value.RatchetState)
FfiConverterSequenceUint8INSTANCE.Write(writer, value.Message)
}
type FfiDestroyerTypeTripleRatchetStateAndMessage struct{}
type FfiDestroyerTripleRatchetStateAndMessage struct{}
func (_ FfiDestroyerTypeTripleRatchetStateAndMessage) Destroy(value TripleRatchetStateAndMessage) {
func (_ FfiDestroyerTripleRatchetStateAndMessage) Destroy(value TripleRatchetStateAndMessage) {
value.Destroy()
}
@ -751,33 +753,33 @@ func (r *TripleRatchetStateAndMetadata) Destroy() {
FfiDestroyerMapStringString{}.Destroy(r.Metadata)
}
type FfiConverterTypeTripleRatchetStateAndMetadata struct{}
type FfiConverterTripleRatchetStateAndMetadata struct{}
var FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE = FfiConverterTypeTripleRatchetStateAndMetadata{}
var FfiConverterTripleRatchetStateAndMetadataINSTANCE = FfiConverterTripleRatchetStateAndMetadata{}
func (c FfiConverterTypeTripleRatchetStateAndMetadata) Lift(rb RustBufferI) TripleRatchetStateAndMetadata {
func (c FfiConverterTripleRatchetStateAndMetadata) Lift(rb RustBufferI) TripleRatchetStateAndMetadata {
return LiftFromRustBuffer[TripleRatchetStateAndMetadata](c, rb)
}
func (c FfiConverterTypeTripleRatchetStateAndMetadata) Read(reader io.Reader) TripleRatchetStateAndMetadata {
func (c FfiConverterTripleRatchetStateAndMetadata) Read(reader io.Reader) TripleRatchetStateAndMetadata {
return TripleRatchetStateAndMetadata{
FfiConverterStringINSTANCE.Read(reader),
FfiConverterMapStringStringINSTANCE.Read(reader),
}
}
func (c FfiConverterTypeTripleRatchetStateAndMetadata) Lower(value TripleRatchetStateAndMetadata) RustBuffer {
func (c FfiConverterTripleRatchetStateAndMetadata) Lower(value TripleRatchetStateAndMetadata) C.RustBuffer {
return LowerIntoRustBuffer[TripleRatchetStateAndMetadata](c, value)
}
func (c FfiConverterTypeTripleRatchetStateAndMetadata) Write(writer io.Writer, value TripleRatchetStateAndMetadata) {
func (c FfiConverterTripleRatchetStateAndMetadata) Write(writer io.Writer, value TripleRatchetStateAndMetadata) {
FfiConverterStringINSTANCE.Write(writer, value.RatchetState)
FfiConverterMapStringStringINSTANCE.Write(writer, value.Metadata)
}
type FfiDestroyerTypeTripleRatchetStateAndMetadata struct{}
type FfiDestroyerTripleRatchetStateAndMetadata struct{}
func (_ FfiDestroyerTypeTripleRatchetStateAndMetadata) Destroy(value TripleRatchetStateAndMetadata) {
func (_ FfiDestroyerTripleRatchetStateAndMetadata) Destroy(value TripleRatchetStateAndMetadata) {
value.Destroy()
}
@ -801,7 +803,7 @@ func (c FfiConverterSequenceUint8) Read(reader io.Reader) []uint8 {
return result
}
func (c FfiConverterSequenceUint8) Lower(value []uint8) RustBuffer {
func (c FfiConverterSequenceUint8) Lower(value []uint8) C.RustBuffer {
return LowerIntoRustBuffer[[]uint8](c, value)
}
@ -844,7 +846,7 @@ func (c FfiConverterSequenceSequenceUint8) Read(reader io.Reader) [][]uint8 {
return result
}
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) RustBuffer {
func (c FfiConverterSequenceSequenceUint8) Lower(value [][]uint8) C.RustBuffer {
return LowerIntoRustBuffer[[][]uint8](c, value)
}
@ -886,7 +888,7 @@ func (_ FfiConverterMapStringString) Read(reader io.Reader) map[string]string {
return result
}
func (c FfiConverterMapStringString) Lower(value map[string]string) RustBuffer {
func (c FfiConverterMapStringString) Lower(value map[string]string) C.RustBuffer {
return LowerIntoRustBuffer[map[string]string](c, value)
}
@ -912,73 +914,97 @@ func (_ FfiDestroyerMapStringString) Destroy(mapValue map[string]string) {
}
func DoubleRatchetDecrypt(ratchetStateAndEnvelope DoubleRatchetStateAndEnvelope) DoubleRatchetStateAndMessage {
return FfiConverterTypeDoubleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_double_ratchet_decrypt(FfiConverterTypeDoubleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus)
return FfiConverterDoubleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_double_ratchet_decrypt(FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus),
}
}))
}
func DoubleRatchetEncrypt(ratchetStateAndMessage DoubleRatchetStateAndMessage) DoubleRatchetStateAndEnvelope {
return FfiConverterTypeDoubleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_double_ratchet_encrypt(FfiConverterTypeDoubleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus)
return FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_double_ratchet_encrypt(FfiConverterDoubleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus),
}
}))
}
// NewDoubleRatchet calls the Rust `new_double_ratchet` scaffolding function,
// lowering each argument and lifting the returned state as a string.
func NewDoubleRatchet(sessionKey []uint8, sendingHeaderKey []uint8, nextReceivingHeaderKey []uint8, isSender bool, sendingEphemeralPrivateKey []uint8, receivingEphemeralKey []uint8) string {
	return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
		return GoRustBuffer{
			inner: C.uniffi_channel_fn_func_new_double_ratchet(FfiConverterSequenceUint8INSTANCE.Lower(sessionKey), FfiConverterSequenceUint8INSTANCE.Lower(sendingHeaderKey), FfiConverterSequenceUint8INSTANCE.Lower(nextReceivingHeaderKey), FfiConverterBoolINSTANCE.Lower(isSender), FfiConverterSequenceUint8INSTANCE.Lower(sendingEphemeralPrivateKey), FfiConverterSequenceUint8INSTANCE.Lower(receivingEphemeralKey), _uniffiStatus),
		}
	}))
}
func NewTripleRatchet(peers [][]uint8, peerKey []uint8, identityKey []uint8, signedPreKey []uint8, threshold uint64, asyncDkgRatchet bool) TripleRatchetStateAndMetadata {
return FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_new_triple_ratchet(FfiConverterSequenceSequenceUint8INSTANCE.Lower(peers), FfiConverterSequenceUint8INSTANCE.Lower(peerKey), FfiConverterSequenceUint8INSTANCE.Lower(identityKey), FfiConverterSequenceUint8INSTANCE.Lower(signedPreKey), FfiConverterUint64INSTANCE.Lower(threshold), FfiConverterBoolINSTANCE.Lower(asyncDkgRatchet), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_new_triple_ratchet(FfiConverterSequenceSequenceUint8INSTANCE.Lower(peers), FfiConverterSequenceUint8INSTANCE.Lower(peerKey), FfiConverterSequenceUint8INSTANCE.Lower(identityKey), FfiConverterSequenceUint8INSTANCE.Lower(signedPreKey), FfiConverterUint64INSTANCE.Lower(threshold), FfiConverterBoolINSTANCE.Lower(asyncDkgRatchet), _uniffiStatus),
}
}))
}
// ReceiverX3dh calls the Rust `receiver_x3dh` scaffolding function and lifts
// the result as a string.
func ReceiverX3dh(sendingIdentityPrivateKey []uint8, sendingSignedPrivateKey []uint8, receivingIdentityKey []uint8, receivingEphemeralKey []uint8, sessionKeyLength uint64) string {
	return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
		return GoRustBuffer{
			inner: C.uniffi_channel_fn_func_receiver_x3dh(FfiConverterSequenceUint8INSTANCE.Lower(sendingIdentityPrivateKey), FfiConverterSequenceUint8INSTANCE.Lower(sendingSignedPrivateKey), FfiConverterSequenceUint8INSTANCE.Lower(receivingIdentityKey), FfiConverterSequenceUint8INSTANCE.Lower(receivingEphemeralKey), FfiConverterUint64INSTANCE.Lower(sessionKeyLength), _uniffiStatus),
		}
	}))
}
// SenderX3dh calls the Rust `sender_x3dh` scaffolding function and lifts the
// result as a string.
func SenderX3dh(sendingIdentityPrivateKey []uint8, sendingEphemeralPrivateKey []uint8, receivingIdentityKey []uint8, receivingSignedPreKey []uint8, sessionKeyLength uint64) string {
	return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
		return GoRustBuffer{
			inner: C.uniffi_channel_fn_func_sender_x3dh(FfiConverterSequenceUint8INSTANCE.Lower(sendingIdentityPrivateKey), FfiConverterSequenceUint8INSTANCE.Lower(sendingEphemeralPrivateKey), FfiConverterSequenceUint8INSTANCE.Lower(receivingIdentityKey), FfiConverterSequenceUint8INSTANCE.Lower(receivingSignedPreKey), FfiConverterUint64INSTANCE.Lower(sessionKeyLength), _uniffiStatus),
		}
	}))
}
func TripleRatchetDecrypt(ratchetStateAndEnvelope TripleRatchetStateAndEnvelope) TripleRatchetStateAndMessage {
return FfiConverterTypeTripleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_decrypt(FfiConverterTypeTripleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_decrypt(FfiConverterTripleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus),
}
}))
}
func TripleRatchetEncrypt(ratchetStateAndMessage TripleRatchetStateAndMessage) TripleRatchetStateAndEnvelope {
return FfiConverterTypeTripleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_encrypt(FfiConverterTypeTripleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus)
return FfiConverterTripleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_encrypt(FfiConverterTripleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus),
}
}))
}
func TripleRatchetInitRound1(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_init_round_1(FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_1(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
}
func TripleRatchetInitRound2(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_init_round_2(FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_2(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
}
func TripleRatchetInitRound3(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_init_round_3(FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_3(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
}
func TripleRatchetInitRound4(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_channel_fn_func_triple_ratchet_init_round_4(FfiConverterTypeTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus)
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_4(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
}

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
module source.quilibrium.com/quilibrium/monorepo/channel
go 1.20
go 1.23.2
// A necessary hack until source.quilibrium.com is open to all
replace source.quilibrium.com/quilibrium/monorepo/nekryptology => ../nekryptology
@ -13,6 +13,8 @@ replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
replace github.com/libp2p/go-libp2p => ../go-libp2p
replace source.quilibrium.com/quilibrium/monorepo/consensus => ../consensus
replace source.quilibrium.com/quilibrium/monorepo/types => ../types
replace source.quilibrium.com/quilibrium/monorepo/utils => ../utils
@ -28,7 +30,33 @@ require (
github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401 // indirect
github.com/bwesterb/go-ristretto v1.2.3 // indirect
github.com/consensys/gnark-crypto v0.5.3 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.41.1 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.1 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/text v0.26.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.72.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
source.quilibrium.com/quilibrium/monorepo/consensus v0.0.0-00010101000000-000000000000 // indirect
source.quilibrium.com/quilibrium/monorepo/protobufs v0.0.0-00010101000000-000000000000 // indirect
)
@ -36,7 +64,7 @@ require (
github.com/cloudflare/circl v1.6.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/sys v0.33.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000

View File

@ -24,22 +24,60 @@ github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaO
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@ -48,19 +86,39 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -72,7 +130,17 @@ golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@ -81,4 +149,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@ -44,6 +44,7 @@ type Config struct {
P2P *P2PConfig `yaml:"p2p"`
Engine *EngineConfig `yaml:"engine"`
DB *DBConfig `yaml:"db"`
Logger *LogConfig `yaml:"logger"`
ListenGRPCMultiaddr string `yaml:"listenGrpcMultiaddr"`
ListenRestMultiaddr string `yaml:"listenRESTMultiaddr"`
LogFile string `yaml:"logFile"`

49
config/logger.go Normal file
View File

@ -0,0 +1,49 @@
package config
import (
"io"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/utils/logging"
)
// LogConfig configures rotating-file log output. When Config.Logger is
// nil (and Config.LogFile is empty) the process falls back to plain
// zap development/production logging instead of a file.
type LogConfig struct {
	// Path is the directory the rotating log files are written to.
	Path string `yaml:"path"`
	// MaxSize bounds the size of a single log file before rotation —
	// presumably megabytes; TODO confirm against logging.NewRotatingFileLogger.
	MaxSize int `yaml:"maxSize"`
	// MaxBackups is the number of rotated files to retain.
	MaxBackups int `yaml:"maxBackups"`
	// MaxAge limits how long rotated files are kept — presumably days;
	// TODO confirm against the logging package.
	MaxAge int `yaml:"maxAge"`
	// Compress, when true, compresses rotated log files.
	Compress bool `yaml:"compress"`
}
// CreateLogger constructs the logger for this configuration.
//
// If a log file name or a Logger section is configured, a rotating file
// logger is created via the logging package (the Logger.Path, when set,
// selects the output directory). Otherwise a standard zap logger is
// returned — development flavor when debug is true, production flavor
// otherwise — together with a no-op closer.
//
// The returned io.Closer must be closed by the caller to flush and
// release any underlying file resources.
func (c *Config) CreateLogger(coreId uint, debug bool) (
	*zap.Logger,
	io.Closer,
	error,
) {
	// File-backed logging wins whenever either knob is configured.
	if c.LogFile != "" || c.Logger != nil {
		var dir string
		if c.Logger != nil {
			dir = c.Logger.Path
		}
		fileLogger, closer, err := logging.NewRotatingFileLogger(
			debug,
			coreId,
			dir,
			c.LogFile,
		)
		return fileLogger, closer, errors.Wrap(err, "create logger")
	}

	// No file configured: pick the zap constructor by mode and build.
	build := zap.NewProduction
	if debug {
		build = zap.NewDevelopment
	}
	logger, err := build()
	return logger, io.NopCloser(nil), errors.Wrap(err, "create logger")
}

View File

@ -425,6 +425,8 @@ func (sm *StateMachine[
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
time.Sleep(10 * time.Second)
sm.SendEvent(EventSyncTimeout)
return
}
found := false
@ -568,18 +570,21 @@ func (sm *StateMachine[
),
)
time.Sleep(100 * time.Millisecond)
err := sm.livenessProvider.SendLiveness(data, collected, ctx)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
select {
case <-time.After(1 * time.Second):
err := sm.livenessProvider.SendLiveness(data, collected, ctx)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
}
case <-ctx.Done():
}
},
Timeout: 1 * time.Second,
Timeout: 2 * time.Second,
OnTimeout: EventLivenessTimeout,
}
@ -921,13 +926,13 @@ func (sm *StateMachine[
StateLivenessCheck,
nil,
)
// Loop until we get enough of these
addTransition(
StateLivenessCheck,
EventLivenessCheckReceived,
StateLivenessCheck,
nil,
)
// // Loop until we get enough of these
// addTransition(
// StateLivenessCheck,
// EventLivenessCheckReceived,
// StateLivenessCheck,
// nil,
// )
// Prover flow
addTransition(StateProving, EventProofComplete, StatePublishing, nil)
@ -937,7 +942,7 @@ func (sm *StateMachine[
// Common voting flow
addTransition(StateVoting, EventProposalReceived, StateVoting, nil)
addTransition(StateVoting, EventVoteReceived, StateVoting, nil)
// addTransition(StateVoting, EventVoteReceived, StateVoting, nil)
addTransition(StateVoting, EventQuorumReached, StateFinalizing, nil)
addTransition(StateVoting, EventVotingTimeout, StateVoting, nil)
addTransition(StateFinalizing, EventAggregationDone, StateVerifying, nil)
@ -986,6 +991,14 @@ func (sm *StateMachine[
]) Stop() error {
sm.traceLogger.Trace("enter stop")
defer sm.traceLogger.Trace("exit stop")
drain:
for {
select {
case <-sm.eventChan:
default:
break drain
}
}
sm.SendEvent(EventStop)
return nil
}

View File

@ -10,7 +10,7 @@ name = "bls48581"
[dependencies]
hex = "0.4.3"
serde_json = "1.0.117"
uniffi = { version= "0.25", features = ["cli"] }
uniffi = { version= "0.28.3", features = ["cli"] }
rand = "0.8.5"
sha2 = "0.10.8"
@ -19,8 +19,8 @@ criterion = { version = "0.4", features = ["html_reports"] }
rand = "0.8.5"
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }
[[bench]]
name = "bench_bls"
harness = false
harness = false

View File

@ -31,11 +31,11 @@ thiserror = { version = "2" }
merlin = { version = "3", default-features = false }
clear_on_drop = { version = "0.2", default-features = false, optional = true }
lazy_static = "1.5"
uniffi = { version = "0.25.0", optional = true }
uniffi = { version = "0.28.3", optional = true }
bincode = "1.3.3"
[build-dependencies]
uniffi = { version = "0.25.0", features = ["build"], optional = true }
uniffi = { version = "0.28.3", features = ["build"], optional = true }
[features]
default = ["uniffi-bindings", "clear_on_drop"]

View File

@ -73,7 +73,7 @@ pub fn generate_range_proof(
blinding: vec![],
};
}
total = total + s.unwrap();
total = total + s.unwrap();
}
let mut blinding_scalars: Vec<Scalar> = Vec::new();
for _ in 0..values.len()-1 {

View File

@ -19,4 +19,4 @@ ed448-rust = { path = "../ed448-rust", version = "0.1.2" }
rand = "0.8.5"
sha2 = "0.10.8"
hkdf = "0.12.4"
aes-gcm = "0.10.3"
aes-gcm = "0.10.3"

View File

@ -20,15 +20,15 @@ thiserror = "1.0.63"
hmac = "0.12.1"
serde = "1.0.208"
lazy_static = "1.5.0"
uniffi = { version= "0.25", features = ["cli"]}
uniffi = { version= "0.28.3", features = ["cli"]}
[dev-dependencies]
criterion = { version = "0.4", features = ["html_reports"] }
rand = "0.8.5"
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }
[[bench]]
name = "bench_channel"
harness = false
harness = false

View File

@ -12,12 +12,12 @@ name = "ferret"
[dependencies]
libc = "0.2"
uniffi = { version= "0.25", features = ["cli"]}
uniffi = { version= "0.28.3", features = ["cli"]}
hex = "0.4.3"
rand = "0.9.0"
rand_chacha = "0.9.0"
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }
bindgen = "0.63.0"
cc = "1.0"

View File

@ -8,7 +8,7 @@ crate-type = ["lib", "staticlib"]
name = "rpm"
[dependencies]
uniffi = { version= "0.25", features = ["cli"]}
uniffi = { version= "0.28.3", features = ["cli"]}
curve25519-dalek = "4.1.3"
rand = "0.8.5"
num = "0.4.3"
@ -19,8 +19,8 @@ criterion = { version = "0.4", features = ["html_reports"] }
rand = "0.8.5"
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }
[[bench]]
name = "bench_rpm"
harness = false
harness = false

View File

@ -31,10 +31,10 @@ classgroup = { path = "../classgroup", version = "^0.1.0" }
num-traits = "0.2"
sha2 = "0.8"
bit-vec = "0.5"
uniffi = { version= "0.25", features = ["cli"]}
uniffi = { version= "0.28.3", features = ["cli"]}
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }
[dev-dependencies]
criterion = ">=0.2"

View File

@ -15,9 +15,9 @@ sha2 = "0.9.0"
hex = "0.4.0"
rand = "0.8"
ed448-goldilocks-plus = "0.11.2"
uniffi = { version= "0.25", features = ["cli"]}
uniffi = { version= "0.28.3", features = ["cli"]}
serde = "1.0.208"
rand_chacha = "0.3.1"
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }
uniffi = { version = "0.28.3", features = [ "build" ] }

View File

@ -1,8 +0,0 @@
#include <ferret.h>
// This file exists because of
// https://github.com/golang/go/issues/11263
void cgo_rust_task_callback_bridge_ferret(RustTaskCallback cb, const void * taskData, int8_t status) {
cb(taskData, status);
}

View File

@ -14,60 +14,67 @@ import (
"unsafe"
)
type RustBuffer = C.RustBuffer
// This is needed, because as of go 1.24
// type RustBuffer C.RustBuffer cannot have methods,
// RustBuffer is treated as non-local type
type GoRustBuffer struct {
inner C.RustBuffer
}
type RustBufferI interface {
AsReader() *bytes.Reader
Free()
ToGoBytes() []byte
Data() unsafe.Pointer
Len() int
Capacity() int
Len() uint64
Capacity() uint64
}
func RustBufferFromExternal(b RustBufferI) RustBuffer {
return RustBuffer{
capacity: C.int(b.Capacity()),
len: C.int(b.Len()),
data: (*C.uchar)(b.Data()),
func RustBufferFromExternal(b RustBufferI) GoRustBuffer {
return GoRustBuffer{
inner: C.RustBuffer{
capacity: C.uint64_t(b.Capacity()),
len: C.uint64_t(b.Len()),
data: (*C.uchar)(b.Data()),
},
}
}
func (cb RustBuffer) Capacity() int {
return int(cb.capacity)
func (cb GoRustBuffer) Capacity() uint64 {
return uint64(cb.inner.capacity)
}
func (cb RustBuffer) Len() int {
return int(cb.len)
func (cb GoRustBuffer) Len() uint64 {
return uint64(cb.inner.len)
}
func (cb RustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.data)
func (cb GoRustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.inner.data)
}
func (cb RustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.data), C.int(cb.len))
func (cb GoRustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.inner.data), C.uint64_t(cb.inner.len))
return bytes.NewReader(b)
}
func (cb RustBuffer) Free() {
func (cb GoRustBuffer) Free() {
rustCall(func(status *C.RustCallStatus) bool {
C.ffi_ferret_rustbuffer_free(cb, status)
C.ffi_ferret_rustbuffer_free(cb.inner, status)
return false
})
}
func (cb RustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.data), C.int(cb.len))
func (cb GoRustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.inner.data), C.int(cb.inner.len))
}
func stringToRustBuffer(str string) RustBuffer {
func stringToRustBuffer(str string) C.RustBuffer {
return bytesToRustBuffer([]byte(str))
}
func bytesToRustBuffer(b []byte) RustBuffer {
func bytesToRustBuffer(b []byte) C.RustBuffer {
if len(b) == 0 {
return RustBuffer{}
return C.RustBuffer{}
}
// We can pass the pointer along here, as it is pinned
// for the duration of this call
@ -76,7 +83,7 @@ func bytesToRustBuffer(b []byte) RustBuffer {
data: (*C.uchar)(unsafe.Pointer(&b[0])),
}
return rustCall(func(status *C.RustCallStatus) RustBuffer {
return rustCall(func(status *C.RustCallStatus) C.RustBuffer {
return C.ffi_ferret_rustbuffer_from_bytes(foreign, status)
})
}
@ -86,12 +93,7 @@ type BufLifter[GoType any] interface {
}
type BufLowerer[GoType any] interface {
Lower(value GoType) RustBuffer
}
type FfiConverter[GoType any, FfiType any] interface {
Lift(value FfiType) GoType
Lower(value GoType) FfiType
Lower(value GoType) C.RustBuffer
}
type BufReader[GoType any] interface {
@ -102,12 +104,7 @@ type BufWriter[GoType any] interface {
Write(writer io.Writer, value GoType)
}
type FfiRustBufConverter[GoType any, FfiType any] interface {
FfiConverter[GoType, FfiType]
BufReader[GoType]
}
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) RustBuffer {
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) C.RustBuffer {
// This might be not the most efficient way but it does not require knowing allocation size
// beforehand
var buffer bytes.Buffer
@ -132,31 +129,30 @@ func LiftFromRustBuffer[GoType any](bufReader BufReader[GoType], rbuf RustBuffer
return item
}
func rustCallWithError[U any](converter BufLifter[error], callback func(*C.RustCallStatus) U) (U, error) {
func rustCallWithError[E any, U any](converter BufReader[*E], callback func(*C.RustCallStatus) U) (U, *E) {
var status C.RustCallStatus
returnValue := callback(&status)
err := checkCallStatus(converter, status)
return returnValue, err
}
func checkCallStatus(converter BufLifter[error], status C.RustCallStatus) error {
func checkCallStatus[E any](converter BufReader[*E], status C.RustCallStatus) *E {
switch status.code {
case 0:
return nil
case 1:
return converter.Lift(status.errorBuf)
return LiftFromRustBuffer(converter, GoRustBuffer{inner: status.errorBuf})
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a rustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{inner: status.errorBuf})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
panic(fmt.Errorf("unknown status code: %d", status.code))
}
}
@ -167,11 +163,13 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
case 1:
panic(fmt.Errorf("function not returning an error returned an error"))
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// when the rust code sees a panic, it tries to construct a C.RustBuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(GoRustBuffer{
inner: status.errorBuf,
})))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
@ -181,13 +179,17 @@ func checkCallStatusUnknown(status C.RustCallStatus) error {
}
func rustCall[U any](callback func(*C.RustCallStatus) U) U {
returnValue, err := rustCallWithError(nil, callback)
returnValue, err := rustCallWithError[error](nil, callback)
if err != nil {
panic(err)
}
return returnValue
}
// NativeError is implemented by binding-level error types so they can
// be surfaced to callers as a standard Go error value.
type NativeError interface {
	// AsError converts the receiver into a plain error.
	AsError() error
}
func writeInt8(writer io.Writer, value int8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
@ -335,45 +337,45 @@ func init() {
func uniffiCheckChecksums() {
// Get the bindings contract version from our ComponentInterface
bindingsContractVersion := 24
bindingsContractVersion := 26
// Get the scaffolding contract version by calling the into the dylib
scaffoldingContractVersion := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_ferret_uniffi_contract_version(uniffiStatus)
scaffoldingContractVersion := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_ferret_uniffi_contract_version()
})
if bindingsContractVersion != int(scaffoldingContractVersion) {
// If this happens try cleaning and rebuilding your project
panic("ferret: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_ferret_cot_manager(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_ferret_cot_manager()
})
if checksum != 55889 {
if checksum != 49338 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_func_create_ferret_cot_manager: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_netio_manager(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_netio_manager()
})
if checksum != 43279 {
if checksum != 37785 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_func_create_netio_manager: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_get_block_data(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_get_block_data()
})
if checksum != 48460 {
if checksum != 54850 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotmanager_get_block_data: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_recv_cot(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_recv_cot()
})
if checksum != 47983 {
// If this happens try cleaning and rebuilding your project
@ -381,8 +383,8 @@ func uniffiCheckChecksums() {
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_recv_rot(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_recv_rot()
})
if checksum != 38722 {
// If this happens try cleaning and rebuilding your project
@ -390,8 +392,8 @@ func uniffiCheckChecksums() {
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_send_cot(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_send_cot()
})
if checksum != 25816 {
// If this happens try cleaning and rebuilding your project
@ -399,8 +401,8 @@ func uniffiCheckChecksums() {
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_send_rot(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_send_rot()
})
if checksum != 51835 {
// If this happens try cleaning and rebuilding your project
@ -408,10 +410,10 @@ func uniffiCheckChecksums() {
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_set_block_data(uniffiStatus)
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_set_block_data()
})
if checksum != 15140 {
if checksum != 39823 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotmanager_set_block_data: UniFFI API checksum mismatch")
}
@ -539,7 +541,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
length := readInt32(reader)
buffer := make([]byte, length)
read_length, err := reader.Read(buffer)
if err != nil {
if err != nil && err != io.EOF {
panic(err)
}
if read_length != int(length) {
@ -548,7 +550,7 @@ func (FfiConverterString) Read(reader io.Reader) string {
return string(buffer)
}
func (FfiConverterString) Lower(value string) RustBuffer {
func (FfiConverterString) Lower(value string) C.RustBuffer {
return stringToRustBuffer(value)
}
@ -575,16 +577,22 @@ func (FfiDestroyerString) Destroy(_ string) {}
// https://github.com/mozilla/uniffi-rs/blob/0dc031132d9493ca812c3af6e7dd60ad2ea95bf0/uniffi_bindgen/src/bindings/kotlin/templates/ObjectRuntime.kt#L31
type FfiObject struct {
pointer unsafe.Pointer
callCounter atomic.Int64
freeFunction func(unsafe.Pointer, *C.RustCallStatus)
destroyed atomic.Bool
pointer unsafe.Pointer
callCounter atomic.Int64
cloneFunction func(unsafe.Pointer, *C.RustCallStatus) unsafe.Pointer
freeFunction func(unsafe.Pointer, *C.RustCallStatus)
destroyed atomic.Bool
}
func newFfiObject(pointer unsafe.Pointer, freeFunction func(unsafe.Pointer, *C.RustCallStatus)) FfiObject {
func newFfiObject(
pointer unsafe.Pointer,
cloneFunction func(unsafe.Pointer, *C.RustCallStatus) unsafe.Pointer,
freeFunction func(unsafe.Pointer, *C.RustCallStatus),
) FfiObject {
return FfiObject{
pointer: pointer,
freeFunction: freeFunction,
pointer: pointer,
cloneFunction: cloneFunction,
freeFunction: freeFunction,
}
}
@ -602,7 +610,9 @@ func (ffiObject *FfiObject) incrementPointer(debugName string) unsafe.Pointer {
}
}
return ffiObject.pointer
return rustCall(func(status *C.RustCallStatus) unsafe.Pointer {
return ffiObject.cloneFunction(ffiObject.pointer, status)
})
}
func (ffiObject *FfiObject) decrementPointer() {
@ -626,6 +636,14 @@ func (ffiObject *FfiObject) freeRustArcPtr() {
})
}
// FerretCotManagerInterface abstracts the generated *FerretCotManager
// so callers can depend on the behavior rather than the concrete FFI
// wrapper. Method semantics mirror the underlying Rust implementation;
// see the ferret crate for details — not visible from this file.
type FerretCotManagerInterface interface {
	// GetBlockData returns the stored bytes for the given block choice and index.
	GetBlockData(blockChoice uint8, index uint64) []uint8
	RecvCot()
	RecvRot()
	SendCot()
	SendRot()
	// SetBlockData stores data under the given block choice and index.
	SetBlockData(blockChoice uint8, index uint64, data []uint8)
}
type FerretCotManager struct {
ffiObject FfiObject
}
@ -634,8 +652,10 @@ func (_self *FerretCotManager) GetBlockData(blockChoice uint8, index uint64) []u
_pointer := _self.ffiObject.incrementPointer("*FerretCotManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_ferret_fn_method_ferretcotmanager_get_block_data(
_pointer, FfiConverterUint8INSTANCE.Lower(blockChoice), FfiConverterUint64INSTANCE.Lower(index), _uniffiStatus)
return GoRustBuffer{
inner: C.uniffi_ferret_fn_method_ferretcotmanager_get_block_data(
_pointer, FfiConverterUint8INSTANCE.Lower(blockChoice), FfiConverterUint64INSTANCE.Lower(index), _uniffiStatus),
}
}))
}
@ -688,42 +708,46 @@ func (_self *FerretCotManager) SetBlockData(blockChoice uint8, index uint64, dat
return false
})
}
// Destroy releases the foreign object backing this FerretCotManager.
// The finalizer is cleared first so the garbage collector cannot trigger a
// second release after this explicit destroy.
func (object *FerretCotManager) Destroy() {
	runtime.SetFinalizer(object, nil)
	object.ffiObject.destroy()
}
type FfiConverterFerretCOTManager struct{}
type FfiConverterFerretCotManager struct{}
var FfiConverterFerretCOTManagerINSTANCE = FfiConverterFerretCOTManager{}
var FfiConverterFerretCotManagerINSTANCE = FfiConverterFerretCotManager{}
func (c FfiConverterFerretCOTManager) Lift(pointer unsafe.Pointer) *FerretCotManager {
func (c FfiConverterFerretCotManager) Lift(pointer unsafe.Pointer) *FerretCotManager {
result := &FerretCotManager{
newFfiObject(
pointer,
func(pointer unsafe.Pointer, status *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_clone_ferretcotmanager(pointer, status)
},
func(pointer unsafe.Pointer, status *C.RustCallStatus) {
C.uniffi_ferret_fn_free_ferretcotmanager(pointer, status)
}),
},
),
}
runtime.SetFinalizer(result, (*FerretCotManager).Destroy)
return result
}
func (c FfiConverterFerretCOTManager) Read(reader io.Reader) *FerretCotManager {
func (c FfiConverterFerretCotManager) Read(reader io.Reader) *FerretCotManager {
return c.Lift(unsafe.Pointer(uintptr(readUint64(reader))))
}
func (c FfiConverterFerretCOTManager) Lower(value *FerretCotManager) unsafe.Pointer {
func (c FfiConverterFerretCotManager) Lower(value *FerretCotManager) unsafe.Pointer {
// TODO: this is bad - all synchronization from ObjectRuntime.go is discarded here,
// because the pointer will be decremented immediately after this function returns,
// and someone will be left holding onto a non-locked pointer.
pointer := value.ffiObject.incrementPointer("*FerretCotManager")
defer value.ffiObject.decrementPointer()
return pointer
}
func (c FfiConverterFerretCOTManager) Write(writer io.Writer, value *FerretCotManager) {
func (c FfiConverterFerretCotManager) Write(writer io.Writer, value *FerretCotManager) {
writeUint64(writer, uint64(uintptr(c.Lower(value))))
}
@ -733,6 +757,8 @@ func (_ FfiDestroyerFerretCotManager) Destroy(value *FerretCotManager) {
value.Destroy()
}
// NetIoManagerInterface is the exported method set of *NetIoManager. It is
// empty because the object exposes no callable methods across the FFI
// boundary; it exists only to be constructed and passed by handle.
type NetIoManagerInterface interface {
}
// NetIoManager wraps a foreign (Rust-side) network I/O object handle; the
// embedded FfiObject tracks the pointer's lifetime across the FFI boundary.
type NetIoManager struct {
	ffiObject FfiObject
}
@ -742,36 +768,41 @@ func (object *NetIoManager) Destroy() {
object.ffiObject.destroy()
}
type FfiConverterNetIOManager struct{}
type FfiConverterNetIoManager struct{}
var FfiConverterNetIOManagerINSTANCE = FfiConverterNetIOManager{}
var FfiConverterNetIoManagerINSTANCE = FfiConverterNetIoManager{}
func (c FfiConverterNetIOManager) Lift(pointer unsafe.Pointer) *NetIoManager {
func (c FfiConverterNetIoManager) Lift(pointer unsafe.Pointer) *NetIoManager {
result := &NetIoManager{
newFfiObject(
pointer,
func(pointer unsafe.Pointer, status *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_clone_netiomanager(pointer, status)
},
func(pointer unsafe.Pointer, status *C.RustCallStatus) {
C.uniffi_ferret_fn_free_netiomanager(pointer, status)
}),
},
),
}
runtime.SetFinalizer(result, (*NetIoManager).Destroy)
return result
}
func (c FfiConverterNetIOManager) Read(reader io.Reader) *NetIoManager {
func (c FfiConverterNetIoManager) Read(reader io.Reader) *NetIoManager {
return c.Lift(unsafe.Pointer(uintptr(readUint64(reader))))
}
func (c FfiConverterNetIOManager) Lower(value *NetIoManager) unsafe.Pointer {
func (c FfiConverterNetIoManager) Lower(value *NetIoManager) unsafe.Pointer {
// TODO: this is bad - all synchronization from ObjectRuntime.go is discarded here,
// because the pointer will be decremented immediately after this function returns,
// and someone will be left holding onto a non-locked pointer.
pointer := value.ffiObject.incrementPointer("*NetIoManager")
defer value.ffiObject.decrementPointer()
return pointer
}
func (c FfiConverterNetIOManager) Write(writer io.Writer, value *NetIoManager) {
func (c FfiConverterNetIoManager) Write(writer io.Writer, value *NetIoManager) {
writeUint64(writer, uint64(uintptr(c.Lower(value))))
}
@ -797,7 +828,7 @@ func (_ FfiConverterOptionalString) Read(reader io.Reader) *string {
return &temp
}
func (c FfiConverterOptionalString) Lower(value *string) RustBuffer {
func (c FfiConverterOptionalString) Lower(value *string) C.RustBuffer {
return LowerIntoRustBuffer[*string](c, value)
}
@ -838,7 +869,7 @@ func (c FfiConverterSequenceUint8) Read(reader io.Reader) []uint8 {
return result
}
func (c FfiConverterSequenceUint8) Lower(value []uint8) RustBuffer {
func (c FfiConverterSequenceUint8) Lower(value []uint8) C.RustBuffer {
return LowerIntoRustBuffer[[]uint8](c, value)
}
@ -881,7 +912,7 @@ func (c FfiConverterSequenceBool) Read(reader io.Reader) []bool {
return result
}
func (c FfiConverterSequenceBool) Lower(value []bool) RustBuffer {
func (c FfiConverterSequenceBool) Lower(value []bool) C.RustBuffer {
return LowerIntoRustBuffer[[]bool](c, value)
}
@ -905,13 +936,13 @@ func (FfiDestroyerSequenceBool) Destroy(sequence []bool) {
}
func CreateFerretCotManager(party int32, threads int32, length uint64, choices []bool, netio *NetIoManager, malicious bool) *FerretCotManager {
return FfiConverterFerretCOTManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_ferret_cot_manager(FfiConverterInt32INSTANCE.Lower(party), FfiConverterInt32INSTANCE.Lower(threads), FfiConverterUint64INSTANCE.Lower(length), FfiConverterSequenceBoolINSTANCE.Lower(choices), FfiConverterNetIOManagerINSTANCE.Lower(netio), FfiConverterBoolINSTANCE.Lower(malicious), _uniffiStatus)
return FfiConverterFerretCotManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_ferret_cot_manager(FfiConverterInt32INSTANCE.Lower(party), FfiConverterInt32INSTANCE.Lower(threads), FfiConverterUint64INSTANCE.Lower(length), FfiConverterSequenceBoolINSTANCE.Lower(choices), FfiConverterNetIoManagerINSTANCE.Lower(netio), FfiConverterBoolINSTANCE.Lower(malicious), _uniffiStatus)
}))
}
func CreateNetioManager(party int32, address *string, port int32) *NetIoManager {
return FfiConverterNetIOManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return FfiConverterNetIoManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_netio_manager(FfiConverterInt32INSTANCE.Lower(party), FfiConverterOptionalStringINSTANCE.Lower(address), FfiConverterInt32INSTANCE.Lower(port), _uniffiStatus)
}))
}

File diff suppressed because it is too large Load Diff

View File

@ -124,7 +124,6 @@ func (d *dummyDiscovery) FindPeers(ctx context.Context, ns string, opts ...disco
}
func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
t.Skip("flaky test disabled")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -182,7 +181,7 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
owner := rand.Intn(numHosts)
if err := bitmaskHandlers[owner].Publish(ctx, bitmaskHandlers[owner].bitmask, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
if err := bitmaskHandlers[owner].Publish(ctx, bitmaskHandlers[owner].bitmask, msg, WithReadiness(MinBitmaskSize(numHosts-4))); err != nil {
t.Fatal(err)
}

View File

@ -580,7 +580,6 @@ func (hg *HypergraphCRDT) GetMetadataAtKey(pathKey []byte) (
path,
)
if err != nil {
hg.logger.Debug("could not get node by path", zap.Error(err))
metadata = append(metadata, hypergraph.ShardMetadata{
Commitment: make([]byte, 64),
LeafCount: 0,

View File

@ -136,6 +136,12 @@ func (set *idSet) Has(key [64]byte) bool {
return err == nil
}
// GetCoveredPrefix returns a snapshot copy of the hypergraph's covered
// prefix. The read lock makes it safe to call concurrently with writers.
// The error return is always nil; it exists to satisfy the caller-facing
// contract.
func (hg *HypergraphCRDT) GetCoveredPrefix() ([]int, error) {
	hg.mu.RLock()
	defer hg.mu.RUnlock()
	prefix := hg.getCoveredPrefix()
	return prefix, nil
}
// getCoveredPrefix returns a defensive copy of hg.coveredPrefix so callers
// cannot mutate the shared slice. Callers must hold hg.mu. The append form
// below is the canonical clone idiom: a nil source yields a nil result,
// matching slices.Clone semantics exactly.
func (hg *HypergraphCRDT) getCoveredPrefix() []int {
	return append(hg.coveredPrefix[:0:0], hg.coveredPrefix...)
}

View File

@ -1,22 +1,32 @@
package hypergraph
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
"source.quilibrium.com/quilibrium/monorepo/utils/p2p"
up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p"
)
// Commit calculates the hierarchical vector commitments of each set and returns
// the roots of all sets.
func (hg *HypergraphCRDT) Commit() map[tries.ShardKey][][]byte {
func (hg *HypergraphCRDT) Commit(
frameNumber uint64,
) (map[tries.ShardKey][][]byte, error) {
hg.mu.Lock()
defer hg.mu.Unlock()
timer := prometheus.NewTimer(CommitDuration)
defer timer.ObserveDuration()
commits := map[tries.ShardKey][][]byte{}
commits, err := hg.store.GetRootCommits(frameNumber)
if err != nil {
return nil, errors.Wrap(err, "commit")
}
ensureSet := func(shardKey tries.ShardKey) {
if _, ok := commits[shardKey]; !ok {
@ -28,25 +38,181 @@ func (hg *HypergraphCRDT) Commit() map[tries.ShardKey][][]byte {
}
}
txn, err := hg.store.NewTransaction(false)
if err != nil {
return nil, errors.Wrap(err, "commit shard")
}
touched := map[tries.ShardKey][]bool{}
for shardKey, vertexAdds := range hg.vertexAdds {
if r, ok := commits[shardKey]; ok && len(r[0]) != 64 {
continue
}
root := vertexAdds.GetTree().Commit(false)
ensureSet(shardKey)
commits[shardKey][0] = root
err = hg.store.SetShardCommit(
txn,
frameNumber,
"adds",
"vertex",
shardKey.L2[:],
root,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
touched[shardKey] = make([]bool, 4)
touched[shardKey][0] = true
}
for shardKey, vertexRemoves := range hg.vertexRemoves {
if r, ok := commits[shardKey]; ok && len(r[1]) != 64 {
continue
}
root := vertexRemoves.GetTree().Commit(false)
ensureSet(shardKey)
commits[shardKey][1] = root
err = hg.store.SetShardCommit(
txn,
frameNumber,
"removes",
"vertex",
shardKey.L2[:],
root,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
if _, ok := touched[shardKey]; !ok {
touched[shardKey] = make([]bool, 4)
}
touched[shardKey][1] = true
}
for shardKey, hyperedgeAdds := range hg.hyperedgeAdds {
if r, ok := commits[shardKey]; ok && len(r[2]) != 64 {
continue
}
root := hyperedgeAdds.GetTree().Commit(false)
ensureSet(shardKey)
commits[shardKey][2] = root
err = hg.store.SetShardCommit(
txn,
frameNumber,
"adds",
"hyperedge",
shardKey.L2[:],
root,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
if _, ok := touched[shardKey]; !ok {
touched[shardKey] = make([]bool, 4)
}
touched[shardKey][2] = true
}
for shardKey, hyperedgeRemoves := range hg.hyperedgeRemoves {
if r, ok := commits[shardKey]; ok && len(r[3]) != 64 {
continue
}
root := hyperedgeRemoves.GetTree().Commit(false)
ensureSet(shardKey)
commits[shardKey][3] = root
err = hg.store.SetShardCommit(
txn,
frameNumber,
"removes",
"hyperedge",
shardKey.L2[:],
root,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
if _, ok := touched[shardKey]; !ok {
touched[shardKey] = make([]bool, 4)
}
touched[shardKey][3] = true
}
for shardKey, touchSet := range touched {
if !touchSet[0] {
err = hg.store.SetShardCommit(
txn,
frameNumber,
"adds",
"vertex",
shardKey.L2[:],
make([]byte, 64),
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
}
if !touchSet[1] {
err = hg.store.SetShardCommit(
txn,
frameNumber,
"removes",
"vertex",
shardKey.L2[:],
make([]byte, 64),
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
}
if !touchSet[2] {
err = hg.store.SetShardCommit(
txn,
frameNumber,
"adds",
"hyperedge",
shardKey.L2[:],
make([]byte, 64),
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
}
if !touchSet[3] {
err = hg.store.SetShardCommit(
txn,
frameNumber,
"removes",
"hyperedge",
shardKey.L2[:],
make([]byte, 64),
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
}
}
if err := txn.Commit(); err != nil {
txn.Abort()
return nil, errors.Wrap(err, "commit shard")
}
// Update metrics
@ -64,7 +230,267 @@ func (hg *HypergraphCRDT) Commit() map[tries.ShardKey][][]byte {
SizeTotal.Set(size)
}
return commits
return commits, nil
}
// CommitShard calculates the sub-scoped vector commitments of each phase set
// (vertex adds, vertex removes, hyperedge adds, hyperedge removes) at the
// tree position identified by shardAddress, persists them for frameNumber,
// and returns the four commitments in that order.
//
// The first 32 bytes of shardAddress select the shard key; any trailing
// bytes extend the traversal path below the shard root. A phase set with no
// node at that position contributes an all-zero 64-byte commitment.
func (hg *HypergraphCRDT) CommitShard(
	frameNumber uint64,
	shardAddress []byte,
) ([][]byte, error) {
	hg.mu.Lock()
	defer hg.mu.Unlock()

	if len(shardAddress) < 32 {
		return nil, errors.Wrap(errors.New("invalid shard address"), "commit shard")
	}

	l1 := up2p.GetBloomFilterIndices(shardAddress[:32], 256, 3)
	shardKey := tries.ShardKey{
		L1: [3]byte(l1),
		L2: [32]byte(shardAddress[:32]),
	}

	vertexAddSet, vertexRemoveSet := hg.getOrCreateIdSet(
		shardKey,
		hg.vertexAdds,
		hg.vertexRemoves,
		"vertex",
		hg.getCoveredPrefix(),
	)
	vertexAddTree := vertexAddSet.GetTree()
	vertexAddTree.Commit(false)
	vertexRemoveTree := vertexRemoveSet.GetTree()
	vertexRemoveTree.Commit(false)

	path := tries.GetFullPath(shardAddress[:32])
	for _, p := range shardAddress[32:] {
		path = append(path, int(p))
	}

	// A "not found" lookup is not an error: the shard simply has no data in
	// that phase set yet, and its commitment stays all-zero.
	vertexAddNode, err := vertexAddTree.GetByPath(path)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, errors.Wrap(err, "commit shard")
	}
	vertexRemoveNode, err := vertexRemoveTree.GetByPath(path)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, errors.Wrap(err, "commit shard")
	}

	hyperedgeAddSet, hyperedgeRemoveSet := hg.getOrCreateIdSet(
		shardKey,
		hg.hyperedgeAdds,
		hg.hyperedgeRemoves,
		"hyperedge",
		hg.getCoveredPrefix(),
	)
	hyperedgeAddTree := hyperedgeAddSet.GetTree()
	hyperedgeAddTree.Commit(false)
	hyperedgeRemoveTree := hyperedgeRemoveSet.GetTree()
	hyperedgeRemoveTree.Commit(false)

	// BUGFIX: these two lookups previously queried vertexAddTree and
	// vertexRemoveTree, so the persisted hyperedge commitments silently
	// duplicated the vertex commitments.
	hyperedgeAddNode, err := hyperedgeAddTree.GetByPath(path)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, errors.Wrap(err, "commit shard")
	}
	hyperedgeRemoveNode, err := hyperedgeRemoveTree.GetByPath(path)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, errors.Wrap(err, "commit shard")
	}

	vertexAddCommit := shardNodeCommitment(vertexAddNode)
	vertexRemoveCommit := shardNodeCommitment(vertexRemoveNode)
	hyperedgeAddCommit := shardNodeCommitment(hyperedgeAddNode)
	hyperedgeRemoveCommit := shardNodeCommitment(hyperedgeRemoveNode)

	txn, err := hg.store.NewTransaction(false)
	if err != nil {
		return nil, errors.Wrap(err, "commit shard")
	}

	// Persist all four phase-set commitments in one transaction, aborting on
	// the first failure.
	commitSpecs := []struct {
		phaseType  string
		atomType   string
		commitment []byte
	}{
		{"adds", "vertex", vertexAddCommit},
		{"removes", "vertex", vertexRemoveCommit},
		{"adds", "hyperedge", hyperedgeAddCommit},
		{"removes", "hyperedge", hyperedgeRemoveCommit},
	}
	for _, spec := range commitSpecs {
		if err := hg.store.SetShardCommit(
			txn,
			frameNumber,
			spec.phaseType,
			spec.atomType,
			shardAddress,
			spec.commitment,
		); err != nil {
			txn.Abort()
			return nil, errors.Wrap(err, "commit shard")
		}
	}

	if err := txn.Commit(); err != nil {
		txn.Abort()
		return nil, errors.Wrap(err, "commit shard")
	}

	return [][]byte{
		vertexAddCommit,
		vertexRemoveCommit,
		hyperedgeAddCommit,
		hyperedgeRemoveCommit,
	}, nil
}

// shardNodeCommitment extracts the 64-byte commitment carried by a lazy
// vector commitment tree node, or returns an all-zero commitment when the
// node is absent or of an unrecognized type.
func shardNodeCommitment(node any) []byte {
	switch n := node.(type) {
	case *tries.LazyVectorCommitmentBranchNode:
		return n.Commitment
	case *tries.LazyVectorCommitmentLeafNode:
		return n.Commitment
	default:
		return make([]byte, 64)
	}
}
// GetShardCommits retrieves the persisted sub-scoped vector commitments of
// each phase set at the tree level of the shard address, returning them in
// the order: vertex adds, vertex removes, hyperedge adds, hyperedge removes.
// If any commitment does not already exist, an error is returned.
func (hg *HypergraphCRDT) GetShardCommits(
	frameNumber uint64,
	shardAddress []byte,
) ([][]byte, error) {
	hg.mu.RLock()
	defer hg.mu.RUnlock()

	// phase/atom pairs in result order; label tags the wrapped error so a
	// failure identifies which lookup failed (va/vr/ha/hr). The last label
	// was previously "(he)", inconsistent with the va/vr/ha scheme.
	specs := []struct {
		phaseType string
		atomType  string
		label     string
	}{
		{"adds", "vertex", "va"},
		{"removes", "vertex", "vr"},
		{"adds", "hyperedge", "ha"},
		{"removes", "hyperedge", "hr"},
	}

	commits := make([][]byte, 0, len(specs))
	for _, spec := range specs {
		commit, err := hg.store.GetShardCommit(
			frameNumber,
			spec.phaseType,
			spec.atomType,
			shardAddress,
		)
		if err != nil {
			return nil, errors.Wrap(
				errors.Wrap(
					err,
					fmt.Sprintf("shard address: (%s) %x", spec.label, shardAddress),
				),
				"get shard commits",
			)
		}
		commits = append(commits, commit)
	}

	return commits, nil
}
// CreateTraversalProofs generates proofs for multiple keys in a shard. The
@ -135,6 +561,7 @@ func (hg *HypergraphCRDT) VerifyTraversalProof(
domain [32]byte,
atomType hypergraph.AtomType,
phaseType hypergraph.PhaseType,
root []byte,
traversalProof *tries.TraversalProof,
) (bool, error) {
hg.mu.RLock()
@ -168,15 +595,12 @@ func (hg *HypergraphCRDT) VerifyTraversalProof(
)
}
valid := true
var valid bool
var err error
if phaseType == hypergraph.AddsPhaseType {
if !addSet.GetTree().Verify(traversalProof) {
valid = false
}
valid, err = addSet.GetTree().Verify(root, traversalProof)
} else {
if !removeSet.GetTree().Verify(traversalProof) {
valid = false
}
valid, err = removeSet.GetTree().Verify(root, traversalProof)
}
TraversalProofVerifyTotal.WithLabelValues(
@ -184,5 +608,5 @@ func (hg *HypergraphCRDT) VerifyTraversalProof(
string(phaseType),
boolToString(valid),
).Inc()
return valid, nil
return valid, err
}

View File

@ -25,7 +25,7 @@ func (hg *HypergraphCRDT) HyperStream(
if !hg.syncController.TryEstablishSyncSession() {
return errors.New("unavailable")
}
hg.Commit()
hg.mu.RLock()
defer hg.mu.RUnlock()
defer hg.syncController.EndSyncSession()
@ -63,7 +63,6 @@ func (hg *HypergraphCRDT) Sync(
return errors.New("unavailable")
}
hg.Commit()
hg.mu.RLock()
defer hg.mu.RUnlock()
defer hg.syncController.EndSyncSession()

View File

@ -147,7 +147,6 @@ func (hg *HypergraphCRDT) addVertex(
) error {
timer := prometheus.NewTimer(AddVertexDuration)
defer timer.ObserveDuration()
shardAddr := hypergraph.GetShardKey(v)
addSet, _ := hg.getOrCreateIdSet(
shardAddr,

View File

@ -2,6 +2,7 @@ package app
import (
"fmt"
"os"
"go.uber.org/zap"
consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
@ -56,7 +57,10 @@ func newDataWorkerNode(
}, nil
}
func (n *DataWorkerNode) Start() error {
func (n *DataWorkerNode) Start(
done chan os.Signal,
quitCh chan struct{},
) error {
go func() {
err := n.ipcServer.Start()
if err != nil {
@ -70,31 +74,33 @@ func (n *DataWorkerNode) Start() error {
n.logger.Info("data worker node started", zap.Uint("core_id", n.coreId))
defer func() {
err := n.pebble.Close()
if err != nil {
n.logger.Error(
"database shut down with errors",
zap.Error(err),
zap.Uint("core_id", n.coreId),
)
} else {
n.logger.Info(
"database stopped cleanly",
zap.Uint("core_id", n.coreId),
)
}
}()
select {
case <-n.quit:
case <-done:
}
<-n.quit
n.ipcServer.Stop()
err := n.pebble.Close()
if err != nil {
n.logger.Error(
"database shut down with errors",
zap.Error(err),
zap.Uint("core_id", n.coreId),
)
} else {
n.logger.Info(
"database stopped cleanly",
zap.Uint("core_id", n.coreId),
)
}
quitCh <- struct{}{}
return nil
}
func (n *DataWorkerNode) Stop() {
n.logger.Info("stopping data worker node")
// Signal quit
if n.quit != nil {
close(n.quit)
}

File diff suppressed because it is too large Load Diff

View File

@ -198,9 +198,9 @@ func NewDHTNode(*zap.Logger, *config.Config, uint) (*DHTNode, error) {
))
}
// func NewDBConsole(*config.Config) (*DBConsole, error) {
// panic(wire.Build(newDBConsole))
// }
// NewDBConsole is a wire injector declaration for DBConsole. The panic is
// never executed at runtime: wire replaces this body with generated
// construction code (presumably in the sibling wire_gen file — this is the
// standard google/wire stub pattern).
func NewDBConsole(*config.Config) (*DBConsole, error) {
	panic(wire.Build(newDBConsole))
}
func NewClockStore(
*zap.Logger,

View File

@ -55,6 +55,14 @@ func NewDHTNode(logger *zap.Logger, configConfig *config.Config, uint2 uint) (*D
return dhtNode, nil
}
// NewDBConsole builds a DBConsole from the given configuration, propagating
// any construction error from the underlying provider.
func NewDBConsole(configConfig *config.Config) (*DBConsole, error) {
	console, err := newDBConsole(configConfig)
	if err != nil {
		return nil, err
	}
	return console, nil
}
func NewClockStore(logger *zap.Logger, configConfig *config.Config, uint2 uint) (store.ClockStore, error) {
dbConfig := configConfig.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, uint2)
@ -96,6 +104,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
return nil, err
}
pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger)
pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger)
bedlamCompiler := compiler.NewBedlamCompiler()
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
dynamicFeeManager := fees.NewDynamicFeeManager(logger, kzgInclusionProver)
@ -107,7 +116,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
asertDifficultyAdjuster := difficulty.NewAsertDifficultyAdjuster(uint64_2, int64_2, uint32_2)
optimizedProofOfMeaningfulWorkRewardIssuance := reward.NewOptRewardIssuance()
doubleRatchetEncryptedChannel := channel.NewDoubleRatchetEncryptedChannel()
appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, proxyBlossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel)
appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, proxyBlossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel)
dataWorkerIPCServer := provideDataWorkerIPC(rpcMultiaddr, config2, cachedSignerRegistry, proverRegistry, appConsensusEngineFactory, inMemoryPeerInfoManager, frameProver, logger, coreId, parentProcess)
globalTimeReel, err := provideGlobalTimeReel(appConsensusEngineFactory)
if err != nil {
@ -151,6 +160,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
engineConfig := config2.Engine
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId)
pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger)
pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger)
bedlamCompiler := compiler.NewBedlamCompiler()
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
dynamicFeeManager := fees.NewDynamicFeeManager(logger, kzgInclusionProver)
@ -162,7 +172,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
asertDifficultyAdjuster := difficulty.NewAsertDifficultyAdjuster(uint64_2, int64_2, uint32_2)
optimizedProofOfMeaningfulWorkRewardIssuance := reward.NewOptRewardIssuance()
doubleRatchetEncryptedChannel := channel.NewDoubleRatchetEncryptedChannel()
appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel)
appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel)
dataWorkerIPCServer := provideDataWorkerIPC(rpcMultiaddr, config2, cachedSignerRegistry, proverRegistry, appConsensusEngineFactory, inMemoryPeerInfoManager, frameProver, logger, coreId, parentProcess)
globalTimeReel, err := provideGlobalTimeReel(appConsensusEngineFactory)
if err != nil {

View File

@ -3,9 +3,7 @@ package app
import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"math/big"
"slices"
"strings"
@ -43,7 +41,6 @@ import (
tp2p "source.quilibrium.com/quilibrium/monorepo/types/p2p"
"source.quilibrium.com/quilibrium/monorepo/types/store"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p"
)
@ -63,6 +60,7 @@ type AppConsensusEngine struct {
keyStore store.KeyStore
clockStore store.ClockStore
inboxStore store.InboxStore
shardsStore store.ShardsStore
hypergraphStore store.HypergraphStore
frameProver crypto.FrameProver
inclusionProver crypto.InclusionProver
@ -89,6 +87,8 @@ type AppConsensusEngine struct {
currentDifficultyMu sync.RWMutex
pendingMessages []*protobufs.Message
pendingMessagesMu sync.RWMutex
collectedMessages map[string][]*protobufs.Message
collectedMessagesMu sync.RWMutex
lastProvenFrameTime time.Time
lastProvenFrameTimeMu sync.RWMutex
frameStore map[string]*protobufs.AppShardFrame
@ -155,6 +155,7 @@ func NewAppConsensusEngine(
keyStore store.KeyStore,
clockStore store.ClockStore,
inboxStore store.InboxStore,
shardsStore store.ShardsStore,
hypergraphStore store.HypergraphStore,
frameProver crypto.FrameProver,
inclusionProver crypto.InclusionProver,
@ -191,6 +192,7 @@ func NewAppConsensusEngine(
keyStore: keyStore,
clockStore: clockStore,
inboxStore: inboxStore,
shardsStore: shardsStore,
hypergraphStore: hypergraphStore,
frameProver: frameProver,
inclusionProver: inclusionProver,
@ -210,7 +212,9 @@ func NewAppConsensusEngine(
peerInfoManager: peerInfoManager,
executors: make(map[string]execution.ShardExecutionEngine),
frameStore: make(map[string]*protobufs.AppShardFrame),
collectedMessages: make(map[string][]*protobufs.Message),
consensusMessageQueue: make(chan *pb.Message, 1000),
proverMessageQueue: make(chan *pb.Message, 1000),
frameMessageQueue: make(chan *pb.Message, 100),
globalFrameMessageQueue: make(chan *pb.Message, 100),
globalAlertMessageQueue: make(chan *pb.Message, 100),
@ -299,6 +303,9 @@ func NewAppConsensusEngine(
decafConstructor,
compiler,
frameProver,
nil,
nil,
nil,
false, // includeGlobal
)
if err != nil {
@ -348,14 +355,14 @@ func NewAppConsensusEngine(
engine.appAddress,
)
if err != nil {
return 1
return 999
}
if len(currentSet) > 6 {
return 6
}
return uint64(len(currentSet))
return uint64(len(currentSet)) * 2 / 3
}
}
@ -568,27 +575,14 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error {
e.cancel()
}
// Unsubscribe from pubsub to stop new messages from arriving
e.pubsub.Unsubscribe(e.getConsensusMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getConsensusMessageBitmask())
e.pubsub.Unsubscribe(e.getProverMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getProverMessageBitmask())
e.pubsub.Unsubscribe(e.getFrameMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getFrameMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalFrameMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalFrameMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalAlertMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalAlertMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalPeerInfoMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalPeerInfoMessageBitmask())
e.pubsub.Unsubscribe(e.getDispatchMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getDispatchMessageBitmask())
// Stop the state machine
if e.stateMachine != nil {
if err := e.stateMachine.Stop(); err != nil && !force {
e.logger.Warn("error stopping state machine", zap.Error(err))
errChan <- errors.Wrap(err, "stop state machine")
select {
case errChan <- errors.Wrap(err, "stop state machine"):
default:
}
}
}
@ -596,7 +590,10 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error {
if e.eventDistributor != nil {
if err := e.eventDistributor.Stop(); err != nil && !force {
e.logger.Warn("error stopping event distributor", zap.Error(err))
errChan <- errors.Wrap(err, "stop event distributor")
select {
case errChan <- errors.Wrap(err, "stop event distributor"):
default:
}
}
}
@ -604,7 +601,10 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error {
if e.executionManager != nil {
if err := e.executionManager.StopAll(force); err != nil && !force {
e.logger.Warn("error stopping execution engines", zap.Error(err))
errChan <- errors.Wrap(err, "stop execution engines")
select {
case errChan <- errors.Wrap(err, "stop execution engines"):
default:
}
}
}
@ -628,7 +628,10 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error {
case <-time.After(timeout):
if !force {
e.logger.Error("timeout waiting for graceful shutdown")
errChan <- errors.New("timeout waiting for graceful shutdown")
select {
case errChan <- errors.New("timeout waiting for graceful shutdown"):
default:
}
} else {
e.logger.Warn("forced shutdown after timeout")
}
@ -639,6 +642,22 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error {
e.stateMachine.Close()
}
// Unsubscribe from pubsub to stop new messages from arriving
e.pubsub.Unsubscribe(e.getConsensusMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getConsensusMessageBitmask())
e.pubsub.Unsubscribe(e.getProverMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getProverMessageBitmask())
e.pubsub.Unsubscribe(e.getFrameMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getFrameMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalFrameMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalFrameMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalAlertMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalAlertMessageBitmask())
e.pubsub.Unsubscribe(e.getGlobalPeerInfoMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getGlobalPeerInfoMessageBitmask())
e.pubsub.Unsubscribe(e.getDispatchMessageBitmask(), false)
e.pubsub.UnregisterValidator(e.getDispatchMessageBitmask())
close(errChan)
return errChan
}
@ -845,7 +864,7 @@ func (e *AppConsensusEngine) revert(
l2 := make([]byte, 32)
copy(l2, e.appAddress[:min(len(e.appAddress), 32)])
shardKey := qcrypto.ShardKey{
shardKey := tries.ShardKey{
L1: [3]byte(bits),
L2: [32]byte(l2),
}
@ -867,9 +886,12 @@ func (e *AppConsensusEngine) materialize(
var state state.State
state = hgstate.NewHypergraphState(e.hypergraph)
acceptedMessages := []*protobufs.MessageBundle{}
for i, request := range frame.Requests {
e.logger.Debug(
"processing request",
zap.Int("message_index", i),
)
requestBytes, err := request.ToCanonicalBytes()
if err != nil {
@ -896,7 +918,7 @@ func (e *AppConsensusEngine) materialize(
zap.Int("message_index", i),
zap.Error(err),
)
continue
return errors.Wrap(err, "materialize")
}
e.currentDifficultyMu.RLock()
@ -921,7 +943,7 @@ func (e *AppConsensusEngine) materialize(
baseline,
big.NewInt(int64(frame.Header.FeeMultiplierVote)),
),
e.appAddress,
e.appAddress[:32],
requestBytes,
state,
)
@ -935,16 +957,12 @@ func (e *AppConsensusEngine) materialize(
}
state = result.State
acceptedMessages = append(acceptedMessages, request)
}
err := e.proverRegistry.ProcessStateTransition(
state,
frame.Header.FrameNumber,
e.logger.Debug(
"processed transactions",
zap.Any("current_changeset_count", len(state.Changeset())),
)
if err != nil {
return errors.Wrap(err, "materialize")
}
if err := state.Commit(); err != nil {
return errors.Wrap(err, "materialize")
@ -994,7 +1012,7 @@ func (e *AppConsensusEngine) calculateRequestsRoot(
return make([]byte, 64), nil
}
tree := &qcrypto.VectorCommitmentTree{}
tree := &tries.VectorCommitmentTree{}
for _, msg := range messages {
hash := sha3.Sum256(msg.Payload)
@ -1341,18 +1359,14 @@ func (e *AppConsensusEngine) internalProveFrame(
return nil, errors.New("no proving key available")
}
bits := up2p.GetBloomFilterIndices(e.appAddress, 256, 3)
l2 := make([]byte, 32)
copy(l2, e.appAddress[:min(len(e.appAddress), 32)])
shardKey := qcrypto.ShardKey{
L1: [3]byte(bits),
L2: [32]byte(l2),
stateRoots, err := e.hypergraph.CommitShard(
previousFrame.Header.FrameNumber+1,
e.appAddress,
)
if err != nil {
return nil, err
}
root := e.hypergraph.Commit()
stateRoots := root[shardKey]
if len(stateRoots) == 0 {
stateRoots = make([][]byte, 4)
stateRoots[0] = make([]byte, 64)
@ -1363,6 +1377,11 @@ func (e *AppConsensusEngine) internalProveFrame(
txMap := map[string][][]byte{}
for i, message := range messages {
e.logger.Debug(
"locking addresses for message",
zap.Int("index", i),
zap.String("tx_hash", hex.EncodeToString(message.Hash)),
)
lockedAddrs, err := e.executionManager.Lock(
previousFrame.Header.FrameNumber+1,
message.Address,
@ -1385,7 +1404,7 @@ func (e *AppConsensusEngine) internalProveFrame(
txMap[string(message.Hash)] = lockedAddrs
}
err := e.executionManager.Unlock()
err = e.executionManager.Unlock()
if err != nil {
e.logger.Error("could not unlock", zap.Error(err))
return nil, err
@ -1435,10 +1454,19 @@ func (e *AppConsensusEngine) internalProveFrame(
}
}
rootCommit := make([]byte, 64)
if len(requestsRoot[32:]) > 0 {
tree, err := tries.DeserializeNonLazyTree(requestsRoot[32:])
if err != nil {
return nil, err
}
rootCommit = tree.Commit(e.inclusionProver, false)
}
newHeader, err := e.frameProver.ProveFrameHeader(
previousFrame.Header,
e.appAddress,
requestsRoot[:32],
rootCommit,
stateRoots,
e.getProverAddress(),
signer,
@ -1473,206 +1501,6 @@ func (e *AppConsensusEngine) messagesToRequests(
return requests
}
// getWorkerIndexFromFilter determines the worker index based on the filter
// (the hex-encoded app address). Returns 0 when no app address is set or when
// no configured data worker filter matches it.
func (e *AppConsensusEngine) getWorkerIndexFromFilter() int {
	if len(e.appAddress) == 0 {
		// No app address configured; fall back to the first worker.
		return 0
	}

	target := hex.EncodeToString(e.appAddress)
	for idx, candidate := range e.config.Engine.DataWorkerFilters {
		if candidate == target {
			return idx
		}
	}

	// No configured filter matched; default to the first worker.
	return 0
}
// getWorkerPubsubAddrs returns pubsub addresses for a specific worker.
//
// A worker-specific multiaddr from configuration takes precedence; otherwise
// the address is derived from the base listen multiaddr pattern plus the
// worker's port offset. Discovered addresses (ownAddrs) that match are
// preferred, with the configured/constructed pattern used as a fallback.
func (e *AppConsensusEngine) getWorkerPubsubAddrs(
	ownAddrs []multiaddr.Multiaddr,
	workerIndex int,
) []string {
	result := make([]string, 0)

	hasSpecific := workerIndex < len(e.config.Engine.DataWorkerP2PMultiaddrs) &&
		e.config.Engine.DataWorkerP2PMultiaddrs[workerIndex] != ""

	if hasSpecific {
		configured := e.config.Engine.DataWorkerP2PMultiaddrs[workerIndex]
		// Prefer discovered addresses that match the configured one.
		for _, candidate := range ownAddrs {
			if e.matchesPattern(candidate.String(), configured) {
				result = append(result, candidate.String())
			}
		}
		// No discovered match: use the configured address verbatim.
		if len(result) == 0 {
			result = append(result, configured)
		}
		return result
	}

	// Derive the expected listen address from the base pattern and port.
	port := e.config.Engine.DataWorkerBaseP2PPort + uint16(workerIndex)
	pattern := fmt.Sprintf(e.config.Engine.DataWorkerBaseListenMultiaddr, port)
	for _, candidate := range ownAddrs {
		if e.matchesPattern(candidate.String(), pattern) {
			result = append(result, candidate.String())
		}
	}
	// No discovered match: construct from the pattern when one exists.
	if len(result) == 0 && pattern != "" {
		result = append(result, pattern)
	}
	return result
}
// getWorkerStreamAddrs returns stream addresses for a specific worker.
//
// Mirrors getWorkerPubsubAddrs: a worker-specific stream multiaddr from
// configuration takes precedence; otherwise the address is derived from the
// base listen multiaddr pattern plus the worker's stream port offset.
// Discovered addresses (ownAddrs) that match are preferred, with the
// configured/constructed pattern used as a fallback.
func (e *AppConsensusEngine) getWorkerStreamAddrs(
	ownAddrs []multiaddr.Multiaddr,
	workerIndex int,
) []string {
	result := make([]string, 0)

	hasSpecific := workerIndex < len(e.config.Engine.DataWorkerStreamMultiaddrs) &&
		e.config.Engine.DataWorkerStreamMultiaddrs[workerIndex] != ""

	if hasSpecific {
		configured := e.config.Engine.DataWorkerStreamMultiaddrs[workerIndex]
		// Prefer discovered addresses that match the configured one.
		for _, candidate := range ownAddrs {
			if e.matchesPattern(candidate.String(), configured) {
				result = append(result, candidate.String())
			}
		}
		// No discovered match: use the configured address verbatim.
		if len(result) == 0 {
			result = append(result, configured)
		}
		return result
	}

	// Derive the expected listen address from the base pattern and port.
	port := e.config.Engine.DataWorkerBaseStreamPort + uint16(workerIndex)
	pattern := fmt.Sprintf(e.config.Engine.DataWorkerBaseListenMultiaddr, port)
	for _, candidate := range ownAddrs {
		if e.matchesPattern(candidate.String(), pattern) {
			result = append(result, candidate.String())
		}
	}
	// No discovered match: construct from the pattern when one exists.
	if len(result) == 0 && pattern != "" {
		result = append(result, pattern)
	}
	return result
}
// matchesProtocol checks if an address matches a configured protocol pattern.
// Wildcard/loopback host components in the pattern (0.0.0.0, 127.0.0.1, ::,
// ::1) match any host; e.g. /ip4/1.2.3.4/tcp/8336/quic-v1 matches
// /ip4/0.0.0.0/tcp/8336/quic-v1. All other components must match exactly and
// the component counts must be equal.
func (e *AppConsensusEngine) matchesProtocol(addr, pattern string) bool {
	patternParts := strings.Split(pattern, "/")
	addrParts := strings.Split(addr, "/")
	if len(patternParts) != len(addrParts) {
		return false
	}

	for i, want := range patternParts {
		wildcardV4 := i > 0 && patternParts[i-1] == "ip4" &&
			(want == "0.0.0.0" || want == "127.0.0.1")
		wildcardV6 := i > 0 && patternParts[i-1] == "ip6" &&
			(want == "::" || want == "::1")
		if wildcardV4 || wildcardV6 {
			// Host component in the pattern is a wildcard; accept any value.
			continue
		}
		if want != addrParts[i] {
			return false
		}
	}
	return true
}
// matchesPattern is more flexible than matchesProtocol, allowing partial
// matches: in addition to the exact protocol match, a "%d" component in the
// pattern matches any value (used as a port placeholder in base listen
// multiaddr templates). Wildcard/loopback host components behave as in
// matchesProtocol.
func (e *AppConsensusEngine) matchesPattern(addr, pattern string) bool {
	// Exact protocol match wins immediately.
	if e.matchesProtocol(addr, pattern) {
		return true
	}

	// Only port-placeholder patterns get the looser treatment.
	if !strings.Contains(pattern, "%d") {
		return false
	}

	addrParts := strings.Split(addr, "/")
	patternParts := strings.Split(pattern, "/")
	if len(addrParts) != len(patternParts) {
		return false
	}

	for i, want := range patternParts {
		if want == "%d" {
			// Port placeholder; any value is acceptable.
			continue
		}
		if i > 0 && patternParts[i-1] == "ip4" &&
			(want == "0.0.0.0" || want == "127.0.0.1") {
			continue
		}
		if i > 0 && patternParts[i-1] == "ip6" &&
			(want == "::" || want == "::1") {
			continue
		}
		if want != addrParts[i] {
			return false
		}
	}
	return true
}
// signPeerInfo signs the peer info message via the pubsub key. The signature
// payload covers the "peerinfo" domain prefix, the peer ID, and the
// timestamp; note that multiaddrs and filter are currently not part of the
// signed payload.
func (e *AppConsensusEngine) signPeerInfo(
	info *protobufs.PeerInfo,
) ([]byte, error) {
	payload := append([]byte("peerinfo"), info.PeerId...)
	payload = binary.BigEndian.AppendUint64(payload, uint64(info.Timestamp))
	return e.pubsub.SignMessage(payload)
}
// SetGlobalClient sets the global client manually, used for tests
func (e *AppConsensusEngine) SetGlobalClient(
client protobufs.GlobalServiceClient,

View File

@ -20,7 +20,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/protobuf/proto"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/bulletproofs"
"source.quilibrium.com/quilibrium/monorepo/channel"
@ -35,6 +35,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/consensus/reward"
consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/validator"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -42,6 +43,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/protobufs"
tconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
thypergraph "source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
tkeys "source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/vdf"
"source.quilibrium.com/quilibrium/monorepo/verenc"
@ -123,7 +125,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
ScenarioStateRewind: "State Rewind",
}
appAddress := []byte{0xCC, 0x01, 0x02, 0x03}
appAddress := token.QUIL_TOKEN_ADDRESS
// Create nodes
type chaosNode struct {
@ -134,6 +136,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
frameHistory []*protobufs.AppShardFrame
quit chan struct{}
mu sync.RWMutex
gsc *mockGlobalClientLocks
}
nodes := make([]*chaosNode, numNodes)
@ -215,6 +218,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
nodeKeyStore := store.NewPebbleKeyStore(nodeDB, logger)
nodeClockStore := store.NewPebbleClockStore(nodeDB, logger)
nodeInboxStore := store.NewPebbleInboxStore(nodeDB, logger)
nodeShardsStore := store.NewPebbleShardsStore(nodeDB, logger)
nodeHg := hypergraph.NewHypergraph(logger, nodeHypergraphStore, nodeInclusionProver, []int{}, &tests.Nopthenticator{})
// Create mock pubsub for network simulation
@ -255,6 +259,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
nodeKeyStore,
nodeClockStore,
nodeInboxStore,
nodeShardsStore,
nodeHypergraphStore,
frameProver,
nodeInclusionProver,
@ -294,6 +299,8 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
cleanup := func() {
nodeDB.Close()
}
mockGSC := &mockGlobalClientLocks{}
engine.SetGlobalClient(mockGSC)
return engine, pubsub, globalTimeReel, cleanup
}
@ -312,8 +319,12 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
executors: make(map[string]*mockIntegrationExecutor),
frameHistory: make([]*protobufs.AppShardFrame, 0),
quit: make(chan struct{}),
gsc: &mockGlobalClientLocks{},
}
// ensure unique global service client per node
node.engine.SetGlobalClient(node.gsc)
// Subscribe to frames
pubsub.Subscribe(engine.getConsensusMessageBitmask(), func(message *pb.Message) error {
frame := &protobufs.AppShardFrame{}
@ -352,6 +363,52 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
node.pubsub.peerCount = numNodes - 1
}
// Pre-generate valid payloads and stash for broadcast; commit initial world state for verification
// Create per-node hypergraphs slice to feed payload creation
hgs := make([]thypergraph.Hypergraph, 0, numNodes)
for _, node := range nodes {
hgs = append(hgs, node.engine.hypergraph)
}
t.Logf("Step 5.a: Generating 6,000 pending transactions")
pending := make([]*token.PendingTransaction, 0, 6)
for i := 0; i < 6; i++ {
for j := 0; j < 1000; j++ {
tx := createValidPendingTxPayload(t, hgs, keys.NewInMemoryKeyManager(bc, dc), byte(i))
pending = append(pending, tx)
}
}
t.Logf("Step 5.b: Sealing world state at genesis")
// Seal initial world state for reference in verification
for _, hg := range hgs {
hg.Commit(0)
}
// Encode payloads as MessageBundle and stash
stashedPayloads := make([][]byte, 0, len(pending))
for _, tx := range pending {
require.NoError(t, tx.Prove(0))
req := &protobufs.MessageBundle{
Requests: []*protobufs.MessageRequest{
{Request: &protobufs.MessageRequest_PendingTransaction{PendingTransaction: tx.ToProtobuf()}},
},
Timestamp: time.Now().UnixMilli(),
}
out, err := req.ToCanonicalBytes()
require.NoError(t, err)
stashedPayloads = append(stashedPayloads, out)
}
// Record hashes into each node's global service client for lock checks
for _, node := range nodes {
for _, payload := range stashedPayloads {
h := sha3.Sum256(payload)
node.gsc.hashes = append(node.gsc.hashes, h[:])
node.gsc.committed = true
}
}
// Chaos test state
type chaosState struct {
currentFrameNumber uint64
@ -480,26 +537,20 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) {
messageBitmask[0] = 0x01
copy(messageBitmask[1:], appAddress)
// Send batch of messages
msgCount := random.Intn(20) + 10
for i := 0; i < msgCount; i++ {
msg := &protobufs.Message{
Hash: []byte(fmt.Sprintf("chaos-msg-%d-%d", time.Now().Unix(), i)),
Payload: []byte(fmt.Sprintf("chaos payload %d", i)),
}
if msgData, err := proto.Marshal(msg); err == nil {
// Pick random non-partitioned node to send from
for j, node := range nodes {
if !state.partitionedNodes[j] {
node.pubsub.PublishToBitmask(node.engine.getConsensusMessageBitmask(), msgData)
break
}
// Broadcast pre-generated valid payloads to ensure end-to-end processing
sent := 0
for _, payload := range stashedPayloads {
// Pick random non-partitioned node to send from
for j, node := range nodes {
if !state.partitionedNodes[j] {
node.pubsub.PublishToBitmask(node.engine.getProverMessageBitmask(), payload)
sent++
break
}
}
}
t.Logf(" - Sent %d messages", msgCount)
t.Logf(" - Sent %d stashed valid payloads", sent)
time.Sleep(3 * time.Second)
}

File diff suppressed because one or more lines are too long

View File

@ -72,16 +72,22 @@ func (p *AppLeaderProvider) ProveNextState(
return nil, errors.Wrap(errors.New("nil prior frame"), "prove next state")
}
// Get pending messages to include in frame
// Get collected messages to include in frame
p.engine.pendingMessagesMu.RLock()
messages := make([]*protobufs.Message, len(p.engine.pendingMessages))
copy(messages, p.engine.pendingMessages)
messages := make([]*protobufs.Message, len(p.engine.collectedMessages[string(
collected.commitmentHash[:32],
)]))
copy(messages, p.engine.collectedMessages[string(
collected.commitmentHash[:32],
)])
p.engine.pendingMessagesMu.RUnlock()
// Clear pending messages after copying
p.engine.pendingMessagesMu.Lock()
p.engine.pendingMessages = p.engine.pendingMessages[:0]
p.engine.pendingMessagesMu.Unlock()
// Clear collected messages after copying
p.engine.collectedMessagesMu.Lock()
p.engine.collectedMessages[string(
collected.commitmentHash[:32],
)] = []*protobufs.Message{}
p.engine.collectedMessagesMu.Unlock()
// Update pending messages metric
pendingMessagesCount.WithLabelValues(p.engine.appAddressHex).Set(0)

View File

@ -53,31 +53,8 @@ func (p *AppLivenessProvider) Collect(
txMap := map[string][][]byte{}
for i, message := range slices.Concat(mixnetMessages, pendingMessages) {
err := p.engine.executionManager.ValidateMessage(
frameNumber,
message.Address,
message.Payload,
)
lockedAddrs, err := p.validateAndLockMessage(frameNumber, i, message)
if err != nil {
p.engine.logger.Debug(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
lockedAddrs, err := p.engine.executionManager.Lock(
frameNumber,
message.Address,
message.Payload,
)
if err != nil {
p.engine.logger.Debug(
"message failed lock",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
@ -93,19 +70,30 @@ func (p *AppLivenessProvider) Collect(
p.engine.logger.Info(
"collected messages",
zap.Int("total_message_count", len(mixnetMessages)+len(pendingMessages)),
zap.Int(
"total_message_count",
len(mixnetMessages)+len(pendingMessages),
),
zap.Int("valid_message_count", len(finalizedMessages)),
zap.Uint64(
"current_frame",
p.engine.GetFrame().Rank(),
),
)
transactionsCollectedTotal.WithLabelValues(p.engine.appAddressHex).Add(
float64(len(finalizedMessages)),
)
// Calculate commitment root
commitment, err := p.engine.calculateRequestsRoot(finalizedMessages, txMap)
if err != nil {
return CollectedCommitments{}, errors.Wrap(err, "collect")
}
p.engine.collectedMessagesMu.Lock()
p.engine.collectedMessages[string(commitment[:32])] = finalizedMessages
p.engine.collectedMessagesMu.Unlock()
return CollectedCommitments{
frameNumber: frameNumber,
commitmentHash: commitment,
@ -129,6 +117,11 @@ func (p *AppLivenessProvider) SendLiveness(
frameNumber = (*prior).Header.FrameNumber + 1
}
lastProcessed := p.engine.GetFrame()
if lastProcessed != nil && lastProcessed.Header.FrameNumber > frameNumber {
return errors.New("out of sync, forcing resync")
}
// Create liveness check message
livenessCheck := &protobufs.ProverLivenessCheck{
Filter: p.engine.appAddress,
@ -152,10 +145,11 @@ func (p *AppLivenessProvider) SendLiveness(
}
proverAddress := p.engine.getAddressFromPublicKey(publicKey)
livenessCheck.PublicKeySignatureBls48581 = &protobufs.BLS48581AddressedSignature{
Address: proverAddress,
Signature: sig,
}
livenessCheck.PublicKeySignatureBls48581 =
&protobufs.BLS48581AddressedSignature{
Address: proverAddress,
Signature: sig,
}
// Serialize using canonical bytes
data, err := livenessCheck.ToCanonicalBytes()
@ -177,3 +171,50 @@ func (p *AppLivenessProvider) SendLiveness(
return nil
}
// validateAndLockMessage validates a single message against the execution
// manager and, if valid, locks the addresses it touches for the given frame
// number. i is the message's index within the batch, used only for logging.
// Any panic raised while processing the message is recovered and converted
// into an error so a single bad message cannot take down the caller.
func (p *AppLivenessProvider) validateAndLockMessage(
	frameNumber uint64,
	i int,
	message *protobufs.Message,
) (lockedAddrs [][]byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			p.engine.logger.Error(
				"panic recovered from message",
				zap.Any("panic", r),
				zap.Stack("stacktrace"),
			)
			// Named return: surface the panic as a plain error.
			err = errors.New("panicked processing message")
		}
	}()

	if err = p.engine.executionManager.ValidateMessage(
		frameNumber,
		message.Address,
		message.Payload,
	); err != nil {
		p.engine.logger.Debug(
			"invalid message",
			zap.Int("message_index", i),
			zap.Error(err),
		)
		return nil, err
	}

	if lockedAddrs, err = p.engine.executionManager.Lock(
		frameNumber,
		message.Address,
		message.Payload,
	); err != nil {
		p.engine.logger.Debug(
			"message failed lock",
			zap.Int("message_index", i),
			zap.Error(err),
		)
		return nil, err
	}

	return lockedAddrs, nil
}

View File

@ -25,7 +25,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/internal/frametime"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p"
)
@ -42,6 +41,8 @@ func (p *AppSyncProvider) Synchronize(
errCh := make(chan error, 1)
go func() {
defer close(dataCh)
defer close(errCh)
defer func() {
if r := recover(); r != nil {
errCh <- errors.Wrap(
@ -50,8 +51,6 @@ func (p *AppSyncProvider) Synchronize(
)
}
}()
defer close(dataCh)
defer close(errCh)
// Check if we have a current frame
p.engine.frameStoreMu.RLock()
@ -94,22 +93,29 @@ func (p *AppSyncProvider) Synchronize(
l2 := make([]byte, 32)
copy(l2, p.engine.appAddress[:min(len(p.engine.appAddress), 32)])
shardKey := qcrypto.ShardKey{
shardKey := tries.ShardKey{
L1: [3]byte(bits),
L2: [32]byte(l2),
}
shouldHypersync := false
comm := p.engine.hypergraph.Commit()
for i, c := range comm[shardKey] {
if !bytes.Equal(c, latestFrame.Header.StateRoots[i]) {
shouldHypersync = true
break
comm, err := p.engine.hypergraph.GetShardCommits(
latestFrame.Header.FrameNumber,
p.engine.appAddress,
)
if err != nil {
p.engine.logger.Error("could not get commits", zap.Error(err))
} else {
for i, c := range comm {
if !bytes.Equal(c, latestFrame.Header.StateRoots[i]) {
shouldHypersync = true
break
}
}
}
if shouldHypersync {
p.hyperSyncWithProver(latestFrame.Header.Prover, shardKey)
if shouldHypersync {
p.hyperSyncWithProver(latestFrame.Header.Prover, shardKey)
}
}
}

View File

@ -10,7 +10,7 @@ type AppTracer struct {
}
// Trace forwards a trace-level message to the underlying debug logger.
func (t *AppTracer) Trace(message string) {
	t.logger.Debug(message)
}
func (t *AppTracer) Error(message string, err error) {

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/hex"
"slices"
"sync"
"time"
@ -15,6 +16,8 @@ import (
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p"
)
// AppVotingProvider implements VotingProvider
@ -389,6 +392,7 @@ func (p *AppVotingProvider) FinalizeVotes(
)
}
// Build a map of transaction hashes to their committed status
txMap := map[string]bool{}
for _, req := range (*chosenProposal).Requests {
tx, err := req.ToCanonicalBytes()
@ -400,24 +404,92 @@ func (p *AppVotingProvider) FinalizeVotes(
}
txHash := sha3.Sum256(tx)
p.engine.logger.Debug(
"adding transaction in frame to commit check",
zap.String("tx_hash", hex.EncodeToString(txHash[:])),
)
txMap[string(txHash[:])] = false
}
// Check that transactions are committed in our shard and collect shard
// addresses
shardAddressesSet := make(map[string]bool)
for _, tx := range res.Transactions {
p.engine.logger.Debug(
"checking transaction from global map",
zap.String("tx_hash", hex.EncodeToString(tx.TransactionHash)),
)
if _, ok := txMap[string(tx.TransactionHash)]; ok {
txMap[string(tx.TransactionHash)] = tx.Committed
// Extract shard addresses from each locked transaction's shard addresses
for _, shardAddr := range tx.ShardAddresses {
// Extract the applicable shard address (can be shorter than the full
// address)
extractedShards := p.extractShardAddresses(shardAddr)
for _, extractedShard := range extractedShards {
shardAddrStr := string(extractedShard)
shardAddressesSet[shardAddrStr] = true
}
}
}
}
// Check that all transactions are committed in our shard
for _, committed := range txMap {
if !committed {
return &parentFrame, PeerID{}, errors.Wrap(
errors.New("tx cross-shard lock unconfirmed"),
errors.New("tx not committed in our shard"),
"finalize votes",
)
}
}
// Check cross-shard locks for each unique shard address
for shardAddrStr := range shardAddressesSet {
shardAddr := []byte(shardAddrStr)
// Skip our own shard since we already checked it
if bytes.Equal(shardAddr, p.engine.appAddress) {
continue
}
// Query the global client for locked addresses in this shard
shardRes, err := p.engine.globalClient.GetLockedAddresses(
ctx,
&protobufs.GetLockedAddressesRequest{
ShardAddress: shardAddr,
FrameNumber: (*chosenProposal).Header.FrameNumber,
},
)
if err != nil {
p.engine.logger.Debug(
"failed to get locked addresses for shard",
zap.String("shard_addr", hex.EncodeToString(shardAddr)),
zap.Error(err),
)
continue
}
// Check that all our transactions are committed in this shard
for txHashStr := range txMap {
committedInShard := false
for _, tx := range shardRes.Transactions {
if string(tx.TransactionHash) == txHashStr {
committedInShard = tx.Committed
break
}
}
if !committedInShard {
return &parentFrame, PeerID{}, errors.Wrap(
errors.New("tx cross-shard lock unconfirmed"),
"finalize votes",
)
}
}
}
proverSet, err := p.engine.proverRegistry.GetActiveProvers(
p.engine.appAddress,
)
@ -480,12 +552,6 @@ func (p *AppVotingProvider) FinalizeVotes(
for i := 0; i < len(provers); i++ {
activeProver := provers[i]
if err != nil {
p.engine.logger.Error(
"could not get prover info",
zap.String("address", hex.EncodeToString(provers[i].Address)),
)
}
if _, ok := voterMap[string(activeProver.Address)]; !ok {
continue
}
@ -588,3 +654,111 @@ func (p *AppVotingProvider) SendConfirmation(
return nil
}
// GetFullPath converts a key to its path representation using 6-bit nibbles
func GetFullPath(key []byte) []int32 {
var nibbles []int32
depth := 0
for {
n1 := getNextNibble(key, depth)
if n1 == -1 {
break
}
nibbles = append(nibbles, n1)
depth += tries.BranchBits
}
return nibbles
}
// getNextNibble returns the next BranchBits bits of key starting at bit
// position pos, or -1 once pos is past the end of the key. When a nibble
// straddles a byte boundary, the high bits come from the current byte and the
// low bits from the following byte (zero-padded if the key ends mid-nibble).
func getNextNibble(key []byte, pos int) int32 {
	byteIdx := pos / 8
	if byteIdx >= len(key) {
		return -1
	}

	bitOffset := pos % 8
	available := 8 - bitOffset
	value := int(key[byteIdx] & ((1 << available) - 1))

	if available >= tries.BranchBits {
		// The current byte holds the whole nibble.
		return int32((value >> (available - tries.BranchBits)) & tries.BranchMask)
	}

	// Shift up and pull the remaining low bits from the following byte.
	value <<= tries.BranchBits - available
	if byteIdx+1 < len(key) {
		needed := tries.BranchBits - available
		value |= int(key[byteIdx+1]) >> (8 - needed)
	}
	return int32(value & tries.BranchMask)
}
// extractShardAddresses extracts all possible shard addresses from a
// transaction address. The first 43 six-bit path segments (258 bits) identify
// the base shard; each single segment beyond the 43rd yields a candidate
// sub-shard address (the 32-byte base plus that one segment byte). Candidates
// are filtered against shards known to the shards store, and the matching
// shard L2 addresses are returned. NOTE(review): candidates only cover
// one-segment extensions, so shards whose stored Path has multiple segments
// will not match — presumably intended; confirm against the shards store.
func (p *AppVotingProvider) extractShardAddresses(txAddress []byte) [][]byte {
	path := GetFullPath(txAddress)

	// Short path: nothing beyond the base shard prefix. Return the truncated
	// base address when the input is long enough, otherwise nothing.
	if len(path) <= 43 {
		if len(txAddress) >= 32 {
			return [][]byte{txAddress[:32]}
		}
		return nil
	}

	base := txAddress[:32]
	l1 := up2p.GetBloomFilterIndices(base, 256, 3)

	// Candidate set: the base address extended by exactly one path segment,
	// for every segment past the 43rd.
	candidates := map[string]struct{}{}
	for _, segment := range path[43:] {
		candidate := make([]byte, 32, 33)
		copy(candidate, base)
		candidate = append(candidate, byte(segment))
		candidates[string(candidate)] = struct{}{}
	}

	shards, err := p.engine.shardsStore.GetAppShards(
		slices.Concat(l1, base),
		[]uint32{},
	)
	if err != nil {
		return [][]byte{}
	}

	var shardAddresses [][]byte
	for _, shard := range shards {
		key := string(slices.Concat(shard.L2, uint32ToBytes(shard.Path)))
		if _, ok := candidates[key]; ok {
			shardAddresses = append(shardAddresses, shard.L2)
		}
	}
	return shardAddresses
}
// uint32ToBytes flattens a path of uint32 segments into a byte slice,
// truncating each segment to its low 8 bits. The result has exactly one byte
// per input segment.
func uint32ToBytes(path []uint32) []byte {
	// Pre-size to avoid repeated growth; also avoid naming the local "bytes",
	// which shadowed the imported bytes package in the original.
	out := make([]byte, 0, len(path))
	for _, segment := range path {
		out = append(out, byte(segment))
	}
	return out
}

View File

@ -119,6 +119,7 @@ func (e *AppConsensusEngine) eventDistributorLoop() {
e.inclusionProver,
),
e.proverRegistry,
e.clockStore,
)
if err != nil {
e.logger.Error(

View File

@ -29,6 +29,7 @@ type AppConsensusEngineFactory struct {
keyStore store.KeyStore
clockStore store.ClockStore
inboxStore store.InboxStore
shardsStore store.ShardsStore
hypergraphStore store.HypergraphStore
frameProver crypto.FrameProver
inclusionProver crypto.InclusionProver
@ -58,6 +59,7 @@ func NewAppConsensusEngineFactory(
keyStore store.KeyStore,
clockStore store.ClockStore,
inboxStore store.InboxStore,
shardsStore store.ShardsStore,
hypergraphStore store.HypergraphStore,
frameProver crypto.FrameProver,
inclusionProver crypto.InclusionProver,
@ -85,6 +87,7 @@ func NewAppConsensusEngineFactory(
keyStore: keyStore,
clockStore: clockStore,
inboxStore: inboxStore,
shardsStore: shardsStore,
hypergraphStore: hypergraphStore,
frameProver: frameProver,
inclusionProver: inclusionProver,
@ -141,6 +144,7 @@ func (f *AppConsensusEngineFactory) CreateAppConsensusEngine(
f.keyStore,
f.clockStore,
f.inboxStore,
f.shardsStore,
f.hypergraphStore,
f.frameProver,
f.inclusionProver,

View File

@ -552,14 +552,16 @@ func registerProverInHypergraphWithFilter(t *testing.T, hg thypergraph.Hypergrap
txn.Commit()
// Commit the hypergraph
hg.Commit()
hg.Commit(0)
t.Logf(" Registered prover with address: %x, filter: %x (public key length: %d)", address, filter, len(publicKey))
}
type mockGlobalClientLocks struct {
committed bool
hashes [][]byte
committed bool
hashes [][]byte
shardAddresses map[string][][]byte
shardAddressesMu sync.Mutex
}
func (m *mockGlobalClientLocks) GetGlobalFrame(ctx context.Context, in *protobufs.GetGlobalFrameRequest, opts ...grpc.CallOption) (*protobufs.GlobalFrameResponse, error) {
@ -572,17 +574,23 @@ func (m *mockGlobalClientLocks) GetGlobalShards(ctx context.Context, in *protobu
return nil, errors.New("not used in this test")
}
func (m *mockGlobalClientLocks) GetLockedAddresses(ctx context.Context, in *protobufs.GetLockedAddressesRequest, opts ...grpc.CallOption) (*protobufs.GetLockedAddressesResponse, error) {
out := &protobufs.GetLockedAddressesResponse{Transactions: make([]*protobufs.LockedTransaction, 0, len(m.hashes))}
for _, h := range m.hashes {
out := &protobufs.GetLockedAddressesResponse{Transactions: []*protobufs.LockedTransaction{}}
m.shardAddressesMu.Lock()
hits := m.shardAddresses[string(in.ShardAddress)]
for _, h := range hits {
out.Transactions = append(out.Transactions, &protobufs.LockedTransaction{
TransactionHash: h,
Committed: m.committed,
})
}
m.shardAddressesMu.Unlock()
return out, nil
}
func createValidPendingTxPayload(t *testing.T, hgs []thypergraph.Hypergraph, km *keys.InMemoryKeyManager) []byte {
func createValidPendingTxPayload(t *testing.T, hgs []thypergraph.Hypergraph, km *keys.InMemoryKeyManager, prefix byte) *token.PendingTransaction {
// set this value so we skip cutover checks
token.BEHAVIOR_PASS = true
dc := &bulletproofs.Decaf448KeyConstructor{}
vk, _ := dc.New()
sk, _ := dc.New()
@ -604,12 +612,15 @@ func createValidPendingTxPayload(t *testing.T, hgs []thypergraph.Hypergraph, km
psk, err := km.CreateAgreementKey("q-spend-key", crypto.KeyTypeDecaf448)
assert.NoError(t, err)
// Control shard placement
address1 := [64]byte{}
copy(address1[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address1[32:])
address1[32] = prefix
rand.Read(address1[33:])
address2 := [64]byte{}
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
address2[32] = prefix
rand.Read(address2[33:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree2 := &qcrypto.VectorCommitmentTree{}
@ -686,7 +697,10 @@ func createValidPendingTxPayload(t *testing.T, hgs []thypergraph.Hypergraph, km
hg.SetVertexData(txn, address1, tree1)
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address2[32:]), tree2.Commit(hg.GetProver(), false), big.NewInt(55*26)))
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
err := txn.Commit()
if err != nil {
t.Fatal(err)
}
}
// simulate input as commitment to total
@ -725,22 +739,7 @@ func createValidPendingTxPayload(t *testing.T, hgs []thypergraph.Hypergraph, km
rdfMultiprover,
)
if err := tx.Prove(1); err != nil {
t.Fatal(err)
}
req := &protobufs.MessageBundle{
Requests: []*protobufs.MessageRequest{
{
Request: &protobufs.MessageRequest_PendingTransaction{
PendingTransaction: tx.ToProtobuf(),
},
},
},
}
out, err := req.ToCanonicalBytes()
assert.NoError(t, err)
return out
return tx
}
func prepareRDFSchemaFromConfig(

View File

@ -9,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
@ -224,18 +225,22 @@ func (e *AppConsensusEngine) handleProverMessage(message *pb.Message) {
typePrefix := e.peekMessageType(message)
e.logger.Debug("handling prover message", zap.Uint32("type_prefix", typePrefix))
switch typePrefix {
case protobufs.MessageBundleType:
// MessageBundle messages need to be collected for execution
// Store them in pendingMessages to be processed during Collect
hash := sha3.Sum256(message.Data)
e.pendingMessagesMu.Lock()
e.pendingMessages = append(e.pendingMessages, &protobufs.Message{
Address: e.appAddress[:32],
Hash: hash[:],
Payload: message.Data,
})
e.pendingMessagesMu.Unlock()
e.logger.Debug(
"collected global request for execution",
"collected app request for execution",
zap.Uint32("type", typePrefix),
)
@ -486,20 +491,22 @@ func (e *AppConsensusEngine) handleLivenessCheck(message *pb.Message) {
return
}
lcBytes, err := livenessCheck.ConstructSignaturePayload()
if err != nil {
e.logger.Error(
"could not construct signature message for liveness check",
zap.Error(err),
)
livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc()
return
}
var found []byte = nil
for _, prover := range proverSet {
if bytes.Equal(
prover.Address,
livenessCheck.PublicKeySignatureBls48581.Address,
) {
lcBytes, err := livenessCheck.ConstructSignaturePayload()
if err != nil {
e.logger.Error(
"could not construct signature message for liveness check",
zap.Error(err),
)
break
}
valid, err := e.keyManager.ValidateSignature(
crypto.KeyTypeBLS48581G1,
prover.PublicKey,
@ -534,27 +541,6 @@ func (e *AppConsensusEngine) handleLivenessCheck(message *pb.Message) {
return
}
signatureData, err := livenessCheck.ConstructSignaturePayload()
if err != nil {
e.logger.Error("invalid signature payload", zap.Error(err))
livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc()
return
}
valid, err := e.keyManager.ValidateSignature(
crypto.KeyTypeBLS48581G1,
found,
signatureData,
livenessCheck.PublicKeySignatureBls48581.Signature,
livenessCheck.GetSignatureDomain(),
)
if err != nil || !valid {
e.logger.Error("invalid liveness check signature", zap.Error(err))
livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc()
return
}
if livenessCheck.PublicKeySignatureBls48581 == nil {
e.logger.Error("no signature on liveness check")
livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc()

View File

@ -80,6 +80,7 @@ func (e *AppConsensusEngine) subscribeToProverMessages() error {
case <-e.haltCtx.Done():
return nil
case e.proverMessageQueue <- message:
e.logger.Debug("got prover message")
return nil
case <-e.ctx.Done():
return errors.New("context cancelled")

View File

@ -16,7 +16,7 @@ import (
)
func (e *AppConsensusEngine) validateConsensusMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -53,6 +53,12 @@ func (e *AppConsensusEngine) validateConsensusMessage(
}
if !bytes.Equal(frame.Header.Address, e.appAddress) {
proposalValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc()
return p2p.ValidationResultIgnore
}
if frametime.AppFrameSince(frame) > 20*time.Second {
proposalValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc()
return p2p.ValidationResultIgnore
}
@ -94,8 +100,12 @@ func (e *AppConsensusEngine) validateConsensusMessage(
}
now := time.Now().UnixMilli()
if livenessCheck.Timestamp > now+5000 ||
livenessCheck.Timestamp < now-5000 {
if livenessCheck.Timestamp > now+500 ||
livenessCheck.Timestamp < now-1000 {
livenessCheckValidationTotal.WithLabelValues(
e.appAddressHex,
"ignore",
).Inc()
return p2p.ValidationResultIgnore
}
@ -128,6 +138,7 @@ func (e *AppConsensusEngine) validateConsensusMessage(
now := time.Now().UnixMilli()
if vote.Timestamp > now+5000 || vote.Timestamp < now-5000 {
voteValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc()
return p2p.ValidationResultIgnore
}
@ -157,6 +168,10 @@ func (e *AppConsensusEngine) validateConsensusMessage(
now := time.Now().UnixMilli()
if confirmation.Timestamp > now+5000 || confirmation.Timestamp < now-5000 {
confirmationValidationTotal.WithLabelValues(
e.appAddressHex,
"ignore",
).Inc()
return p2p.ValidationResultIgnore
}
@ -179,7 +194,7 @@ func (e *AppConsensusEngine) validateConsensusMessage(
}
func (e *AppConsensusEngine) validateProverMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -223,7 +238,7 @@ func (e *AppConsensusEngine) validateProverMessage(
}
func (e *AppConsensusEngine) validateGlobalProverMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -267,7 +282,7 @@ func (e *AppConsensusEngine) validateGlobalProverMessage(
}
func (e *AppConsensusEngine) validateFrameMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
timer := prometheus.NewTimer(
@ -340,7 +355,7 @@ func (e *AppConsensusEngine) validateFrameMessage(
}
func (e *AppConsensusEngine) validateGlobalFrameMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
timer := prometheus.NewTimer(globalFrameValidationDuration)
@ -400,7 +415,7 @@ func (e *AppConsensusEngine) validateGlobalFrameMessage(
}
func (e *AppConsensusEngine) validateAlertMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -450,7 +465,7 @@ func (e *AppConsensusEngine) validateAlertMessage(
}
func (e *AppConsensusEngine) validatePeerInfoMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -510,7 +525,7 @@ func (e *AppConsensusEngine) validatePeerInfoMessage(
}
func (e *AppConsensusEngine) validateDispatchMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) p2p.ValidationResult {
// Check if data is long enough to contain type prefix

View File

@ -83,31 +83,8 @@ func (p *GlobalLivenessProvider) Collect(
)
for i, message := range messages {
err := p.engine.executionManager.ValidateMessage(
frameNumber,
message.Address,
message.Payload,
)
err := p.validateAndLockMessage(frameNumber, i, message)
if err != nil {
p.engine.logger.Debug(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
_, err = p.engine.executionManager.Lock(
frameNumber,
message.Address,
message.Payload,
)
if err != nil {
p.engine.logger.Debug(
"message failed lock",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
@ -130,7 +107,14 @@ func (p *GlobalLivenessProvider) Collect(
proverRoot := make([]byte, 64)
// TODO(2.1.1+): Refactor this with caching
commitSet := p.engine.hypergraph.Commit()
commitSet, err := p.engine.hypergraph.Commit(frameNumber)
if err != nil {
p.engine.logger.Error(
"could not commit",
zap.Error(err),
)
return GlobalCollectedCommitments{}, errors.Wrap(err, "collect")
}
collected := 0
// The poseidon hash's field is < 0x3fff...ffff, so we use the upper two bits
@ -263,3 +247,50 @@ func (p *GlobalLivenessProvider) SendLiveness(
return nil
}
// validateAndLockMessage validates one collected message through the
// execution manager and, on success, acquires the execution lock for it.
// It returns a non-nil error when validation or locking fails (or when the
// underlying execution code panics), in which case the caller skips the
// message; i is only used to tag debug logs with the message's index.
func (p *GlobalLivenessProvider) validateAndLockMessage(
frameNumber uint64,
i int,
message *protobufs.Message,
) (err error) {
// Recover from panics raised inside ValidateMessage/Lock so a single
// malformed message cannot take down the whole Collect pass; the panic
// is logged with a stack trace and surfaced as an ordinary error via
// the named return value.
defer func() {
if r := recover(); r != nil {
p.engine.logger.Error(
"panic recovered from message",
zap.Any("panic", r),
zap.Stack("stacktrace"),
)
err = errors.New("panicked processing message")
}
}()
err = p.engine.executionManager.ValidateMessage(
frameNumber,
message.Address,
message.Payload,
)
if err != nil {
// Invalid messages are expected in normal operation, so log at debug
// level and let the caller continue with the next message.
p.engine.logger.Debug(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
return err
}
// Lock result (first return value) is intentionally discarded here; only
// success/failure of acquiring the lock matters to Collect.
_, err = p.engine.executionManager.Lock(
frameNumber,
message.Address,
message.Payload,
)
if err != nil {
p.engine.logger.Debug(
"message failed lock",
zap.Int("message_index", i),
zap.Error(err),
)
return err
}
return nil
}

View File

@ -10,7 +10,7 @@ type GlobalTracer struct {
}
// Trace logs a trace-level message through the tracer's logger at debug
// level.
// NOTE(review): the commented-out line below appears to be the pre-change
// form left visible by the diff rendering — confirm against the applied
// file that only the active Debug call remains.
func (t *GlobalTracer) Trace(message string) {
// t.logger.Debug(message)
t.logger.Debug(message)
}
func (t *GlobalTracer) Error(message string, err error) {

View File

@ -361,13 +361,7 @@ func (p *GlobalVotingProvider) FinalizeVotes(
for i := 0; i < len(provers); i++ {
activeProver := provers[i]
if err != nil {
p.engine.logger.Error(
"could not get prover info",
zap.String("address", hex.EncodeToString(activeProver.Address)),
)
continue
}
// Check if this prover voted in our voterMap
if _, ok := voterMap[string(activeProver.Address)]; ok {
byteIndex := i / 8

View File

@ -83,6 +83,7 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() {
e.hasSentKeyBundle = true
e.publishKeyRegistry()
}
if e.proposer != nil {
workers, err := e.workerManager.RangeWorkers()
if err != nil {
@ -150,6 +151,7 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() {
e.inclusionProver,
),
e.proverRegistry,
e.clockStore,
)
if err != nil {
e.logger.Error(
@ -387,7 +389,12 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
pendingFilters := [][]byte{}
proposalDescriptors := []provers.ShardDescriptor{}
decideDescriptors := []provers.ShardDescriptor{}
shardKeys := e.hypergraph.Commit()
shardKeys, err := e.hypergraph.Commit(data.Frame.Header.FrameNumber)
if err != nil {
e.logger.Error("could not commit", zap.Error(err))
return
}
for key := range shardKeys {
shards, err := e.shardsStore.GetAppShards(
slices.Concat(key.L1[:], key.L2[:]),
@ -427,7 +434,13 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
if self != nil {
e.logger.Debug("checking allocations")
for _, allocation := range self.Allocations {
e.logger.Debug("checking allocation", zap.String("filter", hex.EncodeToString(allocation.ConfirmationFilter)))
e.logger.Debug(
"checking allocation",
zap.String(
"filter",
hex.EncodeToString(allocation.ConfirmationFilter),
),
)
if bytes.Equal(allocation.ConfirmationFilter, filter) {
allocated = allocation.Status != 4
if e.config.P2P.Network != 0 ||

View File

@ -1,16 +1,12 @@
package global
import (
"bytes"
_ "embed"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"slices"
"time"
@ -45,6 +41,9 @@ type GenesisJson struct {
ArchivePeers map[string]string `json:"archive_peers"`
}
//go:embed mainnet_genesis.json
var mainnetGenesisJSON []byte
// TODO[2.1.1+]: Refactor out direct hypergraph access
func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame {
e.logger.Info("initializing genesis frame for global consensus")
@ -53,43 +52,9 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame {
// If on mainnet, load from release
if e.config.P2P.Network == 0 {
var lastErr error
var genesisData GenesisJson = GenesisJson{}
for attempt := 1; attempt <= 5; attempt++ {
if err := func() error {
resp, err := http.Get("https://releases.quilibrium.com/genesis.json")
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("http status %d", resp.StatusCode)
}
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(buf.Bytes(), &genesisData)
if err != nil {
return err
}
return nil
}(); err != nil {
lastErr = err
// simple backoff: 200ms * attempt
time.Sleep(time.Duration(200*attempt) * time.Millisecond)
continue
}
lastErr = nil
break
}
if lastErr != nil {
e.logger.Error("failed to download genesis", zap.Error(lastErr))
var genesisData GenesisJson
if err := json.Unmarshal(mainnetGenesisJSON, &genesisData); err != nil {
e.logger.Error("failed to parse embedded genesis data", zap.Error(err))
return nil
}
@ -129,7 +94,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame {
commitments[i] = &tries.VectorCommitmentTree{}
}
proverRoot := make([]byte, 64)
var proverRoot []byte
// Parse and set initial commitments from JSON
for hexKey, base64Value := range genesisData.InitialCommitments {
@ -205,7 +170,12 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame {
return nil
}
roots := e.hypergraph.Commit()
roots, err := e.hypergraph.Commit(0)
if err != nil {
e.logger.Error("could not commit", zap.Error(err))
return nil
}
proverRoots := roots[tries.ShardKey{
L1: [3]byte{},
L2: intrinsics.GLOBAL_INTRINSIC_ADDRESS,
@ -416,20 +386,6 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame {
return nil
}
roots := e.hypergraph.Commit()
// Parse and set initial commitments from JSON
for shardKey, commits := range roots {
for i := 0; i < 3; i++ {
commitments[shardKey.L1[i]].Insert(
shardKey.L2[:],
commits[0],
nil,
big.NewInt(int64(len(commits[0]))),
)
commitments[shardKey.L1[i]].Commit(e.inclusionProver, false)
}
}
state = hgstate.NewHypergraphState(e.hypergraph)
for _, pubkey := range proverPubKeys {
@ -446,7 +402,25 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame {
return nil
}
roots = e.hypergraph.Commit()
roots, err := e.hypergraph.Commit(0)
if err != nil {
e.logger.Error("could not commit", zap.Error(err))
return nil
}
// Parse and set initial commitments from JSON
for shardKey, commits := range roots {
for i := 0; i < 3; i++ {
commitments[shardKey.L1[i]].Insert(
shardKey.L2[:],
commits[0],
nil,
big.NewInt(int64(len(commits[0]))),
)
commitments[shardKey.L1[i]].Commit(e.inclusionProver, false)
}
}
proverRoots := roots[tries.ShardKey{
L1: [3]byte{},
L2: intrinsics.GLOBAL_INTRINSIC_ADDRESS,

View File

@ -28,6 +28,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/config"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
qhypergraph "source.quilibrium.com/quilibrium/monorepo/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/provers"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/reward"
consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
@ -36,6 +37,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat"
"source.quilibrium.com/quilibrium/monorepo/node/execution/manager"
hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/p2p/onion"
@ -296,7 +298,7 @@ func NewGlobalConsensusEngine(
return 6
}
return uint64(len(currentSet))
return uint64(len(currentSet)) * 2 / 3
}
}
@ -390,6 +392,9 @@ func NewGlobalConsensusEngine(
decafConstructor,
compiler,
frameProver,
rewardIssuance,
proverRegistry,
blsConstructor,
true, // includeGlobal
)
if err != nil {
@ -492,7 +497,42 @@ func (e *GlobalConsensusEngine) Start(quit chan struct{}) <-chan error {
var initialState **protobufs.GlobalFrame = nil
if frame != nil {
initialState = &frame
if frame.Header.FrameNumber == 244200 && e.config.P2P.Network == 0 {
e.logger.Warn("purging previous genesis to start new")
err = e.clockStore.DeleteGlobalClockFrameRange(0, 244201)
if err != nil {
panic(err)
}
set := e.hypergraph.(*qhypergraph.HypergraphCRDT).GetVertexAddsSet(
tries.ShardKey{
L1: [3]byte{},
L2: [32]byte(bytes.Repeat([]byte{0xff}, 32)),
},
)
leaves := tries.GetAllLeaves(
set.GetTree().SetType,
set.GetTree().PhaseType,
set.GetTree().ShardKey,
set.GetTree().Root,
)
txn, err := e.hypergraph.NewTransaction(false)
if err != nil {
panic(err)
}
for _, l := range leaves {
err = set.GetTree().Delete(txn, l.Key)
if err != nil {
txn.Abort()
panic(err)
}
}
if err = txn.Commit(); err != nil {
panic(err)
}
frame = nil
} else {
initialState = &frame
}
}
if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode {
@ -535,6 +575,7 @@ func (e *GlobalConsensusEngine) Start(quit chan struct{}) <-chan error {
return errChan
}
// Subscribe to shard consensus messages to broker lock agreement
err = e.subscribeToShardConsensusMessages()
if err != nil {
errChan <- errors.Wrap(err, "start")
@ -610,6 +651,10 @@ func (e *GlobalConsensusEngine) Start(quit chan struct{}) <-chan error {
e.wg.Add(1)
go e.updateMetrics()
// Start periodic tx lock pruning
e.wg.Add(1)
go e.pruneTxLocksPeriodically()
if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode {
// Start the state machine
if err := e.stateMachine.Start(); err != nil {
@ -710,8 +755,10 @@ func (e *GlobalConsensusEngine) setupGRPCServer() error {
}
// Create gRPC server with TLS
e.grpcServer = grpc.NewServer(
e.grpcServer = qgrpc.NewServer(
grpc.Creds(tlsCreds),
grpc.ChainUnaryInterceptor(e.authProvider.UnaryInterceptor),
grpc.ChainStreamInterceptor(e.authProvider.StreamInterceptor),
grpc.MaxRecvMsgSize(10*1024*1024),
grpc.MaxSendMsgSize(10*1024*1024),
)
@ -1886,6 +1933,74 @@ func (e *GlobalConsensusEngine) reportPeerInfoPeriodically() {
}
}
// pruneTxLocksPeriodically runs pruneTxLocks once immediately and then
// every 5 seconds until the engine context is cancelled. It is intended to
// run as a dedicated goroutine; the matching e.wg.Add(1) is performed by
// the caller and released here via the deferred Done.
func (e *GlobalConsensusEngine) pruneTxLocksPeriodically() {
defer e.wg.Done()
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
// Prune once up front so stale entries do not linger for a full tick
// after startup.
e.pruneTxLocks()
for {
select {
case <-e.ctx.Done():
return
case <-ticker.C:
e.pruneTxLocks()
}
}
}
// pruneTxLocks removes tx-lock map entries keyed by frame numbers older
// than two frames behind the latest global clock frame. It is a no-op when
// the map is empty, the latest frame cannot be loaded, or the head frame
// number is below 2 (so the cutoff would underflow).
func (e *GlobalConsensusEngine) pruneTxLocks() {
// Cheap emptiness check under the read lock before doing any store I/O.
e.txLockMu.RLock()
if len(e.txLockMap) == 0 {
e.txLockMu.RUnlock()
return
}
e.txLockMu.RUnlock()
frame, err := e.clockStore.GetLatestGlobalClockFrame()
if err != nil {
// ErrNotFound simply means no global frame exists yet; only log
// unexpected errors, and in either case there is nothing to prune.
if !errors.Is(err, store.ErrNotFound) {
e.logger.Debug(
"failed to load latest global frame for tx lock pruning",
zap.Error(err),
)
}
return
}
if frame == nil || frame.Header == nil {
return
}
head := frame.Header.FrameNumber
// Guard against uint64 underflow in the cutoff computation below.
if head < 2 {
return
}
cutoff := head - 2
// Re-acquire as a write lock for the actual deletion pass. Note the map
// may have changed since the RLock check above; that is acceptable here.
e.txLockMu.Lock()
removed := 0
for frameNumber := range e.txLockMap {
if frameNumber < cutoff {
delete(e.txLockMap, frameNumber)
removed++
}
}
e.txLockMu.Unlock()
if removed > 0 {
e.logger.Debug(
"pruned stale tx locks",
zap.Uint64("head_frame", head),
zap.Uint64("cutoff_frame", cutoff),
zap.Int("frames_removed", removed),
)
}
}
// validatePeerInfoSignature validates the signature of a peer info message
func (e *GlobalConsensusEngine) validatePeerInfoSignature(
peerInfo *protobufs.PeerInfo,

View File

@ -341,7 +341,7 @@ func registerProverInHypergraph(t *testing.T, hg thypergraph.Hypergraph, publicK
txn.Commit()
// Commit the hypergraph
hg.Commit()
hg.Commit(0)
t.Logf(" Registered global prover with address: %x (public key length: %d)", address, len(publicKey))
}
@ -728,7 +728,7 @@ func TestGlobalConsensusEngine_Integration_MultiNodeConsensus(t *testing.T) {
// Commit the hypergraph
for i := 0; i < 6; i++ {
hypergraphs[i].Commit()
hypergraphs[i].Commit(0)
}
// Create six engines that can communicate (minimum required for consensus)
@ -894,6 +894,11 @@ loop:
}
func TestGlobalConsensusEngine_Integration_ShardCoverage(t *testing.T) {
// This test needs to run long enough to hit the condition required
if testing.Short() {
t.Skip("Skipping shard coverage scenario test in short mode")
}
// Generate hosts for testing
_, m, cleanupHosts := tests.GenerateSimnetHosts(t, 1, []libp2p.Option{})
defer cleanupHosts()
@ -906,7 +911,7 @@ func TestGlobalConsensusEngine_Integration_ShardCoverage(t *testing.T) {
Path: ".test/global",
}, pebbleDB, zap.L(), &verenc.MPCitHVerifiableEncryptor{}, inclusionProver)
hg := hgcrdt.NewHypergraph(zap.NewNop(), hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{})
for i := range 3 {
for i := range 6 {
k := make([]byte, 585)
k[1] = byte(i)
abi, _ := poseidon.HashBytes(k)
@ -944,17 +949,12 @@ func TestGlobalConsensusEngine_Integration_ShardCoverage(t *testing.T) {
// Start the event distributor
engine.Start(make(chan struct{}))
time.Sleep(1 * time.Second)
// Configure low coverage scenario in hypergraph
// Since we registered only 1 prover above, this is already a low coverage scenario
// Run shard coverage check
err := engine.checkShardCoverage(1)
require.NoError(t, err)
// Wait for event processing and possible new app shard head
time.Sleep(10 * time.Second)
time.Sleep(1800 * time.Second)
mu.Lock()
found := false
newHeadAfter := false
@ -1331,7 +1331,7 @@ func registerProverInHypergraphWithFilter(t *testing.T, hg thypergraph.Hypergrap
txn.Commit()
// Commit the hypergraph
hg.Commit()
hg.Commit(0)
t.Logf(" Registered prover with address: %x, filter: %x (public key length: %d)", address, filter, len(publicKey))
}

View File

@ -0,0 +1,18 @@
{
"frame_number":244200,
"timestamp":1744726759481,
"difficulty":160000,
"parent_selector":"F9PvaAqlX01EkmsUxyt1jwdGo2pGatdsKP3I9Ta0ASQ=",
"initial_commitments": {
"11558584af7017a9bfd1ff1864302d643fbe58c62dcf90cbcd8fde74a26794d9": "AwNt1N8mHx9RJ+PxyvTpOxXUyv7EXYr9CYE5zcdh5bK9G8goL4dUZ8EIIhb+cPxNqZjgwBHDPAOfT6zurDFaFDq77vBBI1f0DqY="
},
"output":"AFS8suTHvpxSHDxkZvj0jiLUy/Yo8p9PSLrGNZ4CjytqfPTftLFWTMOm5SWT3tfJvUsidJM0PGz0s5xTOh4obGCKjDf5j0ic3o1BYQScBcaFLt0pvLFrxQ1Zz2uzTU3Wa3/vPbzEKAP0XNgorg1PeQ+9km7xB9lxxqbDcod0uXBSAAEk1j2eEhdkE39wR7lyh3+1+qiI1ehuBkkJgvlt1KU3cKo0mYfUNv390gX9Nkyx+wxS029nLtOX5msyW/MlMMGP93/FDtZN6Kc5J5BpSrL/xZ4I1NPPONn/eHUu3Z0uKdnZCg1132dhxmPpi1qpr/V8wGOlosWU51wvEl/rSTsZACXAOYu4f+7Hf6wulfIjpCJfvY4J0QAOhEZDD+qA7VaGgZEIzKza0c4QGtpAxCe010Rv2M1725A8Pi5jY95hZbcJtAOVLbxaSm1bGzzqVUSftmlQWn77N+UIitMbYyLThKNMFnOit31pemvdcU+kWKYrvNlx5ceq5qhOptc4tit7//k/PHkf2OANfVCv/DbOnxtCgHOBwaGUteL+Z3GabU8ygqIRDHbyT01gcnopqfo0UuInblbpx9r37QXy2qmC93ZqxT4FmwvT+NJKcAgRjfe5yl4yGJUWX70IEaICnhr5prF+xG8no18VpHUaM8XjrMQinUFVcujtvKA/ojvmszVx",
"beacon_ed448_key": "ImqaBAzHM61pHODoywHu2a6FIOqoXKY/RECZuOXjDfds8DBxtA0g+4hCfOgwiti2TpOF8AH7xH0A",
"beacon_bls48581_key": "AgeOEovEd0y3HSEG73walM+C+e1n0WmypDt18tH941CGVB1/duFEp0EGvlLsv7ZGZJYvQn2+FZhnJrN1sZhU/9IdJfJR5gZKozAR43NxDlAQUDmszc2D3emLR9TQygKXc58xBh3zrTDkIfgc5ZBAL6kuhBvN758ivDxKljD58kFcdWExCBhlvy4zRQczeSOFOVL5BC3QM5gxNPfpkf3Vc3JYdbHpVRa2EgBrDdQhnGwdBuNOqa6xolNWwphFRdCyFXkdYFskExs5jjmWk+rvcU+jeoJii/Bj6vF4vBEeCaqUQ8GfMC3GHE+ABLp4lJUUA+PqBuYGMtpk22B7FkZIUPvy7bJoD/NeSX1mi16hAH/oAZrPVaY2SRGz4o3/zl4QCfP3LXsHhEOsMxOvklIEcY/ASH/G+tpWrdh+Lu0Hmq8iUuJFunET8c38+TsD+JxHiK/1edmVt9NHPi7axxz78zP7EqDIgQNT+F4c1XivCzu6Vk6U1MTqVV74sjD0VKOeN622qWU935CUGXN1g/6Vt5H5B4EruCmpQpeeOrszZr8/JU8dpBGmAtziDpRT3FDLqtNDAY16zQIPOdgETTsekPAGUZzNUQ8dXtyVC9tZ2QSHAfTjKvRgl+OXPn0ygnTJrKosgLQtlXAlvkfC+YNBy4LKGPDpL0PHVcQbhecUTwERWd8Ag78+B4mzW2reLyp3a8ea7+nxdsMFWjskPnN0Rvo03wCI2xYCMHbn11ZW346SXO3u3cHMF0rnvG7Pl+Gi4IRiYM7nm34/",
"archive_peers": {
"QmestbFp8PddwRk6ysBRrmWZEiHun5aRidHkqFxgeFaWVK": "030d8e130fd666160ff5df6dbf935adb69b4906e6ac074f675a268b59b470b9393f45273143924d6a8d647fbce93d4bbfaad10bc13c72770b68070ad0898fd98f2f9882d614c00eb909a07dcccc8d7d7472b13617832ebd1943fd66d8a05cda88171990b15e8a99514a7817619f7fb2577ee82ca496ca36aaa52f31052c2e76039b10f0fcb0b44c2a0dd830e4fdf40fb5647e903c3e44689df8cf38830b9edc6a1746c38672b255b956a434d6538c8807d9be066e9a7121da436a6ac215cafe10a8b44192b252bd5683c3e0c7805155e8f7ad8dfc3b01467f2f029f91100caf4907600d8562b3442952ba8dc086661aa5cafaf09acbf01ec55b950032b3d358b2fa6ee282fd72a4cb28022052bd48656e5aac56a9c50eae3e187eda525fc4552010cb1419507b271c7ff3bd7aabb9ee9a1eb1dc4ad88a76f42808e716490c02efddc3c0a5e2386efcdb0b83dccdc7c543d1571e2d9c927ff433ca8aa03b091e0138549c79fe2c1a90e669cef03540353d6ea1ca5ca71b43dc27caa64447b3c49bd4a66dd12f457468b1d32515efc471e2189bc2bb4ce43b5a526c1e6aa7d60e1e1b6e6f795232a823c37ea6ab2a8af2834a3980a758bec050507b416dad50a3645cf757b48b7f43e1b786d1d9b00c270bcd650ae9e1e325765a751c566776d6e5558a6fcd054a2b4947d5b4adf4c528dac71bde90fcca346505869180f4ed740085197cd2a40c6f8ebbeb90d8c5bf359e21870626fbb2151c9f5d507f5903f7abfaeaea88ca14af4cb51b45d9624c8ebecec5f98b9b41182c539d13ae3fe21c2cafd80abdfbba14b75",
"QmaMcbX3NH5d6BXy87C1n2xT68XzCwtkbvMRqdLYphh19b": "02108cc721d540c80e378b84b06cb67ecaf9f4bd091a52c1d85b3ef26d536e8482b72cf6651b445bcd0943b40d98bf03c66208170c04689d510cc2317c7ab5c26bb00bb8a1186aee4cc80f6041aca5676b575e65801c88f9e039845338f7ec42fa825b7a7d79138b8ea3427675398b9b6cafd90e5acb33445059946298e675917fdd40fb5810b2dc9de51d7798f271d6eb6cee0353ffb24982dd2e5ac9e482e0fdac819ded3115fe2ce5a28b581a22e74c79aba7895f4ec758c7f57d85c481b4393a0bea7a6b37a0e64a7a4674818f59c9acdb40538103fba190a89f0f05c46a96b24b93c643279c929f6f81f43d17ce0b5ec29e23bf6bcc22efafc6bd8aba77fabf128742fcf5c5f77266c5690118f2b331d4fc6283b9a56f905f94421b9fe9b8789d4b5e07b1b4ae71cb2c064309303fa348a0e58c009f2e34087ac5fcf89c02e6a20b70e7840483bdbbd98f79d4586c4478e869711f70a8bea4baeb789ec63575d42975eb148c79f7f1a02e8d1199214724596cab28dede4c585feab18fa25d9640b117872d8e8af563e1c5dc8a63e7b971ba3fbb79a744c2672dcec227d78583461e66f08ff2cdd6772dd310c9b008a85b1d3a3b010ff224faf8b4863056812a6c0f038a1f29af7c1d23d88f06e722c50f12f59543e550d22155cb7696325bba91055fd1f136dbdf3183c4b39eed32350dc8d11d2a8aaea57d5a77370de21169b96cb673a97aefc2eaa222a8f7964f10618bb25f61f3eb5bcea94e130fcb1a33bd36d3ac612d8e72de81eff2ceefca9c85efcd3218f61737c11c070b4f14790808e591170d2533",
"QmS3xJKbAmQxDiry9HpXV6bJyRvyd47pbufpZwEmgY1cy6": "0202e6667d72cf6e25691421b84774f38399aa0580e6807ec71053cb7c67571b2d36e992bf65862c81568860c9a649c413d63785f431f8484f3372d2f61de79485ee31f3670f6dc0340d0142b89c4b17972a0ee8a90e923c237e49dd9d0a658f93a008cb962f5ccb5fbe774c21ecf3bf228944114b8dbbd98128bf40299d52a30f8447db3c755e94a942b68f950e50e26c94d7126265216e69e995337443ec72baf1a5c61c72195e16923b7d04d52802cbd1a27d0b92bce34b6b755fdc7021427a6678d9cbf209874884993effb96181e6caa04dfa43586f72f262bc0a327d6b05f8754c4ffcd2e0a94745917a544fcb808043745d24fc816d4c5a84b03358b0ab24f26f92f409fad55206142aab29952d27f094394ee8b00b5f418a89d1caf95455dd6551067b0ac9540624097b283eeb59ca2b2f8c4e82bb06d6952be97a6e61ed55878aec3a13496a2d9e1015c7a456525552f8c0e9ee8cc8c5c989bc1feb57b8d630d24a05ccf824ee66031a0060729318061c6b933ca1e9659e44f3a11c3f65e3f8d2c2bc7944124290192355913ead6be3ca047a01d2b7a66f48aeed19b96b9209fab73922a1424d4006c42270f8814bf5c544080db0b783402eefcc7a5b41b52d8f6c287dc1c6806994d74a77566fb0cfb946a08329478d0b255d9afbdfa860051060e73b04bb817d86888115bb1b74078a479e9dda2a957e14780ca5100ac7fea80f497bac01b6b9f44e6137de16616961501dcb28b0e766cf3c1fdc87c5ab701510560041857ff32f629fba9077ef7d1473ecd69d0e39ee9c899d2d2afcd2013929670d25",
"QmZKERVN8UkwLp9mPCZw4aaRx9N8Ewnkv7VQh1zyZwBSir": "0301d31c4a06e16789184aa15898d96df20e4286569cfde2f26ae44407705a3ab2969876a146e360f33516422bec027809143183b07c3d84c578dbfa87a690a50e35f450c281be0433c70e9e5d2b0aa1967719d06af2c9c2e3e257624567e4c8f9882328ed2011d327ece7cdc1a23ec370ede0ad28a00cf476156c0d7b0968e16b21e01bae11993d988415f18173bcb99887e00137202680a818549aa6944360ac03f234e9aaaa3b333ee96a9f19f693cac97ec5c736b216d210550311507766b72779021b4023d354bd35fc0f2834014911a4ea8fddff19a7a8f69e030cb119d64190fb81d3635721014b05695566d0cb890f5d86ad0d007ea2a8b3008717d89ff9775950083439969873cfacd258be04d05128de5ae60bfb704174592f6565c5539d8e6804a2e899e19acb512eeba676a5b0c64b868937b578f3741a671938aedba2329c17d21a4d910d2b2b886b5efa502c1de3f05495eef88e2247d4d751983a81a928f9b957eabdfb7f7e510ec5dedf9bcdaff92126aff162773299ab920f390fdb1b3bd9e6ae46eb3b16a07ffd69fb38c916c77ed6deb721b0355c21cb9d9cb4b22e8a41756a40c2d48a4764f6781c865a700614126c1008d910a7bbd758261bee38914b753d15259c094b57f301acff008fbad5161aff0204a96290f395206535feaedcedb0cb6121fdf31c28ab9c7d85c7dec473f531347b4f76c12a0c5eb7f0c3c0077697373a409dba2a0813be122807ae6df88e1aa5d086e265e9ea394e5b98a0d96527cf69bc794ea17c54cfa68fd5c75856a6c6d3ff9e7a9df0f22853e20ac9b6442d"
}
}

View File

@ -9,6 +9,7 @@ import (
"slices"
"github.com/iden3/go-iden3-crypto/poseidon"
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
@ -18,6 +19,8 @@ import (
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
var keyRegistryDomain = []byte("KEY_REGISTRY")
func (e *GlobalConsensusEngine) processGlobalConsensusMessageQueue() {
defer e.wg.Done()
@ -286,29 +289,56 @@ func (e *GlobalConsensusEngine) handleAppFrameMessage(message *pb.Message) {
typePrefix := e.peekMessageType(message)
switch typePrefix {
case protobufs.AppShardFrameType:
timer := prometheus.NewTimer(shardFrameProcessingDuration)
defer timer.ObserveDuration()
frame := &protobufs.AppShardFrame{}
if err := frame.FromCanonicalBytes(message.Data); err != nil {
e.logger.Debug("failed to unmarshal frame", zap.Error(err))
shardFramesProcessedTotal.WithLabelValues("error").Inc()
return
}
e.frameStoreMu.RLock()
existing, ok := e.appFrameStore[string(frame.Header.Address)]
if ok && existing != nil &&
existing.Header.FrameNumber >= frame.Header.FrameNumber {
e.frameStoreMu.RUnlock()
return
}
e.frameStoreMu.RUnlock()
valid, err := e.appFrameValidator.Validate(frame)
if !valid || err != nil {
e.logger.Debug("failed to validate frame", zap.Error(err))
shardFramesProcessedTotal.WithLabelValues("error").Inc()
}
e.pendingMessagesMu.Lock()
bundle := &protobufs.MessageBundle{
Requests: []*protobufs.MessageRequest{
&protobufs.MessageRequest{
Request: &protobufs.MessageRequest_Shard{
Shard: frame.Header,
},
},
},
Timestamp: frame.Header.Timestamp,
}
bundleBytes, err := bundle.ToCanonicalBytes()
if err != nil {
e.logger.Error("failed to add shard bundle", zap.Error(err))
e.pendingMessagesMu.Unlock()
return
}
e.pendingMessages = append(e.pendingMessages, bundleBytes)
e.pendingMessagesMu.Unlock()
e.frameStoreMu.Lock()
defer e.frameStoreMu.Unlock()
if old, ok := e.appFrameStore[string(frame.Header.Address)]; ok {
if old.Header.FrameNumber > frame.Header.FrameNumber ||
(old.Header.FrameNumber == frame.Header.FrameNumber &&
compareBits(
old.Header.PublicKeySignatureBls48581.Bitmask,
frame.Header.PublicKeySignatureBls48581.Bitmask,
) >= 0) {
return
}
}
e.appFrameStore[string(frame.Header.Address)] = frame
shardFramesProcessedTotal.WithLabelValues("success").Inc()
default:
e.logger.Debug(
"unknown message type",
@ -347,6 +377,134 @@ func (e *GlobalConsensusEngine) handlePeerInfoMessage(message *pb.Message) {
// Also add to the existing peer info manager
e.peerInfoManager.AddPeerInfo(peerInfo)
case protobufs.KeyRegistryType:
keyRegistry := &protobufs.KeyRegistry{}
if err := keyRegistry.FromCanonicalBytes(message.Data); err != nil {
e.logger.Debug("failed to unmarshal key registry", zap.Error(err))
return
}
if err := keyRegistry.Validate(); err != nil {
e.logger.Debug("invalid key registry", zap.Error(err))
return
}
validation, err := e.validateKeyRegistry(keyRegistry)
if err != nil {
e.logger.Debug("invalid key registry signatures", zap.Error(err))
return
}
txn, err := e.keyStore.NewTransaction()
if err != nil {
e.logger.Error("failed to create keystore txn", zap.Error(err))
return
}
commit := false
defer func() {
if !commit {
if abortErr := txn.Abort(); abortErr != nil {
e.logger.Warn("failed to abort keystore txn", zap.Error(abortErr))
}
}
}()
var identityAddress []byte
if keyRegistry.IdentityKey != nil &&
len(keyRegistry.IdentityKey.KeyValue) != 0 {
if err := e.keyStore.PutIdentityKey(
txn,
validation.identityPeerID,
keyRegistry.IdentityKey,
); err != nil {
e.logger.Error("failed to store identity key", zap.Error(err))
return
}
identityAddress = validation.identityPeerID
}
var proverAddress []byte
if keyRegistry.ProverKey != nil &&
len(keyRegistry.ProverKey.KeyValue) != 0 {
if err := e.keyStore.PutProvingKey(
txn,
validation.proverAddress,
&protobufs.BLS48581SignatureWithProofOfPossession{
PublicKey: keyRegistry.ProverKey,
},
); err != nil {
e.logger.Error("failed to store prover key", zap.Error(err))
return
}
proverAddress = validation.proverAddress
}
if len(identityAddress) != 0 && len(proverAddress) == 32 &&
keyRegistry.IdentityToProver != nil &&
len(keyRegistry.IdentityToProver.Signature) != 0 &&
keyRegistry.ProverToIdentity != nil &&
len(keyRegistry.ProverToIdentity.Signature) != 0 {
if err := e.keyStore.PutCrossSignature(
txn,
identityAddress,
proverAddress,
keyRegistry.IdentityToProver.Signature,
keyRegistry.ProverToIdentity.Signature,
); err != nil {
e.logger.Error("failed to store cross signatures", zap.Error(err))
return
}
}
for _, collection := range keyRegistry.KeysByPurpose {
for _, key := range collection.X448Keys {
if key == nil || key.Key == nil ||
len(key.Key.KeyValue) == 0 {
continue
}
addrBI, err := poseidon.HashBytes(key.Key.KeyValue)
if err != nil {
e.logger.Error("failed to derive x448 key address", zap.Error(err))
return
}
address := addrBI.FillBytes(make([]byte, 32))
if err := e.keyStore.PutSignedX448Key(txn, address, key); err != nil {
e.logger.Error("failed to store signed x448 key", zap.Error(err))
return
}
}
for _, key := range collection.Decaf448Keys {
if key == nil || key.Key == nil ||
len(key.Key.KeyValue) == 0 {
continue
}
addrBI, err := poseidon.HashBytes(key.Key.KeyValue)
if err != nil {
e.logger.Error(
"failed to derive decaf448 key address",
zap.Error(err),
)
return
}
address := addrBI.FillBytes(make([]byte, 32))
if err := e.keyStore.PutSignedDecaf448Key(
txn,
address,
key,
); err != nil {
e.logger.Error("failed to store signed decaf448 key", zap.Error(err))
return
}
}
}
if err := txn.Commit(); err != nil {
e.logger.Error("failed to commit key registry txn", zap.Error(err))
return
}
commit = true
default:
e.logger.Debug(
@ -356,6 +514,264 @@ func (e *GlobalConsensusEngine) handlePeerInfoMessage(message *pb.Message) {
}
}
// keyRegistryValidationResult carries the identifiers derived while
// validating a gossiped KeyRegistry, for use when persisting its keys.
type keyRegistryValidationResult struct {
	// identityPeerID is the string form of the libp2p peer ID derived from
	// the registry's Ed448 identity key.
	identityPeerID []byte
	// proverAddress is the 32-byte Poseidon hash address of the registry's
	// BLS48-581 prover key.
	proverAddress []byte
}
// validateKeyRegistry checks the structural and cryptographic validity of a
// KeyRegistry before its contents are persisted:
//
//  1. the Ed448 identity key must be present, pass Validate, and parse into
//     a libp2p public key, from which the identity peer ID is derived;
//  2. the BLS48-581 prover key must be present and pass Validate;
//  3. identity and prover keys must cross-sign each other: the identity key
//     signs keyRegistryDomain||proverKey (domain folded into the message),
//     while the prover key signs the raw identity key bytes with
//     keyRegistryDomain passed as a separate domain argument;
//  4. every X448 and Decaf448 key in KeysByPurpose must carry a valid
//     signature (delegated to validateSignedX448Key /
//     validateSignedDecaf448Key).
//
// On success it returns the derived identity peer ID and the 32-byte
// Poseidon address of the prover key. Validation order matters: callers rely
// on the returned error to reject the message before any storage writes.
func (e *GlobalConsensusEngine) validateKeyRegistry(
	keyRegistry *protobufs.KeyRegistry,
) (*keyRegistryValidationResult, error) {
	if keyRegistry.IdentityKey == nil ||
		len(keyRegistry.IdentityKey.KeyValue) == 0 {
		return nil, fmt.Errorf("key registry missing identity key")
	}
	if err := keyRegistry.IdentityKey.Validate(); err != nil {
		return nil, fmt.Errorf("invalid identity key: %w", err)
	}
	pubKey, err := pcrypto.UnmarshalEd448PublicKey(
		keyRegistry.IdentityKey.KeyValue,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal identity key: %w", err)
	}
	peerID, err := peer.IDFromPublicKey(pubKey)
	if err != nil {
		return nil, fmt.Errorf("failed to derive identity peer id: %w", err)
	}
	// The peer ID's string form is what child keys reference as their
	// ParentKeyAddress when Ed448-signed.
	identityPeerID := []byte(peerID.String())

	if keyRegistry.ProverKey == nil ||
		len(keyRegistry.ProverKey.KeyValue) == 0 {
		return nil, fmt.Errorf("key registry missing prover key")
	}
	if err := keyRegistry.ProverKey.Validate(); err != nil {
		return nil, fmt.Errorf("invalid prover key: %w", err)
	}

	if keyRegistry.IdentityToProver == nil ||
		len(keyRegistry.IdentityToProver.Signature) == 0 {
		return nil, fmt.Errorf("missing identity-to-prover signature")
	}
	// Ed448 verification takes the domain folded into the message and a nil
	// domain argument.
	identityMsg := slices.Concat(
		keyRegistryDomain,
		keyRegistry.ProverKey.KeyValue,
	)
	valid, err := e.keyManager.ValidateSignature(
		crypto.KeyTypeEd448,
		keyRegistry.IdentityKey.KeyValue,
		identityMsg,
		keyRegistry.IdentityToProver.Signature,
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf(
			"identity-to-prover signature validation failed: %w",
			err,
		)
	}
	if !valid {
		return nil, fmt.Errorf("identity-to-prover signature invalid")
	}

	if keyRegistry.ProverToIdentity == nil ||
		len(keyRegistry.ProverToIdentity.Signature) == 0 {
		return nil, fmt.Errorf("missing prover-to-identity signature")
	}
	// BLS verification takes the raw message and the domain as a separate
	// argument, unlike the Ed448 call above.
	valid, err = e.keyManager.ValidateSignature(
		crypto.KeyTypeBLS48581G1,
		keyRegistry.ProverKey.KeyValue,
		keyRegistry.IdentityKey.KeyValue,
		keyRegistry.ProverToIdentity.Signature,
		keyRegistryDomain,
	)
	if err != nil {
		return nil, fmt.Errorf(
			"prover-to-identity signature validation failed: %w",
			err,
		)
	}
	if !valid {
		return nil, fmt.Errorf("prover-to-identity signature invalid")
	}

	// The prover address is the Poseidon hash of the prover public key,
	// left-padded to 32 bytes; BLS-signed child keys reference it as their
	// ParentKeyAddress.
	addrBI, err := poseidon.HashBytes(keyRegistry.ProverKey.KeyValue)
	if err != nil {
		return nil, fmt.Errorf("failed to derive prover key address: %w", err)
	}
	proverAddress := addrBI.FillBytes(make([]byte, 32))

	for purpose, collection := range keyRegistry.KeysByPurpose {
		if collection == nil {
			continue
		}
		for _, key := range collection.X448Keys {
			if err := e.validateSignedX448Key(
				key,
				identityPeerID,
				proverAddress,
				keyRegistry,
			); err != nil {
				return nil, fmt.Errorf(
					"invalid x448 key (purpose %s): %w",
					purpose,
					err,
				)
			}
		}
		for _, key := range collection.Decaf448Keys {
			if err := e.validateSignedDecaf448Key(
				key,
				identityPeerID,
				proverAddress,
				keyRegistry,
			); err != nil {
				return nil, fmt.Errorf(
					"invalid decaf448 key (purpose %s): %w",
					purpose,
					err,
				)
			}
		}
	}
	return &keyRegistryValidationResult{
		identityPeerID: identityPeerID,
		proverAddress:  proverAddress,
	}, nil
}
// validateSignedX448Key verifies the signature attached to a single X448 key
// from a KeyRegistry. Entries without key material are tolerated and
// skipped. An Ed448 signature must come from the registry's identity key
// (ParentKeyAddress = identity peer ID) over keyRegistryDomain||key, while a
// BLS signature must come from the prover key (ParentKeyAddress = prover
// address, when one is known) over the raw key bytes with the domain passed
// separately. Decaf-signed and unsigned X448 keys are rejected.
func (e *GlobalConsensusEngine) validateSignedX448Key(
	key *protobufs.SignedX448Key,
	identityPeerID []byte,
	proverAddress []byte,
	keyRegistry *protobufs.KeyRegistry,
) error {
	if key == nil || key.Key == nil || len(key.Key.KeyValue) == 0 {
		// Nothing to verify for an empty entry.
		return nil
	}
	signedMsg := slices.Concat(keyRegistryDomain, key.Key.KeyValue)
	switch s := key.Signature.(type) {
	case *protobufs.SignedX448Key_Ed448Signature:
		ed := s.Ed448Signature
		if ed == nil || len(ed.Signature) == 0 {
			return fmt.Errorf("missing ed448 signature")
		}
		if !bytes.Equal(key.ParentKeyAddress, identityPeerID) {
			return fmt.Errorf("unexpected parent for ed448 signed x448 key")
		}
		// Ed448: domain is folded into the message, nil domain argument.
		ok, err := e.keyManager.ValidateSignature(
			crypto.KeyTypeEd448,
			keyRegistry.IdentityKey.KeyValue,
			signedMsg,
			ed.Signature,
			nil,
		)
		if err != nil {
			return fmt.Errorf("failed to validate ed448 signature: %w", err)
		}
		if !ok {
			return fmt.Errorf("ed448 signature invalid")
		}
		return nil
	case *protobufs.SignedX448Key_BlsSignature:
		bls := s.BlsSignature
		if bls == nil || len(bls.Signature) == 0 {
			return fmt.Errorf("missing bls signature")
		}
		if len(proverAddress) != 0 &&
			!bytes.Equal(key.ParentKeyAddress, proverAddress) {
			return fmt.Errorf("unexpected parent for bls signed x448 key")
		}
		// BLS: raw key bytes as message, domain passed separately.
		ok, err := e.keyManager.ValidateSignature(
			crypto.KeyTypeBLS48581G1,
			keyRegistry.ProverKey.KeyValue,
			key.Key.KeyValue,
			bls.Signature,
			keyRegistryDomain,
		)
		if err != nil {
			return fmt.Errorf("failed to validate bls signature: %w", err)
		}
		if !ok {
			return fmt.Errorf("bls signature invalid")
		}
		return nil
	case *protobufs.SignedX448Key_DecafSignature:
		return fmt.Errorf("decaf signature not supported for x448 key")
	default:
		return fmt.Errorf("missing signature for x448 key")
	}
}
// validateSignedDecaf448Key verifies the signature attached to a single
// Decaf448 key from a KeyRegistry. Entries without key material are
// tolerated and skipped. An Ed448 signature must come from the registry's
// identity key (ParentKeyAddress = identity peer ID) over
// keyRegistryDomain||key, while a BLS signature must come from the prover
// key (ParentKeyAddress = prover address, when one is known) over the raw
// key bytes with the domain passed separately. Decaf-signed and unsigned
// entries are rejected.
func (e *GlobalConsensusEngine) validateSignedDecaf448Key(
	key *protobufs.SignedDecaf448Key,
	identityPeerID []byte,
	proverAddress []byte,
	keyRegistry *protobufs.KeyRegistry,
) error {
	if key == nil || key.Key == nil || len(key.Key.KeyValue) == 0 {
		// Nothing to verify for an empty entry.
		return nil
	}
	signedMsg := slices.Concat(keyRegistryDomain, key.Key.KeyValue)
	switch s := key.Signature.(type) {
	case *protobufs.SignedDecaf448Key_Ed448Signature:
		ed := s.Ed448Signature
		if ed == nil || len(ed.Signature) == 0 {
			return fmt.Errorf("missing ed448 signature")
		}
		if !bytes.Equal(key.ParentKeyAddress, identityPeerID) {
			return fmt.Errorf("unexpected parent for ed448 signed decaf key")
		}
		// Ed448: domain is folded into the message, nil domain argument.
		ok, err := e.keyManager.ValidateSignature(
			crypto.KeyTypeEd448,
			keyRegistry.IdentityKey.KeyValue,
			signedMsg,
			ed.Signature,
			nil,
		)
		if err != nil {
			return fmt.Errorf("failed to validate ed448 signature: %w", err)
		}
		if !ok {
			return fmt.Errorf("ed448 signature invalid")
		}
		return nil
	case *protobufs.SignedDecaf448Key_BlsSignature:
		bls := s.BlsSignature
		if bls == nil || len(bls.Signature) == 0 {
			return fmt.Errorf("missing bls signature")
		}
		if len(proverAddress) != 0 &&
			!bytes.Equal(key.ParentKeyAddress, proverAddress) {
			return fmt.Errorf("unexpected parent for bls signed decaf key")
		}
		// BLS: raw key bytes as message, domain passed separately.
		ok, err := e.keyManager.ValidateSignature(
			crypto.KeyTypeBLS48581G1,
			keyRegistry.ProverKey.KeyValue,
			key.Key.KeyValue,
			bls.Signature,
			keyRegistryDomain,
		)
		if err != nil {
			return fmt.Errorf("failed to validate bls signature: %w", err)
		}
		if !ok {
			return fmt.Errorf("bls signature invalid")
		}
		return nil
	case *protobufs.SignedDecaf448Key_DecafSignature:
		return fmt.Errorf("decaf signature validation not supported")
	default:
		return fmt.Errorf("missing signature for decaf key")
	}
}
func (e *GlobalConsensusEngine) handleAlertMessage(message *pb.Message) {
defer func() {
if r := recover(); r != nil {

View File

@ -14,7 +14,7 @@ import (
)
func (e *GlobalConsensusEngine) validateGlobalConsensusMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -163,7 +163,7 @@ func (e *GlobalConsensusEngine) validateGlobalConsensusMessage(
}
func (e *GlobalConsensusEngine) validateShardConsensusMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -199,7 +199,7 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage(
}
if frametime.AppFrameSince(frame) > 20*time.Second {
shardProposalValidationTotal.WithLabelValues("reject").Inc()
shardProposalValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
@ -238,8 +238,9 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage(
}
now := time.Now().UnixMilli()
if livenessCheck.Timestamp > now+5000 ||
livenessCheck.Timestamp < now-5000 {
if livenessCheck.Timestamp > now+500 ||
livenessCheck.Timestamp < now-1000 {
shardLivenessCheckValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
@ -266,6 +267,7 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage(
now := time.Now().UnixMilli()
if vote.Timestamp > now+5000 || vote.Timestamp < now-5000 {
shardVoteValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
@ -292,6 +294,7 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage(
now := time.Now().UnixMilli()
if confirmation.Timestamp > now+5000 || confirmation.Timestamp < now-5000 {
shardConfirmationValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
@ -365,7 +368,7 @@ func (e *GlobalConsensusEngine) validateProverMessage(
}
func (e *GlobalConsensusEngine) validateAppFrameMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -382,9 +385,15 @@ func (e *GlobalConsensusEngine) validateAppFrameMessage(
switch typePrefix {
case protobufs.AppShardFrameType:
start := time.Now()
defer func() {
shardFrameValidationDuration.Observe(time.Since(start).Seconds())
}()
frame := &protobufs.AppShardFrame{}
if err := frame.FromCanonicalBytes(message.Data); err != nil {
e.logger.Debug("failed to unmarshal frame", zap.Error(err))
shardFrameValidationTotal.WithLabelValues("reject").Inc()
return tp2p.ValidationResultReject
}
@ -392,24 +401,30 @@ func (e *GlobalConsensusEngine) validateAppFrameMessage(
frame.Header.PublicKeySignatureBls48581.PublicKey == nil ||
frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue == nil {
e.logger.Debug("frame validation missing signature")
shardFrameValidationTotal.WithLabelValues("reject").Inc()
return tp2p.ValidationResultReject
}
valid, err := e.appFrameValidator.Validate(frame)
if err != nil {
e.logger.Debug("frame validation error", zap.Error(err))
shardFrameValidationTotal.WithLabelValues("reject").Inc()
return tp2p.ValidationResultReject
}
if !valid {
e.logger.Debug("invalid frame")
shardFrameValidationTotal.WithLabelValues("reject").Inc()
return tp2p.ValidationResultReject
}
if frametime.AppFrameSince(frame) > 20*time.Second {
shardFrameValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
shardFrameValidationTotal.WithLabelValues("accept").Inc()
default:
return tp2p.ValidationResultReject
}
@ -418,7 +433,7 @@ func (e *GlobalConsensusEngine) validateAppFrameMessage(
}
func (e *GlobalConsensusEngine) validateFrameMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -469,6 +484,7 @@ func (e *GlobalConsensusEngine) validateFrameMessage(
}
if frametime.GlobalFrameSince(frame) > 20*time.Second {
frameValidationTotal.WithLabelValues("ignore").Inc()
return tp2p.ValidationResultIgnore
}
@ -482,7 +498,7 @@ func (e *GlobalConsensusEngine) validateFrameMessage(
}
func (e *GlobalConsensusEngine) validatePeerInfoMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix
@ -511,28 +527,45 @@ func (e *GlobalConsensusEngine) validatePeerInfoMessage(
return tp2p.ValidationResultReject
}
// Validate timestamp: reject if older than 1 minute or newer than 5 minutes
// from now
now := time.Now().UnixMilli()
oneMinuteAgo := now - (1 * 60 * 1000) // 1 minute ago
fiveMinutesLater := now + (5 * 60 * 1000) // 5 minutes from now
if peerInfo.Timestamp < oneMinuteAgo {
if peerInfo.Timestamp < now-1000 {
e.logger.Debug("peer info timestamp too old",
zap.Int64("peer_timestamp", peerInfo.Timestamp),
zap.Int64("cutoff", oneMinuteAgo),
)
return tp2p.ValidationResultIgnore
}
if peerInfo.Timestamp > fiveMinutesLater {
if peerInfo.Timestamp > now+5000 {
e.logger.Debug("peer info timestamp too far in future",
zap.Int64("peer_timestamp", peerInfo.Timestamp),
zap.Int64("cutoff", fiveMinutesLater),
)
return tp2p.ValidationResultIgnore
}
case protobufs.KeyRegistryType:
keyRegistry := &protobufs.KeyRegistry{}
if err := keyRegistry.FromCanonicalBytes(message.Data); err != nil {
e.logger.Debug("failed to unmarshal key registry", zap.Error(err))
return tp2p.ValidationResultReject
}
err := keyRegistry.Validate()
if err != nil {
e.logger.Debug("key registry validation error", zap.Error(err))
return tp2p.ValidationResultReject
}
now := time.Now().UnixMilli()
if int64(keyRegistry.LastUpdated) < now-1000 {
e.logger.Debug("key registry timestamp too old")
return tp2p.ValidationResultIgnore
}
if int64(keyRegistry.LastUpdated) > now+5000 {
e.logger.Debug("key registry timestamp too far in future")
return tp2p.ValidationResultIgnore
}
default:
e.logger.Debug("received unknown type", zap.Uint32("type", typePrefix))
return tp2p.ValidationResultIgnore
@ -542,7 +575,7 @@ func (e *GlobalConsensusEngine) validatePeerInfoMessage(
}
func (e *GlobalConsensusEngine) validateAlertMessage(
peerID peer.ID,
_ peer.ID,
message *pb.Message,
) tp2p.ValidationResult {
// Check if data is long enough to contain type prefix

View File

@ -15,6 +15,7 @@ import (
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/rpc"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/store"
)
func (e *GlobalConsensusEngine) GetGlobalFrame(
@ -26,14 +27,17 @@ func (e *GlobalConsensusEngine) GetGlobalFrame(
return nil, status.Error(codes.Internal, "remote peer ID not found")
}
registry, err := e.keyStore.GetKeyRegistry(
[]byte(peerID),
)
if err != nil {
return nil, status.Error(codes.PermissionDenied, "could not identify peer")
}
if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) {
registry, err := e.keyStore.GetKeyRegistry(
[]byte(peerID),
)
if err != nil {
return nil, status.Error(
codes.PermissionDenied,
"could not identify peer",
)
}
if registry.ProverKey == nil || registry.ProverKey.KeyValue == nil {
return nil, status.Error(
codes.PermissionDenied,
@ -80,6 +84,7 @@ func (e *GlobalConsensusEngine) GetGlobalFrame(
zap.String("peer_id", peerID.String()),
)
var frame *protobufs.GlobalFrame
var err error
if request.FrameNumber == 0 {
frame, err = e.globalTimeReel.GetHead()
if frame.Header.FrameNumber == 0 {
@ -111,11 +116,24 @@ func (e *GlobalConsensusEngine) GetAppShards(
ctx context.Context,
req *protobufs.GetAppShardsRequest,
) (*protobufs.GetAppShardsResponse, error) {
if len(req.ShardKey) != 35 {
return nil, errors.Wrap(errors.New("invalid shard key"), "get app shards")
peerID, ok := qgrpc.PeerIDFromContext(ctx)
if !ok {
return nil, status.Error(codes.Internal, "remote peer ID not found")
}
shards, err := e.shardsStore.GetAppShards(req.ShardKey, req.Prefix)
if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) {
if len(req.ShardKey) != 35 {
return nil, errors.Wrap(errors.New("invalid shard key"), "get app shards")
}
}
var shards []store.ShardInfo
var err error
if len(req.ShardKey) != 35 {
shards, err = e.shardsStore.RangeAppShards()
} else {
shards, err = e.shardsStore.GetAppShards(req.ShardKey, req.Prefix)
}
if err != nil {
return nil, errors.Wrap(err, "get app shards")
}
@ -164,11 +182,17 @@ func (e *GlobalConsensusEngine) GetAppShards(
}
}
shardKey := []byte{}
if len(req.ShardKey) != 35 {
shardKey = slices.Concat(shard.L1, shard.L2)
}
response.Info = append(response.Info, &protobufs.AppShardInfo{
Prefix: shard.Path,
Size: size.Bytes(),
Commitment: commitment,
DataShards: dataShards,
ShardKey: shardKey,
})
}
@ -262,6 +286,45 @@ func (e *GlobalConsensusEngine) GetLockedAddresses(
}, nil
}
// GetWorkerInfo reports the configuration of every worker known to the
// worker manager. Access is restricted to the node itself: the gRPC peer
// identity from the request context must match our own pubsub peer ID.
func (e *GlobalConsensusEngine) GetWorkerInfo(
	ctx context.Context,
	req *protobufs.GlobalGetWorkerInfoRequest,
) (*protobufs.GlobalGetWorkerInfoResponse, error) {
	peerID, ok := qgrpc.PeerIDFromContext(ctx)
	if !ok {
		return nil, status.Error(codes.Internal, "remote peer ID not found")
	}
	// NOTE(review): a mismatched peer is an authorization failure; the
	// Internal code and message are preserved for compatibility, but
	// codes.PermissionDenied would describe this branch more accurately —
	// confirm before changing the wire contract.
	if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) {
		return nil, status.Error(codes.Internal, "remote peer ID not found")
	}
	workers, err := e.workerManager.RangeWorkers()
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	items := make(
		[]*protobufs.GlobalGetWorkerInfoResponseItem,
		0,
		len(workers),
	)
	for _, worker := range workers {
		items = append(items, &protobufs.GlobalGetWorkerInfoResponseItem{
			CoreId:                uint32(worker.CoreId),
			ListenMultiaddr:       worker.ListenMultiaddr,
			StreamListenMultiaddr: worker.StreamListenMultiaddr,
			Filter:                worker.Filter,
			TotalStorage:          uint64(worker.TotalStorage),
			Allocated:             worker.Allocated,
		})
	}
	return &protobufs.GlobalGetWorkerInfoResponse{Workers: items}, nil
}
func (e *GlobalConsensusEngine) RegisterServices(server *grpc.Server) {
protobufs.RegisterGlobalServiceServer(server, e)
protobufs.RegisterDispatchServiceServer(server, e.dispatchService)

View File

@ -94,9 +94,9 @@ func BenchmarkLazyVectorCommitmentTreeVerify(b *testing.B) {
if err != nil {
b.Errorf("Failed to insert item %d: %v", i, err)
}
tree.Commit(false)
c := tree.Commit(false)
p := tree.Prove(d)
if !tree.Verify(p) {
if valid, _ := tree.Verify(c, p); !valid {
b.Errorf("bad proof")
}
}
@ -407,7 +407,7 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
proofs := tree.Prove(addresses[500])
if !tree.Verify(proofs) {
if valid, _ := tree.Verify(tcommit, proofs); !valid {
t.Errorf("proof failed")
}
@ -475,7 +475,7 @@ func TestTreeLeafReaddition(t *testing.T) {
originalProof := tree.Prove(testKey)
// Validate the proof
if !tree.Verify(originalProof) {
if valid, _ := tree.Verify(originalRoot, originalProof); !valid {
t.Errorf("Failed to verify original proof")
}
@ -500,7 +500,7 @@ func TestTreeLeafReaddition(t *testing.T) {
}
// Verify the original proof still works
if !tree.Verify(originalProof) {
if valid, _ := tree.Verify(newRoot, originalProof); !valid {
t.Errorf("Original proof no longer valid after re-adding the same leaf")
}
}
@ -556,7 +556,7 @@ func TestTreeRemoveReaddLeaf(t *testing.T) {
originalProof := tree.Prove(testKey)
// Validate the proof
if !tree.Verify(originalProof) {
if valid, _ := tree.Verify(originalRoot, originalProof); !valid {
t.Errorf("Failed to verify original proof")
}
@ -582,7 +582,7 @@ func TestTreeRemoveReaddLeaf(t *testing.T) {
}
// Verify the proof fails
if tree.Verify(originalProof) {
if valid, _ := tree.Verify(deletedRoot, originalProof); valid {
t.Errorf("Original proof still verified")
}
@ -610,7 +610,7 @@ func TestTreeRemoveReaddLeaf(t *testing.T) {
newProof := tree.Prove(testKey)
// Verify the new proof works
if !tree.Verify(newProof) {
if valid, _ := tree.Verify(restoredRoot, newProof); !valid {
t.Errorf("New proof not valid after re-adding the leaf")
}
@ -757,7 +757,7 @@ func TestTreeLongestBranch(t *testing.T) {
}
newCommit := tree.Commit(false)
if !tree.Verify(origProof) {
if valid, _ := tree.Verify(newCommit, origProof); !valid {
t.Errorf("Proof does not sustain after tree rollback.")
}
@ -895,7 +895,7 @@ func TestTreeBranchStructure(t *testing.T) {
}
// Confirm original proof still works
if !tree.Verify(initialProof) {
if valid, _ := tree.Verify(restoredRoot, initialProof); !valid {
t.Errorf("Original proof no longer valid after restoring tree structure")
}
@ -952,7 +952,7 @@ func TestTreeBranchStructure(t *testing.T) {
}
// Commit after removal
tree.Commit(false)
c := tree.Commit(false)
afterGroupRemoval := tree.GetSize()
expectedAfterRemoval := big.NewInt(3 + keysPerGroup)
@ -968,7 +968,7 @@ func TestTreeBranchStructure(t *testing.T) {
// t.Errorf("Fetch had error: %v", err)
// }
if !tree.Verify(fullProof) {
if valid, _ := tree.Verify(c, fullProof); !valid {
t.Errorf("somehow the regular proof failed?")
}
}

View File

@ -103,6 +103,7 @@ func (r *DataWorkerIPCServer) Start() error {
}
func (r *DataWorkerIPCServer) Stop() error {
r.logger.Info("stopping server gracefully")
r.server.GracefulStop()
go func() {
r.quit <- struct{}{}
@ -171,6 +172,8 @@ func (r *DataWorkerIPCServer) RespawnServer(filter []byte) error {
}
r.server = qgrpc.NewServer(
grpc.Creds(tlsCreds),
grpc.ChainUnaryInterceptor(r.authProvider.UnaryInterceptor),
grpc.ChainStreamInterceptor(r.authProvider.StreamInterceptor),
grpc.MaxRecvMsgSize(10*1024*1024),
grpc.MaxSendMsgSize(10*1024*1024),
)

View File

@ -146,13 +146,8 @@ func (e *ComputeExecutionEngine) Start() <-chan error {
go func() {
e.logger.Info("starting compute execution engine")
for {
select {
case <-e.stopChan:
e.logger.Info("stopping compute execution engine")
return
}
}
<-e.stopChan
e.logger.Info("stopping compute execution engine")
}()
return errChan
@ -920,7 +915,6 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
e.logger.Debug(
"processed individual message",
zap.String("address", hex.EncodeToString(address)),
zap.Any("state", state),
)
return &execution.ProcessMessageResult{

View File

@ -490,6 +490,7 @@ func createTestAppConsensusEngine(
pebbleDB := pstore.NewPebbleDB(logger, config.DB, 0)
clockStore := pstore.NewPebbleClockStore(pebbleDB, logger)
inboxStore := pstore.NewPebbleInboxStore(pebbleDB, logger)
shardStore := pstore.NewPebbleShardsStore(pebbleDB, logger)
hypergraphStore := pstore.NewPebbleHypergraphStore(config.DB, pebbleDB, logger, &mocks.MockVerifiableEncryptor{}, mockInclusionProver)
appTimeReel := createTestAppTimeReel(t, appAddress, clockStore)
mockProverRegistry := createTestProverRegistry()
@ -545,6 +546,7 @@ func createTestAppConsensusEngine(
mockKeyStore,
clockStore,
inboxStore,
shardStore,
hypergraphStore,
mockFrameProver,
mockInclusionProver,
@ -1015,6 +1017,8 @@ func TestComputeExecutionEngine_ProcessMessage_DeployWithPaymentAndExecute(
) {
// Helper function to perform deployment steps with specific keys
performDeploymentWithKeys := func(t *testing.T, engine *engines.ComputeExecutionEngine, mockHG *mocks.MockHypergraph, mockCompiler *mocks.MockCompiler, readKey, writeKey []byte, engineMode engines.ExecutionMode) []byte {
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
// Use provided keys
outFees := []*big.Int{big.NewInt(1)}
total := big.NewInt(10000000)
@ -2212,7 +2216,7 @@ req:A a rdfs:Property;
maskedCoinBalanceBytes1 := make([]byte, 56)
rand.Read(maskedCoinBalanceBytes1)
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1, nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
@ -3581,6 +3585,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -3667,6 +3672,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -3798,6 +3804,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -3922,6 +3929,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -4101,6 +4109,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -4233,6 +4242,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -4356,6 +4366,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -4493,6 +4504,7 @@ rdfs:range req:Request.
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockKeyManager.On("ValidateSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockBulletproofProver := new(mocks.MockBulletproofProver)
@ -4611,6 +4623,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)
@ -4705,6 +4718,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)
@ -4830,6 +4844,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)
@ -4952,6 +4967,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)
@ -5074,6 +5090,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)
@ -5192,6 +5209,7 @@ req:A a rdfs:Property;
mockHG := tests.CreateHypergraphWithInclusionProver(mockInclusionProver)
mockHG.On("Commit").Return(map[tries.ShardKey][][]byte{}).Maybe()
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockKeyManager := new(mocks.MockKeyManager)
mockBulletproofProver := new(mocks.MockBulletproofProver)
mockVerEnc := new(mocks.MockVerifiableEncryptor)

View File

@ -7,6 +7,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/execution/fees"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"source.quilibrium.com/quilibrium/monorepo/types/compiler"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
@ -62,6 +63,9 @@ func CreateExecutionEngine(
decafConstructor crypto.DecafConstructor,
compiler compiler.CircuitCompiler,
frameProver crypto.FrameProver,
rewardIssuance consensus.RewardIssuance,
proverRegistry consensus.ProverRegistry,
blsConstructor crypto.BlsConstructor,
mode ExecutionMode,
) (execution.ShardExecutionEngine, error) {
switch engineType {
@ -78,6 +82,9 @@ func CreateExecutionEngine(
verEnc,
decafConstructor,
frameProver,
rewardIssuance,
proverRegistry,
blsConstructor,
)
case EngineTypeCompute:
return NewComputeExecutionEngine(
@ -134,6 +141,9 @@ func CreateAllEngines(
decafConstructor crypto.DecafConstructor,
compiler compiler.CircuitCompiler,
frameProver crypto.FrameProver,
rewardIssuance consensus.RewardIssuance,
proverRegistry consensus.ProverRegistry,
blsConstructor crypto.BlsConstructor,
includeGlobal bool,
) ([]execution.ShardExecutionEngine, error) {
engines := make([]execution.ShardExecutionEngine, 0, 4)
@ -157,6 +167,9 @@ func CreateAllEngines(
decafConstructor,
compiler,
frameProver,
rewardIssuance,
proverRegistry,
blsConstructor,
mode,
)
if err != nil {
@ -185,6 +198,9 @@ func CreateAllEngines(
decafConstructor,
compiler,
frameProver,
rewardIssuance,
proverRegistry,
blsConstructor,
mode,
)
if err != nil {

View File

@ -81,6 +81,9 @@ func TestCreateExecutionEngine(t *testing.T) {
mockDecaf,
mockCompiler,
mockFrameProver,
nil,
nil,
nil,
engines.GlobalMode,
)
@ -126,6 +129,9 @@ func TestCreateAllEngines(t *testing.T) {
mockDecaf,
mockCompiler,
mockFrameProver,
nil,
nil,
nil,
true, // includeGlobal
)
// CreateAllEngines doesn't return error, it just logs warnings

View File

@ -15,6 +15,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
@ -37,6 +38,9 @@ type GlobalExecutionEngine struct {
verEnc crypto.VerifiableEncryptor
decafConstructor crypto.DecafConstructor
frameProver crypto.FrameProver
rewardIssuance consensus.RewardIssuance
proverRegistry consensus.ProverRegistry
blsConstructor crypto.BlsConstructor
// State
intrinsics map[string]intrinsics.Intrinsic
@ -57,6 +61,9 @@ func NewGlobalExecutionEngine(
verEnc crypto.VerifiableEncryptor,
decafConstructor crypto.DecafConstructor,
frameProver crypto.FrameProver,
rewardIssuance consensus.RewardIssuance,
proverRegistry consensus.ProverRegistry,
blsConstructor crypto.BlsConstructor,
) (*GlobalExecutionEngine, error) {
return &GlobalExecutionEngine{
logger: logger,
@ -70,6 +77,9 @@ func NewGlobalExecutionEngine(
verEnc: verEnc,
decafConstructor: decafConstructor,
frameProver: frameProver,
rewardIssuance: rewardIssuance,
proverRegistry: proverRegistry,
blsConstructor: blsConstructor,
intrinsics: make(map[string]intrinsics.Intrinsic),
}, nil
}
@ -119,13 +129,8 @@ func (e *GlobalExecutionEngine) Start() <-chan error {
go func() {
e.logger.Info("starting global execution engine")
for {
select {
case <-e.stopChan:
e.logger.Info("stopping global execution engine")
return
}
}
<-e.stopChan
e.logger.Info("stopping global execution engine")
}()
return errChan
@ -226,7 +231,8 @@ func (e *GlobalExecutionEngine) validateBundle(
op.GetConfirm() != nil ||
op.GetReject() != nil ||
op.GetKick() != nil ||
op.GetUpdate() != nil
op.GetUpdate() != nil ||
op.GetShard() != nil
if !isGlobalOp {
if e.config.Network == 0 &&
@ -272,7 +278,7 @@ func (e *GlobalExecutionEngine) validateIndividualMessage(
frameNumber uint64,
address []byte,
message *protobufs.MessageRequest,
fromBundle bool,
_ bool,
) error {
// Try to get or load the global intrinsic
intrinsic, err := e.tryGetIntrinsic(address)
@ -427,7 +433,7 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
}
// Process the operation
newState, err := intrinsic.InvokeStep(
_, err = intrinsic.InvokeStep(
frameNumber,
payload,
big.NewInt(0),
@ -438,7 +444,7 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
return nil, errors.Wrap(err, "process individual message")
}
newState, err = intrinsic.Commit()
newState, err := intrinsic.Commit()
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
@ -446,7 +452,6 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
e.logger.Debug(
"processed individual message",
zap.String("address", hex.EncodeToString(address)),
zap.Any("state", newState),
)
return &execution.ProcessMessageResult{
@ -550,6 +555,9 @@ func (e *GlobalExecutionEngine) tryGetIntrinsic(address []byte) (
e.keyManager,
e.frameProver,
e.clockStore,
e.rewardIssuance,
e.proverRegistry,
e.blsConstructor,
)
if err != nil {
return nil, errors.Wrap(err, "try get intrinsic")
@ -572,6 +580,8 @@ func (e *GlobalExecutionEngine) tryExtractMessageForIntrinsic(
switch r := message.Request.(type) {
case *protobufs.MessageRequest_Update:
payload, err = r.Update.ToCanonicalBytes()
case *protobufs.MessageRequest_Shard:
payload, err = r.Shard.ToCanonicalBytes()
case *protobufs.MessageRequest_Join:
for _, f := range r.Join.Filters {
if len(f) >= 32 {

View File

@ -49,6 +49,9 @@ func TestGlobalExecutionEngine_Start(t *testing.T) {
mockVerEnc,
mockDecaf,
mockFrameProver,
nil,
nil,
nil,
)
require.NoError(t, err)
@ -142,6 +145,9 @@ func TestGlobalExecutionEngine_ProcessMessage(t *testing.T) {
mockVerEnc,
mockDecaf,
mockFrameProver,
nil,
nil,
nil,
)
require.NoError(t, err)
@ -205,6 +211,7 @@ func TestGlobalExecutionEngine_AllOperationTypes(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockShardsStore := new(mocks.MockShardsStore)
mockKeyManager := new(mocks.MockKeyManager)
@ -369,6 +376,9 @@ func TestGlobalExecutionEngine_AllOperationTypes(t *testing.T) {
mockVerEnc,
mockDecaf,
mockFrameProver,
nil,
nil,
nil,
)
require.NoError(t, err)

View File

@ -214,14 +214,8 @@ func (e *HypergraphExecutionEngine) Start() <-chan error {
go func() {
e.logger.Info("starting hypergraph execution engine")
// Main loop
for {
select {
case <-e.stopChan:
e.logger.Info("stopping hypergraph execution engine")
return
}
}
<-e.stopChan
e.logger.Info("stopping hypergraph execution engine")
}()
return errChan
@ -363,7 +357,6 @@ func (e *HypergraphExecutionEngine) validateIndividualMessage(
) error {
isHypergraphOp := false
isUpdate := false
var err error
switch message.Request.(type) {
case *protobufs.MessageRequest_HypergraphDeploy:
isHypergraphOp = true
@ -380,9 +373,6 @@ func (e *HypergraphExecutionEngine) validateIndividualMessage(
case *protobufs.MessageRequest_HyperedgeRemove:
isHypergraphOp = true
}
if err != nil {
return errors.Wrap(err, "validate individual message")
}
if !isHypergraphOp {
return errors.Wrap(
@ -696,7 +686,7 @@ func (e *HypergraphExecutionEngine) processIndividualMessage(
}
// Process the operation
newState, err := intrinsic.InvokeStep(
_, err = intrinsic.InvokeStep(
frameNumber,
payload,
feePaid,
@ -707,7 +697,7 @@ func (e *HypergraphExecutionEngine) processIndividualMessage(
return nil, errors.Wrap(err, "process individual message")
}
newState, err = intrinsic.Commit()
newState, err := intrinsic.Commit()
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
@ -715,7 +705,6 @@ func (e *HypergraphExecutionEngine) processIndividualMessage(
e.logger.Debug(
"processed individual message",
zap.String("address", hex.EncodeToString(address)),
zap.Any("state", newState),
)
return &execution.ProcessMessageResult{

View File

@ -26,6 +26,7 @@ import (
func TestHypergraphExecutionEngine_Start(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -63,6 +64,7 @@ func TestHypergraphExecutionEngine_Start(t *testing.T) {
func TestHypergraphExecutionEngine_ProcessMessage_Deploy(t *testing.T) {
mockhg := tests.CreateHypergraphWithInclusionProver(&mocks.MockInclusionProver{})
mockhg.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphDeployReq := createHypergraphDeployPayload(t, mockhg)
vertexAdd := createVertexAddPayload(t, mockhg)
tests := []struct {
@ -163,6 +165,7 @@ func TestHypergraphExecutionEngine_ProcessMessage_Deploy(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -213,6 +216,7 @@ func TestHypergraphExecutionEngine_ProcessMessage_Deploy(t *testing.T) {
func TestHypergraphExecutionEngine_BundledMessages(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -363,6 +367,7 @@ func TestHypergraphExecutionEngine_AllOperationTypes(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)

View File

@ -101,6 +101,7 @@ func (e *TokenExecutionEngine) Prove(
case *protobufs.MessageRequest_Transaction:
transaction, err := token.TransactionFromProtobuf(
req.Transaction,
e.inclusionProver,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
@ -119,6 +120,7 @@ func (e *TokenExecutionEngine) Prove(
case *protobufs.MessageRequest_PendingTransaction:
pendingTransaction, err := token.PendingTransactionFromProtobuf(
req.PendingTransaction,
e.inclusionProver,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
@ -191,6 +193,7 @@ func (e *TokenExecutionEngine) GetCost(message []byte) (*big.Int, error) {
case *protobufs.MessageRequest_Transaction:
transaction, err := token.TransactionFromProtobuf(
req.Transaction,
e.inclusionProver,
)
if err != nil {
return nil, errors.Wrap(err, "get cost")
@ -201,6 +204,7 @@ func (e *TokenExecutionEngine) GetCost(message []byte) (*big.Int, error) {
case *protobufs.MessageRequest_PendingTransaction:
pendingTransaction, err := token.PendingTransactionFromProtobuf(
req.PendingTransaction,
e.inclusionProver,
)
if err != nil {
return nil, errors.Wrap(err, "get cost")
@ -260,14 +264,8 @@ func (e *TokenExecutionEngine) Start() <-chan error {
go func() {
e.logger.Info("starting token execution engine")
// Main loop
for {
select {
case <-e.stopChan:
e.logger.Info("stopping token execution engine")
return
}
}
<-e.stopChan
e.logger.Info("stopping token execution engine")
}()
return errChan
@ -405,7 +403,6 @@ func (e *TokenExecutionEngine) validateIndividualMessage(
) error {
isTokenOp := false
isUpdate := false
var err error
switch message.Request.(type) {
case *protobufs.MessageRequest_TokenDeploy:
isTokenOp = true
@ -420,9 +417,6 @@ func (e *TokenExecutionEngine) validateIndividualMessage(
case *protobufs.MessageRequest_Transaction:
isTokenOp = true
}
if err != nil {
return errors.Wrap(err, "validate individual message")
}
if !isTokenOp {
return errors.Wrap(
@ -771,7 +765,6 @@ func (e *TokenExecutionEngine) processIndividualMessage(
e.logger.Debug(
"processed individual message",
zap.String("address", hex.EncodeToString(address)),
zap.Any("state", newState),
)
return &execution.ProcessMessageResult{
@ -869,6 +862,7 @@ func (e *TokenExecutionEngine) handleDeploy(
e.bulletproofProver,
e.inclusionProver,
e.keyManager,
e.clockStore,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
@ -927,6 +921,7 @@ func (e *TokenExecutionEngine) tryGetIntrinsic(
e.bulletproofProver,
e.inclusionProver,
e.keyManager,
e.clockStore,
)
if err != nil {
return nil, errors.Wrap(err, "try get intrinsic")

View File

@ -7,6 +7,7 @@ import (
"slices"
"testing"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@ -19,6 +20,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/tests"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/mocks"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
@ -62,6 +64,7 @@ func createMintableTestConfig() *token.TokenIntrinsicConfiguration {
func TestTokenExecutionEngine_Start(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -137,6 +140,7 @@ func TestTokenExecutionEngine_ProcessMessage_DeployEdgeCases(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -183,6 +187,7 @@ func TestTokenExecutionEngine_BundledMessages(t *testing.T) {
t.Skip("something weird about payment setup")
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -264,6 +269,7 @@ func TestTokenExecutionEngine_ModeSwitch(t *testing.T) {
// Test that GlobalMode properly restricts operations after deployment
logger := zap.NewNop()
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
mockClockStore := new(mocks.MockClockStore)
mockKeyManager := new(mocks.MockKeyManager)
mockInclusionProver := new(mocks.MockInclusionProver)
@ -290,6 +296,7 @@ func TestTokenExecutionEngine_ModeSwitch(t *testing.T) {
mockHG.On("NewTransaction", mock.Anything).Return(mockTxn, nil).Maybe()
mockHG.On("AddVertex", mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("SetVertexData", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
mockHG.On("GetShardCommits", mock.Anything, mock.Anything).Return([][]byte{make([]byte, 64), make([]byte, 64), make([]byte, 64), make([]byte, 64)}, nil)
// Mock for TrackChange
mockHG.On("TrackChange", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
@ -327,7 +334,7 @@ func TestTokenExecutionEngine_ModeSwitch(t *testing.T) {
}
state := hgstate.NewHypergraphState(mockHG)
responses, err := globalEngine.ProcessMessage(1, big.NewInt(1), token.TOKEN_BASE_DOMAIN[:], deployMsg.Payload, state)
responses, err := globalEngine.ProcessMessage(2, big.NewInt(1), token.TOKEN_BASE_DOMAIN[:], deployMsg.Payload, state)
assert.NoError(t, err)
assert.NotNil(t, responses)
err = responses.State.Commit()
@ -376,7 +383,7 @@ func TestTokenExecutionEngine_ModeSwitch(t *testing.T) {
}
state = hgstate.NewHypergraphState(mockHG)
responses, err = globalEngine.ProcessMessage(1, big.NewInt(1), testAddr, txMsg.Payload, state)
_, err = globalEngine.ProcessMessage(2, big.NewInt(1), testAddr, txMsg.Payload, state)
assert.Error(t, err)
}
@ -428,10 +435,40 @@ func createTokenTransactionPayload(t *testing.T) *protobufs.MessageRequest {
// Create mock dependencies
mockHG := new(mocks.MockHypergraph)
mockBP := new(mocks.MockBulletproofProver)
mockBP.On("GenerateRangeProofFromBig", mock.Anything, mock.Anything, mock.Anything).Return(crypto.RangeProofResult{
Proof: make([]byte, 56),
Commitment: make([]byte, 112),
Blinding: make([]byte, 112),
}, nil)
mockBP.On("SignHidden", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(make([]byte, 336))
mockIP := new(mocks.MockInclusionProver)
mockIP.On("CommitRaw", mock.Anything, mock.Anything).Return(make([]byte, 74), nil)
mockMultiproof := new(mocks.MockMultiproof)
mockMultiproof.On("FromBytes", mock.Anything).Return(nil)
mockMultiproof.On("ToBytes").Return([]byte{}, nil)
mockIP.On("ProveMultiple", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(mockMultiproof).Maybe()
mockVE := new(mocks.MockVerifiableEncryptor)
mockDC := new(mocks.MockDecafConstructor)
mockKM := new(mocks.MockKeyManager)
mockKM.On("GetAgreementKey", "q-view-key").Return(&mocks.MockDecafAgreement{}, nil)
mockKM.On("GetAgreementKey", "q-spend-key").Return(&mocks.MockDecafAgreement{}, nil)
mockHG.On("GetVertex", mock.Anything).Return(nil, nil)
mockTraversalProofMultiproof := new(mocks.MockMultiproof)
// Create a properly sized multiproof byte array (74 bytes multicommitment + some proof data)
multiproofBytes := make([]byte, 148) // 74 for multicommitment + 74 for proof
rand.Read(multiproofBytes)
mockTraversalProofMultiproof.On("ToBytes").Return(multiproofBytes, nil)
mockTraversalProof := &tries.TraversalProof{
Multiproof: mockTraversalProofMultiproof,
SubProofs: []tries.TraversalSubProof{
{
Commits: [][]byte{make([]byte, 74)}, // At least one commit
Ys: [][]byte{make([]byte, 64)}, // Matching Ys
Paths: [][]uint64{{0}}, // At least one path
},
},
}
mockHG.On("CreateTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(mockTraversalProof, nil).Maybe()
// Mock token configuration
tokenconfig := &token.TokenIntrinsicConfiguration{
@ -445,6 +482,26 @@ func createTokenTransactionPayload(t *testing.T) *protobufs.MessageRequest {
domain := make([]byte, 32)
rand.Read(domain)
tr := &tries.VectorCommitmentTree{}
tr.Insert([]byte{0}, []byte{0, 0, 0, 0, 0, 0, 0, 0}, nil, big.NewInt(0))
tr.Insert([]byte{1 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{2 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{3 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{4 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{5 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{6 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{7 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{8 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{9 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{10 << 2}, make([]byte, 56), nil, big.NewInt(0))
tr.Insert([]byte{11 << 2}, make([]byte, 64), nil, big.NewInt(0))
tr.Insert([]byte{12 << 2}, make([]byte, 56), nil, big.NewInt(0))
tybi, _ := poseidon.HashBytes(slices.Concat(make([]byte, 32), []byte("pending:PendingTransaction")))
ty := tybi.FillBytes(make([]byte, 32))
tr.Insert(slices.Repeat([]byte{0xff}, 32), ty, nil, big.NewInt(0))
mockHG.On("GetVertexData", mock.Anything).Return(tr, nil)
// Create mock inputs
input1, _ := token.NewTransactionInput(make([]byte, 64))
input2, _ := token.NewTransactionInput(make([]byte, 64))
@ -456,7 +513,7 @@ func createTokenTransactionPayload(t *testing.T) *protobufs.MessageRequest {
rand.Read(mockSK)
out1, _ := token.NewTransactionOutput(big.NewInt(7), mockVK, mockSK)
out2, _ := token.NewTransactionOutput(big.NewInt(2), mockVK, mockSK)
rdf, _ := newTokenRDFHypergraphSchema(domain, tokenconfig)
// Create transaction
tx := token.NewTransaction(
[32]byte(domain),
@ -470,17 +527,15 @@ func createTokenTransactionPayload(t *testing.T) *protobufs.MessageRequest {
mockVE,
mockDC,
keys.ToKeyRing(mockKM, false),
"", // rdfSchema
nil, // rdfMultiprover
rdf,
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, mockIP),
)
// Mock the Prove call to set required fields
tx.RangeProof = make([]byte, 100) // Mock range proof
// Create a mock multiproof
mockMultiproof := new(mocks.MockMultiproof)
mockMultiproof.On("ToBytes").Return(make([]byte, 100), nil)
err := tx.Prove(0)
require.NoError(t, err)
tx.TraversalProof = &tries.TraversalProof{
Multiproof: mockMultiproof,
SubProofs: []tries.TraversalSubProof{
@ -646,6 +701,7 @@ func createTokenMintTransactionPayload(t *testing.T) *protobufs.MessageRequest {
keys.ToKeyRing(mockKM, false),
"", // rdfSchema
nil, // rdfMultiprover
&mocks.MockClockStore{},
)
// Mock the Prove call to set required fields

View File

@ -727,12 +727,12 @@ func main(a int, b int) int {
deployment1, err := compute.NewCodeDeployment(domain, circuit1Code, [2]string{"qcl:Int", "qcl:Int"}, inputSizes1, []string{"qcl:Int"}, compiler.NewBedlamCompiler())
require.NoError(t, err)
err = deployment1.Prove(token.FRAME_2_1_CUTOVER + 1)
err = deployment1.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
require.NoError(t, err)
// Materialize first circuit
state := hgstate.NewHypergraphState(hg)
newst, err := deployment1.Materialize(token.FRAME_2_1_CUTOVER+1, state)
newst, err := deployment1.Materialize(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1, state)
state = newst.(*hgstate.HypergraphState)
require.NoError(t, err)
@ -753,11 +753,11 @@ func main(a int, b int) int {
deployment2, err := compute.NewCodeDeployment(domain, circuit2Code, [2]string{"qcl:Int", "qcl:Int"}, inputSizes2, []string{"qcl:Int"}, compiler.NewBedlamCompiler())
require.NoError(t, err)
err = deployment2.Prove(token.FRAME_2_1_CUTOVER + 2)
err = deployment2.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
require.NoError(t, err)
// Materialize second circuit
newst, err = deployment2.Materialize(token.FRAME_2_1_CUTOVER+2, state)
newst, err = deployment2.Materialize(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+2, state)
require.NoError(t, err)
state = newst.(*hgstate.HypergraphState)

View File

@ -11,6 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
observability "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
@ -32,6 +33,9 @@ type GlobalIntrinsic struct {
keyManager keys.KeyManager
frameProver crypto.FrameProver
frameStore store.ClockStore
rewardIssuance consensus.RewardIssuance
proverRegistry consensus.ProverRegistry
blsConstructor crypto.BlsConstructor
}
var GLOBAL_RDF_SCHEMA = `BASE <https://types.quilibrium.com/schema-repository/>
@ -618,6 +622,60 @@ func (a *GlobalIntrinsic) Validate(
observability.ValidateTotal.WithLabelValues("global", "prover_kick").Inc()
return nil
case protobufs.FrameHeaderType:
// Parse ProverKick directly from input
pbHeader := &protobufs.FrameHeader{}
if err := pbHeader.FromCanonicalBytes(input); err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return errors.Wrap(err, "validate")
}
op, err := NewProverShardUpdate(
pbHeader,
a.keyManager,
a.hypergraph,
a.rdfMultiprover,
a.frameProver,
a.rewardIssuance,
a.proverRegistry,
a.blsConstructor,
)
if err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return errors.Wrap(err, "validate")
}
valid, err := op.Verify(frameNumber)
if err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return errors.Wrap(err, "validate")
}
if !valid {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return errors.Wrap(
errors.New("invalid prover shard update"),
"validate",
)
}
observability.ValidateTotal.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return nil
default:
observability.ValidateErrors.WithLabelValues(
"global",
@ -1769,6 +1827,9 @@ func LoadGlobalIntrinsic(
keyManager keys.KeyManager,
frameProver crypto.FrameProver,
frameStore store.ClockStore,
rewardIssuance consensus.RewardIssuance,
proverRegistry consensus.ProverRegistry,
blsConstructor crypto.BlsConstructor,
) (*GlobalIntrinsic, error) {
// Verify the address is the global intrinsic address
if !bytes.Equal(address, intrinsics.GLOBAL_INTRINSIC_ADDRESS[:]) {
@ -1793,6 +1854,9 @@ func LoadGlobalIntrinsic(
keyManager: keyManager,
frameProver: frameProver,
frameStore: frameStore,
rewardIssuance: rewardIssuance,
proverRegistry: proverRegistry,
blsConstructor: blsConstructor,
}, nil
}

View File

@ -174,9 +174,22 @@ func (p *ProverConfirm) Materialize(
return nil, errors.Wrap(err, "materialize")
}
// Store join confirmation frame number
// Set active frame to current
frameNumberBytes := make([]byte, 8)
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
err = p.rdfMultiprover.Set(
GLOBAL_RDF_SCHEMA,
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
"allocation:ProverAllocation",
"LastActiveFrameNumber",
frameNumberBytes,
allocationTree,
)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}
// Store join confirmation frame number
err = p.rdfMultiprover.Set(
GLOBAL_RDF_SCHEMA,
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],

View File

@ -143,6 +143,7 @@ func TestProverConfirm_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -214,6 +215,7 @@ func TestProverConfirm_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -262,6 +264,7 @@ func TestProverConfirm_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -310,6 +313,7 @@ func TestProverConfirm_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -383,6 +387,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -492,6 +497,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -599,6 +605,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -679,6 +686,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -719,6 +727,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data

View File

@ -83,6 +83,7 @@ func TestProverJoin_Verify(t *testing.T) {
copy(fullAddress[32:], address)
mockHypergraph.On("GetVertexData", fullAddress).Return(nil, assert.AnError)
mockHypergraph.On("GetProver").Return(mockInclusionProver)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockFrameProver.On("VerifyMultiProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
// Configure mock key manager
@ -174,6 +175,7 @@ func TestProverJoin_Verify(t *testing.T) {
// Test case: verify succeeds if prover is in left state (4)
mockKeyManager = new(mocks.MockKeyManager)
mockHypergraph = new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Create a tree showing prover is left (status = 4)
tree = &qcrypto.VectorCommitmentTree{}
@ -238,6 +240,7 @@ func TestProverJoin_Materialize(t *testing.T) {
copy(fullAddress[32:], address)
// Configure mock to return nil vertex (prover doesn't exist yet)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockHypergraph.On("GetVertex", mock.Anything).Return(nil, assert.AnError)
mockHypergraph.On("GetVertexData", mock.Anything).Return(nil, assert.AnError)
mockHypergraph.On("GetHyperedge", mock.Anything).Return(nil, assert.AnError)
@ -295,6 +298,7 @@ func TestProverJoin_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockInclusionProver := new(mocks.MockInclusionProver)
mockFrameProver := new(mocks.MockFrameProver)
@ -393,6 +397,7 @@ func TestProverJoin_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockInclusionProver := new(mocks.MockInclusionProver)
mockFrameProver := new(mocks.MockFrameProver)
@ -454,6 +459,7 @@ func TestProverJoin_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockFrameProver := new(mocks.MockFrameProver)
mockFrameStore := new(mocks.MockClockStore)
@ -485,6 +491,7 @@ func TestProverJoin_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockInclusionProver := new(mocks.MockInclusionProver)
mockFrameProver := new(mocks.MockFrameProver)
@ -618,6 +625,7 @@ func TestProverJoin_GetCost(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockFrameStore := new(mocks.MockClockStore)
// Test data

View File

@ -16,6 +16,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/store"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
@ -41,6 +42,7 @@ type ProverKick struct {
hypergraph hypergraph.Hypergraph
rdfMultiprover *schema.RDFMultiprover
proverRegistry consensus.ProverRegistry
clockStore store.ClockStore
}
func NewProverKick(
@ -53,6 +55,7 @@ func NewProverKick(
hypergraph hypergraph.Hypergraph,
rdfMultiprover *schema.RDFMultiprover,
proverRegistry consensus.ProverRegistry,
clockStore store.ClockStore,
) (*ProverKick, error) {
return &ProverKick{
FrameNumber: frameNumber,
@ -64,6 +67,7 @@ func NewProverKick(
hypergraph: hypergraph,
rdfMultiprover: rdfMultiprover,
proverRegistry: proverRegistry,
clockStore: clockStore,
}, nil
}
@ -392,10 +396,16 @@ func (p *ProverKick) Verify(frameNumber uint64) (bool, error) {
)
}
frame, err := p.clockStore.GetGlobalClockFrame(frameNumber - 1)
if err != nil {
return false, errors.Wrap(err, "verify")
}
validTraversal, err := p.hypergraph.VerifyTraversalProof(
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
hypergraph.VertexAtomType,
hypergraph.AddsPhaseType,
frame.Header.ProverTreeCommitment,
p.TraversalProof,
)
if err != nil {

View File

@ -42,6 +42,13 @@ func createTestFrameHeader(frameNumber uint64, pubKey []byte, bitmask []byte, ou
func TestProverKick_Prove(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -101,6 +108,7 @@ func TestProverKick_Prove(t *testing.T) {
mockHypergraph,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -119,6 +127,13 @@ func TestProverKick_Verify(t *testing.T) {
t.Run("Valid kick with equivocation", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -168,7 +183,7 @@ func TestProverKick_Verify(t *testing.T) {
big.NewInt(0),
), nil)
mockHypergraph.On("CreateTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&tries.TraversalProof{}, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockHypergraph.On("GetVertexData", mock.Anything).Return(kickedTree, nil)
mockFrameProver := &mocks.MockFrameProver{}
mockFrameProver.On("VerifyFrameHeaderSignature", mock.Anything, mock.Anything).Return(true, nil)
@ -218,6 +233,7 @@ func TestProverKick_Verify(t *testing.T) {
mockHypergraph,
rdfMultiprover,
mockProverRegistry,
mockClockStore,
)
require.NoError(t, err)
@ -233,6 +249,13 @@ func TestProverKick_Verify(t *testing.T) {
t.Run("Invalid - no equivocation (same frames)", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -265,6 +288,7 @@ func TestProverKick_Verify(t *testing.T) {
mockHypergraph,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -278,6 +302,13 @@ func TestProverKick_Verify(t *testing.T) {
t.Run("Invalid - different frame numbers", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -322,6 +353,7 @@ func TestProverKick_Verify(t *testing.T) {
mockHypergraph,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -335,8 +367,15 @@ func TestProverKick_Verify(t *testing.T) {
t.Run("Invalid - no overlapping bitmasks", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockFrameProver := new(mocks.MockFrameProver)
mockFrameProver.On("VerifyFrameHeaderSignature", mock.Anything, mock.Anything).Return(true, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -386,6 +425,7 @@ func TestProverKick_Verify(t *testing.T) {
mockHypergraph,
rdfMultiprover,
mockProverRegistry,
mockClockStore,
)
require.NoError(t, err)
@ -399,8 +439,15 @@ func TestProverKick_Verify(t *testing.T) {
t.Run("Invalid - kicked not active", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockFrameProver := new(mocks.MockFrameProver)
mockFrameProver.On("VerifyFrameHeaderSignature", mock.Anything, mock.Anything).Return(true, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -452,7 +499,7 @@ func TestProverKick_Verify(t *testing.T) {
big.NewInt(0),
), nil)
mockHypergraph.On("GetVertexData", mock.Anything).Return(kickedTree, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
// Create the prover kick operation
rdfMultiprover := createMockRDFMultiprover()
@ -466,6 +513,7 @@ func TestProverKick_Verify(t *testing.T) {
mockHypergraph,
rdfMultiprover,
mockProverRegistry,
mockClockStore,
)
require.NoError(t, err)
@ -481,7 +529,14 @@ func TestProverKick_Materialize(t *testing.T) {
t.Run("Materialize kick - updates status to left", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(253000)
@ -581,6 +636,7 @@ func TestProverKick_Materialize(t *testing.T) {
nil,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -594,7 +650,14 @@ func TestProverKick_Materialize(t *testing.T) {
t.Run("Materialize prover not found - returns error", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(253000)
@ -646,6 +709,7 @@ func TestProverKick_Materialize(t *testing.T) {
nil,
createMockRDFMultiprover(),
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -659,7 +723,14 @@ func TestProverKick_Materialize(t *testing.T) {
t.Run("Materialize hypergraph add error - returns error", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(253000)
@ -733,6 +804,7 @@ func TestProverKick_Materialize(t *testing.T) {
nil,
createMockRDFMultiprover(),
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -748,7 +820,14 @@ func TestProverKick_Materialize(t *testing.T) {
t.Run("Materialize with existing data - preserves other data", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(253000)
@ -844,6 +923,7 @@ func TestProverKick_Materialize(t *testing.T) {
nil,
createMockRDFMultiprover(),
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -857,6 +937,13 @@ func TestProverKick_Materialize(t *testing.T) {
func TestProverKick_GetCost(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
out1, out2 := make([]byte, 516), make([]byte, 516)
out1[0] = 0xff
@ -880,6 +967,7 @@ func TestProverKick_GetCost(t *testing.T) {
mockHypergraph,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)
@ -893,8 +981,15 @@ func TestProverKick_VerifyEquivocation(t *testing.T) {
t.Run("Different bitmask lengths with overlap", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockFrameProver := new(mocks.MockFrameProver)
mockFrameProver.On("VerifyFrameHeaderSignature", mock.Anything, mock.Anything).Return(true, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -939,7 +1034,7 @@ func TestProverKick_VerifyEquivocation(t *testing.T) {
), nil)
mockHypergraph.On("GetVertexData", mock.Anything).Return(kickedTree, nil)
mockHypergraph.On("GetHyperedge", mock.Anything).Return(&mockHyperedge{}, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockInclusionProver := &mocks.MockInclusionProver{}
mp := &mocks.MockMultiproof{}
mp.On("FromBytes", mock.Anything).Return(nil)
@ -990,6 +1085,7 @@ func TestProverKick_VerifyEquivocation(t *testing.T) {
mockHypergraph,
rdfMultiprover,
mockProverRegistry,
mockClockStore,
)
require.NoError(t, err)
proverKick.Proof = []byte{0x00}
@ -1005,6 +1101,13 @@ func TestProverKick_VerifyEquivocation(t *testing.T) {
t.Run("Missing BLS signature in frame", func(t *testing.T) {
// Setup
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
mockClockStore := new(mocks.MockClockStore)
mockClockStore.On("GetGlobalClockFrame", mock.Anything).Return(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
ProverTreeCommitment: make([]byte, 64), // just needs to match shape
},
}, nil)
// Test data
frameNumber := uint64(12345)
@ -1043,6 +1146,7 @@ func TestProverKick_VerifyEquivocation(t *testing.T) {
mockHypergraph,
rdfMultiprover,
&mocks.MockProverRegistry{},
mockClockStore,
)
require.NoError(t, err)

View File

@ -94,6 +94,7 @@ func TestProverLeave_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filters := [][]byte{[]byte("testfilter1"), []byte("testfilter2")}
@ -196,6 +197,7 @@ func TestProverLeave_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filters := [][]byte{[]byte("testfilter")}
@ -251,6 +253,7 @@ func TestProverLeave_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filters := [][]byte{[]byte("testfilter")}
@ -340,6 +343,7 @@ func TestProverLeave_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -440,6 +444,7 @@ func TestProverLeave_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data

View File

@ -84,6 +84,7 @@ func TestProverPause_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -151,6 +152,7 @@ func TestProverPause_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -195,6 +197,7 @@ func TestProverPause_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -306,6 +309,7 @@ func TestProverPause_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -407,6 +411,7 @@ func TestProverPause_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -499,6 +504,7 @@ func TestProverPause_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -592,6 +598,7 @@ func TestProverPause_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data

View File

@ -80,6 +80,7 @@ func TestProverReject_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -151,6 +152,7 @@ func TestProverReject_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -222,6 +224,7 @@ func TestProverReject_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -314,6 +317,7 @@ func TestProverReject_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")

View File

@ -101,6 +101,7 @@ func TestProverResume_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -172,6 +173,7 @@ func TestProverResume_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -220,6 +222,7 @@ func TestProverResume_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -241,7 +244,7 @@ func TestProverResume_Verify(t *testing.T) {
mockHypergraph.On("GetVertex", mock.Anything).Return(hypergraph.NewVertex([32]byte(intrinsics.GLOBAL_INTRINSIC_ADDRESS), [32]byte(address), make([]byte, 74), big.NewInt(0)), nil)
mockHypergraph.On("GetVertexData", mock.Anything).Return(tree, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
mockHypergraph.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
// Create the prover resume operation
rdfMultiprover := createMockRDFMultiprover()
@ -265,6 +268,7 @@ func TestProverResume_Verify(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
// Test data
filter := []byte("testfilter")
@ -336,6 +340,7 @@ func TestProverResume_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -439,6 +444,7 @@ func TestProverResume_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -531,6 +537,7 @@ func TestProverResume_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data
@ -626,6 +633,7 @@ func TestProverResume_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
// Test data

View File

@ -0,0 +1,599 @@
package global
import (
"bytes"
"encoding/binary"
"math"
"math/big"
"slices"
"sort"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
const (
	// ringGroupSize is the number of provers grouped into each reward ring
	// (see computeRingAssignments).
	ringGroupSize = 8
	// defaultShardLeaves is the leaf count used when the shard's hypergraph
	// metadata reports zero leaves.
	defaultShardLeaves = 1
	// rewardUnits is the base unit quantity passed to rewardIssuance.Calculate.
	rewardUnits = 8_000_000_000
)
// ProverShardUpdate is a global intrinsic operation that, given a confirmed
// shard frame header, distributes rewards to the provers that signed it and
// records their allocation activity (see Materialize).
type ProverShardUpdate struct {
	// FrameHeader is the confirmed shard frame header this update applies.
	FrameHeader *protobufs.FrameHeader

	// Private dependencies
	keyManager     keys.KeyManager
	hypergraph     hypergraph.Hypergraph
	rdfMultiprover *schema.RDFMultiprover
	frameProver    crypto.FrameProver
	rewardIssuance consensus.RewardIssuance
	proverRegistry consensus.ProverRegistry
	blsConstructor crypto.BlsConstructor
}
// NewProverShardUpdate constructs a ProverShardUpdate for the given confirmed
// frame header and its collaborating dependencies. It currently never fails;
// the error result is kept for constructor-signature parity.
func NewProverShardUpdate(
	frameHeader *protobufs.FrameHeader,
	keyManager keys.KeyManager,
	hypergraph hypergraph.Hypergraph,
	rdfMultiprover *schema.RDFMultiprover,
	frameProver crypto.FrameProver,
	rewardIssuance consensus.RewardIssuance,
	proverRegistry consensus.ProverRegistry,
	blsConstructor crypto.BlsConstructor,
) (*ProverShardUpdate, error) {
	update := &ProverShardUpdate{
		FrameHeader:    frameHeader,
		keyManager:     keyManager,
		hypergraph:     hypergraph,
		rdfMultiprover: rdfMultiprover,
		frameProver:    frameProver,
		rewardIssuance: rewardIssuance,
		proverRegistry: proverRegistry,
		blsConstructor: blsConstructor,
	}
	return update, nil
}
// GetCost reports the execution cost of this operation; shard updates are
// free, so the cost is always zero.
func (p *ProverShardUpdate) GetCost() (*big.Int, error) {
	cost := new(big.Int)
	return cost, nil
}
// Prove is a no-op for this operation: the update carries its own proof (the
// signed frame header) and is only applied after consensus confirmation.
func (p *ProverShardUpdate) Prove(uint64) error {
	// This intrinsic is applied post-consensus confirmation and self contains
	// a proof
	return nil
}
// Verify checks that the shard update is valid: buildContext validates all
// dependencies and the frame header (including its BLS signature via
// VerifyFrameHeader), and the update must target the frame immediately
// following the header's frame number.
func (p *ProverShardUpdate) Verify(frameNumber uint64) (bool, error) {
	// buildContext already runs VerifyFrameHeader on the same header and
	// constructor, so the second explicit verification the previous version
	// performed here was redundant work and has been removed.
	_, err := p.buildContext()
	if err != nil {
		return false, errors.Wrap(err, "verify")
	}

	if frameNumber != p.FrameHeader.FrameNumber+1 {
		return false, errors.Wrap(errors.New("invalid update"), "verify")
	}

	return true, nil
}
// GetReadAddresses reports the addresses this operation reads during
// materialization; the shard update declares none.
func (p *ProverShardUpdate) GetReadAddresses(uint64) ([][]byte, error) {
	return nil, nil
}
// GetWriteAddresses returns the deduplicated set of addresses Materialize
// will write: each participant's reward address (composited under the global
// intrinsic domain) and its allocation address. The result order is
// unspecified (map iteration).
func (p *ProverShardUpdate) GetWriteAddresses(uint64) ([][]byte, error) {
	ctx, err := p.buildContext()
	if err != nil {
		return nil, errors.Wrap(err, "get write addresses")
	}

	seen := map[string][]byte{}
	record := func(addr []byte) {
		seen[string(addr)] = addr
	}

	for _, idx := range ctx.participantIndices {
		prover := ctx.activeProvers[idx]

		rewardKey, err := p.rewardAddress(prover.Address)
		if err != nil {
			return nil, errors.Wrap(err, "get write addresses")
		}
		record(compositeAddress(
			intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
			rewardKey,
		))

		allocationAddr, err := p.allocationAddress(prover, p.FrameHeader.Address)
		if err != nil {
			return nil, errors.Wrap(err, "get write addresses")
		}
		record(allocationAddr[:])
	}

	result := make([][]byte, 0, len(seen))
	for _, addr := range seen {
		result = append(result, slices.Clone(addr))
	}
	return result, nil
}
// Materialize distributes the frame reward across the provers that signed
// the frame header and refreshes each participant's allocation activity
// marker. Rewards are computed per ring and split evenly among that ring's
// participants (integer division; any remainder is not distributed).
//
// Fix: the error-wrap messages below previously read "materalize".
func (p *ProverShardUpdate) Materialize(
	frameNumber uint64,
	state state.State,
) (state.State, error) {
	hg, ok := state.(*hgstate.HypergraphState)
	if !ok {
		return nil, errors.Wrap(
			errors.New("invalid state type for shard update"),
			"materialize",
		)
	}

	ctx, err := p.buildContext()
	if err != nil {
		return nil, err
	}

	worldSize := p.hypergraph.GetSize(nil, nil).Uint64()

	// First pass: compute the per-participant share for each ring.
	rewardsPerRing := make(map[uint8]*big.Int, len(ctx.participantsByRing))
	for ring, participants := range ctx.participantsByRing {
		alloc := map[string]*consensus.ProverAllocation{
			string(p.FrameHeader.Address): {
				Ring:      ring,
				Shards:    ctx.shardCount,
				StateSize: ctx.stateSize,
			},
		}

		outputs, err := p.rewardIssuance.Calculate(
			uint64(p.FrameHeader.Difficulty),
			worldSize,
			rewardUnits,
			[]map[string]*consensus.ProverAllocation{alloc},
		)
		if err != nil {
			return nil, errors.Wrap(err, "materialize")
		}
		if len(outputs) != 1 {
			return nil, errors.Wrap(
				errors.New("unexpected reward issuance output size"),
				"materialize",
			)
		}

		// participantsByRing entries are built via append in buildContext,
		// so an empty slice should not occur; guard anyway to avoid a
		// division by zero below.
		if len(participants) == 0 {
			continue
		}

		// Copy before dividing so the issuance output is not mutated.
		share := new(big.Int).Set(outputs[0])
		share.Div(share, big.NewInt(int64(len(participants))))
		rewardsPerRing[ring] = share
	}

	// Second pass: apply the reward and activity update to each participant.
	for ring, participants := range ctx.participantsByRing {
		share := rewardsPerRing[ring]
		for _, idx := range participants {
			prover := ctx.activeProvers[idx]

			if err := p.applyReward(
				hg,
				frameNumber,
				prover,
				p.FrameHeader.Address,
				share,
			); err != nil {
				return nil, errors.Wrap(err, "materialize")
			}

			if err := p.updateAllocationActivity(
				hg,
				frameNumber,
				prover,
				p.FrameHeader.Address,
			); err != nil {
				return nil, errors.Wrap(err, "materialize")
			}
		}
	}

	return state, nil
}
// shardUpdateContext carries the validated, derived data shared by the
// update's methods (built once by buildContext).
type shardUpdateContext struct {
	// activeProvers is the registry's active prover set for this shard.
	activeProvers []*consensus.ProverInfo
	// participantIndices are sorted indices into activeProvers for the
	// provers present in the frame header's signature bitmask.
	participantIndices []int
	// participantsByRing groups participant indices by assigned ring.
	participantsByRing map[uint8][]int
	// ringByProverAddress maps prover address bytes (as a string key) to
	// the prover's ring.
	ringByProverAddress map[string]uint8
	// stateSize and shardCount come from the shard's hypergraph metadata
	// (shardCount falls back to defaultShardLeaves when zero).
	stateSize  uint64
	shardCount uint64
}
// buildContext validates the operation's dependencies and frame header and
// assembles the derived data shared by Verify, GetWriteAddresses and
// Materialize: the active prover set, the signing participants, their ring
// assignments, and the shard's state-size/leaf-count metadata.
//
// Fix: the nil check on p.FrameHeader now runs before any dereference; the
// previous ordering read p.FrameHeader.Address first and panicked on a nil
// header (and its len == 0 check was unreachable behind the < 32 check).
func (p *ProverShardUpdate) buildContext() (*shardUpdateContext, error) {
	if p.frameProver == nil {
		return nil, errors.New("frame prover dependency missing")
	}
	if p.rewardIssuance == nil {
		return nil, errors.New("reward issuance dependency missing")
	}
	if p.proverRegistry == nil {
		return nil, errors.New("prover registry dependency missing")
	}
	if p.blsConstructor == nil {
		return nil, errors.New("bls constructor dependency missing")
	}
	if p.hypergraph == nil {
		return nil, errors.New("hypergraph dependency missing")
	}
	if p.rdfMultiprover == nil {
		return nil, errors.New("rdf multiprover dependency missing")
	}

	if p.FrameHeader == nil {
		return nil, errors.New("frame header is nil")
	}
	if len(p.FrameHeader.Address) == 0 {
		return nil, errors.New("frame header missing address")
	}
	if len(p.FrameHeader.Address) < 32 {
		return nil, errors.New("filter length insufficient")
	}

	// Verify the header signature; setIndices are the bitmask positions of
	// the provers that signed.
	setIndices, err := p.frameProver.VerifyFrameHeader(
		p.FrameHeader,
		p.blsConstructor,
	)
	if err != nil {
		return nil, errors.Wrap(err, "verify frame header")
	}
	if len(setIndices) == 0 {
		return nil, errors.New("frame header signature bitmask empty")
	}

	activeProvers, err := p.proverRegistry.GetActiveProvers(p.FrameHeader.Address)
	if err != nil {
		return nil, errors.Wrap(err, "get active provers")
	}
	if len(activeProvers) == 0 {
		return nil, errors.New("no active provers for shard")
	}

	// Deduplicate bitmask indices and bounds-check them against the active
	// prover set.
	participantsSet := map[int]struct{}{}
	for _, idx := range setIndices {
		if int(idx) >= len(activeProvers) {
			return nil, errors.New("bitmask index exceeds active prover count")
		}
		participantsSet[int(idx)] = struct{}{}
	}

	// Require at least 2/3 participation; the cross-multiplied form avoids
	// floating point.
	if len(participantsSet)*3 < len(activeProvers)*2 {
		return nil, errors.New("insufficient prover participation (< 2/3)")
	}

	participantIndices := make([]int, 0, len(participantsSet))
	for idx := range participantsSet {
		participantIndices = append(participantIndices, idx)
	}
	sort.Ints(participantIndices)

	ringByAddress, err := p.computeRingAssignments(activeProvers)
	if err != nil {
		return nil, err
	}

	participantsByRing := make(map[uint8][]int)
	for _, idx := range participantIndices {
		prover := activeProvers[idx]
		ring := ringByAddress[string(prover.Address)]
		participantsByRing[ring] = append(participantsByRing[ring], idx)
	}

	metadata, err := p.hypergraph.GetMetadataAtKey(p.FrameHeader.Address)
	if err != nil {
		return nil, errors.Wrap(err, "get hypergraph metadata")
	}
	if len(metadata) == 0 {
		return nil, errors.New("missing hypergraph metadata for shard")
	}

	stateSize := metadata[0].Size
	shardCount := metadata[0].LeafCount
	if shardCount == 0 {
		// Fall back to the documented default of a single leaf.
		shardCount = defaultShardLeaves
	}

	return &shardUpdateContext{
		activeProvers:       activeProvers,
		participantIndices:  participantIndices,
		participantsByRing:  participantsByRing,
		ringByProverAddress: ringByAddress,
		stateSize:           stateSize,
		shardCount:          shardCount,
	}, nil
}
// computeRingAssignments orders the active provers by join frame (earliest
// first), breaking ties by seniority (highest first) and then by address
// bytes, and assigns each consecutive group of ringGroupSize provers to a
// ring (ring 0 holds the earliest joiners).
//
// Fix: the local candidate struct previously carried `index` and
// `confirmSet` fields that were set but never read; both are removed.
func (p *ProverShardUpdate) computeRingAssignments(
	activeProvers []*consensus.ProverInfo,
) (map[string]uint8, error) {
	type candidate struct {
		joinFrame uint64
		seniority uint64
		address   []byte
	}

	candidates := make([]candidate, len(activeProvers))
	for i, prover := range activeProvers {
		allocation, err := allocationForFilter(prover, p.FrameHeader.Address)
		if err != nil {
			return nil, err
		}

		// Fall back to the confirmation frame when no join frame was
		// recorded.
		joinFrame := allocation.JoinFrameNumber
		if joinFrame == 0 && allocation.JoinConfirmFrameNumber != 0 {
			joinFrame = allocation.JoinConfirmFrameNumber
		}

		candidates[i] = candidate{
			joinFrame: joinFrame,
			seniority: prover.Seniority,
			address:   prover.Address,
		}
	}

	sort.Slice(candidates, func(i, j int) bool {
		if candidates[i].joinFrame != candidates[j].joinFrame {
			return candidates[i].joinFrame < candidates[j].joinFrame
		}
		if candidates[i].seniority != candidates[j].seniority {
			return candidates[i].seniority > candidates[j].seniority
		}
		return bytes.Compare(candidates[i].address, candidates[j].address) < 0
	})

	ringAssignments := make(map[string]uint8, len(candidates))
	for rank, candidate := range candidates {
		// Exact for all realistic ranks: float64 represents every integer
		// up to 2^53 and ringGroupSize is a power of two.
		ring := uint8(math.Floor(float64(rank) / ringGroupSize))
		ringAssignments[string(candidate.address)] = ring
	}

	return ringAssignments, nil
}
// applyReward credits share reward units to the prover's reward vertex,
// adding the share on top of any previously recorded balance and writing
// the updated vertex back into the materialized state.
//
// NOTE(review): the two hg.Get calls below use identical arguments;
// presumably the state layer returns independent tree instances so that
// mutating the update tree does not alias the prior snapshot passed to
// NewVertexAddMaterializedState — confirm against HypergraphState.Get.
func (p *ProverShardUpdate) applyReward(
	hg *hgstate.HypergraphState,
	frameNumber uint64,
	prover *consensus.ProverInfo,
	filter []byte,
	share *big.Int,
) error {
	if share == nil || share.Sign() == 0 {
		// Nothing to distribute for this prover
		return nil
	}

	rewardAddress, err := p.rewardAddress(prover.Address)
	if err != nil {
		return errors.Wrap(err, "derive reward address")
	}

	// Prior snapshot of the reward vertex; a lookup failure is treated as
	// "no prior state" (error intentionally discarded).
	priorVertex, _ := hg.Get(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		rewardAddress,
		hgstate.VertexAddsDiscriminator,
	)
	var priorTree *tries.VectorCommitmentTree
	if priorVertex != nil {
		existing, ok := priorVertex.(*tries.VectorCommitmentTree)
		if !ok {
			return errors.New("invalid reward vertex type")
		}
		priorTree = existing
	}

	// Second fetch of the same vertex; this instance becomes the tree that
	// is mutated below.
	updateVertex, _ := hg.Get(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		rewardAddress,
		hgstate.VertexAddsDiscriminator,
	)
	var updateTree *tries.VectorCommitmentTree
	if updateVertex != nil {
		existing, ok := updateVertex.(*tries.VectorCommitmentTree)
		if !ok {
			return errors.New("invalid reward vertex type")
		}
		updateTree = existing
	}

	// Mutate the refetched tree when prior state exists; otherwise start
	// from an empty tree (first reward for this prover).
	var rewardTree *tries.VectorCommitmentTree
	if priorTree != nil {
		rewardTree = updateTree
	} else {
		rewardTree = &tries.VectorCommitmentTree{}
	}

	// Read the current balance from the prior snapshot and add the share.
	// NOTE(review): priorTree may be nil on first reward — assumes
	// rdfMultiprover.Get tolerates a nil tree; confirm.
	currentBalanceBytes, err := p.rdfMultiprover.Get(
		GLOBAL_RDF_SCHEMA,
		"reward:ProverReward",
		"Balance",
		priorTree,
	)
	if err != nil {
		return errors.Wrap(err, "get reward balance")
	}

	currentBalance := new(big.Int).SetBytes(currentBalanceBytes)
	currentBalance.Add(currentBalance, share)

	// Store the balance as a fixed-width 32-byte big-endian value.
	balanceBytes := make([]byte, 32)
	currentBalance.FillBytes(balanceBytes)

	if err := p.rdfMultiprover.Set(
		GLOBAL_RDF_SCHEMA,
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		"reward:ProverReward",
		"Balance",
		balanceBytes,
		rewardTree,
	); err != nil {
		return errors.Wrap(err, "set reward balance")
	}

	vertex := hg.NewVertexAddMaterializedState(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS,
		[32]byte(rewardAddress),
		frameNumber,
		priorTree,
		rewardTree,
	)
	if err := hg.Set(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		rewardAddress,
		hgstate.VertexAddsDiscriminator,
		frameNumber,
		vertex,
	); err != nil {
		return errors.Wrap(err, "set reward vertex")
	}

	return nil
}
// updateAllocationActivity stamps the current frame number into the prover's
// allocation record for this shard (LastActiveFrameNumber) and writes the
// updated vertex back into the materialized state.
//
// Fixes: the errors returned by both hg.Get calls were previously bound to
// `err` and silently dropped — they are now explicitly discarded with `_`
// (absence means "no prior state", mirroring applyReward); and the bare
// type assertion on updateVertex could panic and is now checked.
func (p *ProverShardUpdate) updateAllocationActivity(
	hg *hgstate.HypergraphState,
	frameNumber uint64,
	prover *consensus.ProverInfo,
	filter []byte,
) error {
	allocationAddr, err := p.allocationAddress(prover, filter)
	if err != nil {
		return err
	}

	// Prior snapshot of the allocation vertex; lookup failure is treated as
	// "no prior state" (error intentionally discarded).
	priorVertex, _ := hg.Get(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		allocationAddr[:],
		hgstate.VertexAddsDiscriminator,
	)
	var priorTree *tries.VectorCommitmentTree
	if priorVertex != nil {
		existing, ok := priorVertex.(*tries.VectorCommitmentTree)
		if !ok {
			return errors.New("invalid allocation vertex type")
		}
		priorTree = existing
	}

	// Second fetch of the same vertex; this instance is the tree mutated
	// below when prior state exists.
	updateVertex, _ := hg.Get(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		allocationAddr[:],
		hgstate.VertexAddsDiscriminator,
	)
	var allocationTree *tries.VectorCommitmentTree
	if priorTree != nil {
		// Checked assertion: a mismatched (or nil) vertex returns an error
		// instead of panicking.
		existing, ok := updateVertex.(*tries.VectorCommitmentTree)
		if !ok {
			return errors.New("invalid allocation vertex type")
		}
		allocationTree = existing
	} else {
		allocationTree = &tries.VectorCommitmentTree{}
	}

	// Record the activity frame as an 8-byte big-endian value.
	frameBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(frameBytes, frameNumber)

	if err := p.rdfMultiprover.Set(
		GLOBAL_RDF_SCHEMA,
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		"allocation:ProverAllocation",
		"LastActiveFrameNumber",
		frameBytes,
		allocationTree,
	); err != nil {
		return errors.Wrap(err, "set allocation activity frame")
	}

	vertex := hg.NewVertexAddMaterializedState(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS,
		allocationAddr,
		frameNumber,
		priorTree,
		allocationTree,
	)
	if err := hg.Set(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		allocationAddr[:],
		hgstate.VertexAddsDiscriminator,
		frameNumber,
		vertex,
	); err != nil {
		return errors.Wrap(err, "set allocation vertex")
	}

	return nil
}
// rewardAddress derives the 32-byte reward account address for a prover by
// Poseidon-hashing the QUIL token address concatenated with the prover
// address.
func (p *ProverShardUpdate) rewardAddress(
	proverAddress []byte,
) ([]byte, error) {
	preimage := slices.Concat(token.QUIL_TOKEN_ADDRESS[:], proverAddress)
	digest, err := poseidon.HashBytes(preimage)
	if err != nil {
		return nil, err
	}

	out := make([]byte, 32)
	digest.FillBytes(out)
	return out, nil
}
// allocationAddress derives the deterministic 32-byte address of a prover's
// allocation record for a given confirmation filter. The address is the
// Poseidon hash of the domain tag "PROVER_ALLOCATION", the prover's public
// key, and the filter, left-padded to 32 bytes.
func (p *ProverShardUpdate) allocationAddress(
	prover *consensus.ProverInfo,
	filter []byte,
) ([32]byte, error) {
	var addr [32]byte
	preimage := slices.Concat(
		[]byte("PROVER_ALLOCATION"),
		prover.PublicKey,
		filter,
	)
	digest, err := poseidon.HashBytes(preimage)
	if err != nil {
		return addr, errors.Wrap(err, "hash allocation address")
	}
	copy(addr[:], digest.FillBytes(make([]byte, 32)))
	return addr, nil
}
// allocationForFilter returns the prover's allocation whose confirmation
// filter matches the given filter, or an error when no allocation matches.
func allocationForFilter(
	prover *consensus.ProverInfo,
	filter []byte,
) (consensus.ProverAllocationInfo, error) {
	for idx := range prover.Allocations {
		if bytes.Equal(prover.Allocations[idx].ConfirmationFilter, filter) {
			return prover.Allocations[idx], nil
		}
	}
	var none consensus.ProverAllocationInfo
	return none, errors.New("allocation not found for prover")
}
func compositeAddress(domain []byte, address []byte) []byte {
full := make([]byte, 64)
copy(full[:32], domain)
copy(full[32:], address)
return full
}

View File

@ -25,6 +25,7 @@ func TestProverUpdate_Prove(t *testing.T) {
mockKM := new(mocks.MockKeyManager)
mockSigner := new(mocks.MockBLSSigner)
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
delegate := make([]byte, 32)
for i := range delegate {
@ -70,6 +71,7 @@ func TestProverUpdate_Prove(t *testing.T) {
func TestProverUpdate_Verify_Succeeds(t *testing.T) {
mockKM := new(mocks.MockKeyManager)
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil).Maybe()
// Setup pubkey and its address
pubKey := make([]byte, 585)
@ -127,6 +129,7 @@ func TestProverUpdate_Verify_Succeeds(t *testing.T) {
func TestProverUpdate_Verify_FailsOnBadSignature(t *testing.T) {
mockKM := new(mocks.MockKeyManager)
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
// Prover tree with pubkey
pubKey := make([]byte, 585)
@ -177,6 +180,7 @@ func TestProverUpdate_Verify_FailsOnBadSignature(t *testing.T) {
func TestProverUpdate_Verify_FailsOnAddressMismatch(t *testing.T) {
mockKM := new(mocks.MockKeyManager)
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
// Prover tree with pubkey
pubKey := make([]byte, 585)
@ -219,6 +223,7 @@ func TestProverUpdate_Verify_FailsOnAddressMismatch(t *testing.T) {
func TestProverUpdate_Materialize_PreservesBalance(t *testing.T) {
mockKM := new(mocks.MockKeyManager)
mockHG := new(mocks.MockHypergraph)
mockHG.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHG)
// Prover exists with PublicKey

View File

@ -396,7 +396,10 @@ func (t *TransactionOutput) ToProtobuf() *protobufs.TransactionOutput {
}
// FromProtobuf converts a protobuf Transaction to intrinsics Transaction
func TransactionFromProtobuf(pb *protobufs.Transaction) (*Transaction, error) {
func TransactionFromProtobuf(
pb *protobufs.Transaction,
inclusionProver crypto.InclusionProver,
) (*Transaction, error) {
if pb == nil {
return nil, nil
}
@ -431,12 +434,18 @@ func TransactionFromProtobuf(pb *protobufs.Transaction) (*Transaction, error) {
fees[i] = new(big.Int).SetBytes(fee)
}
proof, err := TraversalProofFromProtobuf(pb.TraversalProof, inclusionProver)
if err != nil {
return nil, err
}
return &Transaction{
Domain: domain,
Inputs: inputs,
Outputs: outputs,
Fees: fees,
RangeProof: pb.RangeProof,
Domain: domain,
Inputs: inputs,
Outputs: outputs,
Fees: fees,
RangeProof: pb.RangeProof,
TraversalProof: proof,
// Runtime dependencies will be injected separately
}, nil
}
@ -564,7 +573,10 @@ func (
// FromProtobuf converts a protobuf PendingTransaction to intrinsics
// PendingTransaction
func PendingTransactionFromProtobuf(pb *protobufs.PendingTransaction) (
func PendingTransactionFromProtobuf(
pb *protobufs.PendingTransaction,
inclusionProver crypto.InclusionProver,
) (
*PendingTransaction,
error,
) {
@ -602,12 +614,18 @@ func PendingTransactionFromProtobuf(pb *protobufs.PendingTransaction) (
fees[i] = new(big.Int).SetBytes(fee)
}
proof, err := TraversalProofFromProtobuf(pb.TraversalProof, inclusionProver)
if err != nil {
return nil, err
}
return &PendingTransaction{
Domain: domain,
Inputs: inputs,
Outputs: outputs,
Fees: fees,
RangeProof: pb.RangeProof,
Domain: domain,
Inputs: inputs,
Outputs: outputs,
Fees: fees,
RangeProof: pb.RangeProof,
TraversalProof: proof,
// Runtime dependencies will be injected separately
}, nil
}

View File

@ -22,6 +22,7 @@ import (
hgcrdt "source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
tkeys "source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/store"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
)
@ -44,6 +45,7 @@ type TokenIntrinsic struct {
lockedWritesMx sync.RWMutex
lockedReadsMx sync.RWMutex
state state.State
clockStore store.ClockStore
}
// SumCheck implements intrinsics.Intrinsic.
@ -573,7 +575,7 @@ func (t *TokenIntrinsic) InvokeStep(
}
// Convert from protobuf to intrinsics type
tx, err := TransactionFromProtobuf(pbTx)
tx, err := TransactionFromProtobuf(pbTx, t.inclusionProver)
if err != nil {
observability.InvokeStepErrors.WithLabelValues("token", opName).Inc()
return nil, errors.Wrap(err, "invoke step")
@ -589,19 +591,6 @@ func (t *TokenIntrinsic) InvokeStep(
tx.keyRing = keys.ToKeyRing(t.keyManager, true)
tx.rdfMultiprover = t.rdfMultiprover
// Handle TraversalProof conversion from protobuf
if pbTx.TraversalProof != nil {
tp, err := TraversalProofFromProtobuf(
pbTx.TraversalProof,
t.inclusionProver,
)
if err != nil {
observability.InvokeStepErrors.WithLabelValues("token", opName).Inc()
return nil, errors.Wrap(err, "invoke step")
}
tx.TraversalProof = tp
}
// Verify the transaction
valid, err := tx.Verify(frameNumber)
if err != nil {
@ -630,7 +619,7 @@ func (t *TokenIntrinsic) InvokeStep(
}
// Convert from protobuf to intrinsics type
tx, err := PendingTransactionFromProtobuf(pbTx)
tx, err := PendingTransactionFromProtobuf(pbTx, t.inclusionProver)
if err != nil {
observability.InvokeStepErrors.WithLabelValues("token", opName).Inc()
return nil, errors.Wrap(err, "invoke step")
@ -646,19 +635,6 @@ func (t *TokenIntrinsic) InvokeStep(
tx.keyRing = keys.ToKeyRing(t.keyManager, true)
tx.rdfMultiprover = t.rdfMultiprover
// Handle TraversalProof conversion from protobuf
if pbTx.TraversalProof != nil {
tp, err := TraversalProofFromProtobuf(
pbTx.TraversalProof,
t.inclusionProver,
)
if err != nil {
observability.InvokeStepErrors.WithLabelValues("token", opName).Inc()
return nil, errors.Wrap(err, "invoke step")
}
tx.TraversalProof = tp
}
// Verify the transaction
valid, err := tx.Verify(frameNumber)
if err != nil {
@ -705,6 +681,7 @@ func (t *TokenIntrinsic) InvokeStep(
tx.decafConstructor = t.decafConstructor
tx.keyRing = keys.ToKeyRing(t.keyManager, true)
tx.rdfMultiprover = t.rdfMultiprover
tx.clockStore = t.clockStore
// Verify the transaction
valid, err := tx.Verify(frameNumber)
@ -1050,6 +1027,7 @@ func LoadTokenIntrinsic(
bulletproofProver crypto.BulletproofProver,
inclusionProver crypto.InclusionProver,
keyManager tkeys.KeyManager,
clockStore store.ClockStore,
) (*TokenIntrinsic, error) {
var config *TokenIntrinsicConfiguration
var consensusMetadata *qcrypto.VectorCommitmentTree
@ -1121,6 +1099,7 @@ func LoadTokenIntrinsic(
rdfHypergraphSchema: rdfHypergraphSchema,
rdfMultiprover: rdfMultiprover,
state: hg.NewHypergraphState(hypergraph),
clockStore: clockStore,
}, nil
}

View File

@ -17,6 +17,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/store"
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
)
@ -1728,6 +1729,7 @@ func (i *MintTransactionInput) verifyWithProofOfMeaningfulWork(
}
proverRootDomain := [32]byte(tx.Domain)
var rewardRoot []byte
if bytes.Equal(tx.Domain[:], QUIL_TOKEN_ADDRESS) {
// Special case: PoMW mints under QUIL use global records for proofs
proverRootDomain = intrinsics.GLOBAL_INTRINSIC_ADDRESS
@ -1743,6 +1745,32 @@ func (i *MintTransactionInput) verifyWithProofOfMeaningfulWork(
"verify with mint with proof of meaningful work",
)
}
frame, err := tx.clockStore.GetGlobalClockFrame(
binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber),
)
if err != nil {
return errors.Wrap(
err,
"verify with mint with proof of meaningful work",
)
}
rewardRoot = frame.Header.ProverTreeCommitment
} else {
// Normal case: use our own record of commitments
roots, err := tx.hypergraph.GetShardCommits(
binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber),
tx.Domain[:],
)
if err != nil {
return errors.Wrap(
err,
"verify with mint with proof of meaningful work",
)
}
rewardRoot = roots[0]
}
// Verify the membership proof of the prover:
@ -1750,6 +1778,7 @@ func (i *MintTransactionInput) verifyWithProofOfMeaningfulWork(
proverRootDomain,
hypergraph.VertexAtomType,
hypergraph.AddsPhaseType,
rewardRoot,
traversalProof,
); err != nil || !valid {
return errors.Wrap(
@ -1994,10 +2023,7 @@ func (o *MintTransactionOutput) Verify(
index int,
tx *MintTransaction,
) (bool, error) {
if !bytes.Equal(
binary.BigEndian.AppendUint64(nil, frameNumber),
o.FrameNumber,
) {
if frameNumber <= binary.BigEndian.Uint64(o.FrameNumber) {
return false, errors.Wrap(
errors.New("invalid frame number"),
"verify output",
@ -2061,6 +2087,7 @@ type MintTransaction struct {
// RDF schema support
rdfHypergraphSchema string
rdfMultiprover *schema.RDFMultiprover
clockStore store.ClockStore
}
func NewMintTransaction(
@ -2077,6 +2104,7 @@ func NewMintTransaction(
keyRing keys.KeyRing,
rdfHypergraphSchema string,
rdfMultiprover *schema.RDFMultiprover,
clockStore store.ClockStore,
) *MintTransaction {
return &MintTransaction{
Domain: domain,
@ -2092,6 +2120,7 @@ func NewMintTransaction(
config: config,
rdfHypergraphSchema: rdfHypergraphSchema,
rdfMultiprover: rdfMultiprover,
clockStore: clockStore,
}
}

View File

@ -122,7 +122,7 @@ func (i *PendingTransactionInput) Prove(
var blind []byte
if bytes.Equal(i.address[:32], QUIL_TOKEN_ADDRESS) &&
frameNumber <= FRAME_2_1_CUTOVER {
frameNumber <= FRAME_2_1_CUTOVER && !BEHAVIOR_PASS {
// Structurally, the composition of the pre-2.1 packed tree is:
// 0x0000000000000000 - FrameNumber
// 0x0000000000000001 - CoinBalance
@ -939,12 +939,13 @@ func (o *PendingTransactionOutput) Verify(
frameNumber uint64,
config *TokenIntrinsicConfiguration,
) (bool, error) {
if !bytes.Equal(
binary.BigEndian.AppendUint64(nil, frameNumber),
o.FrameNumber,
) {
if frameNumber <= binary.BigEndian.Uint64(o.FrameNumber) {
return false, errors.Wrap(
errors.New("invalid frame number"),
errors.New(fmt.Sprintf(
"invalid frame number: output: %d, actual: %d",
binary.BigEndian.Uint64(o.FrameNumber),
frameNumber,
)),
"verify output",
)
}
@ -1666,14 +1667,25 @@ func (tx *PendingTransaction) Verify(frameNumber uint64) (bool, error) {
commitments = append(commitments, tx.Outputs[i].Commitment)
}
roots, err := tx.hypergraph.GetShardCommits(
binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber),
tx.Domain[:],
)
if err != nil {
return false, errors.Wrap(err, "verify")
}
valid, err := tx.hypergraph.VerifyTraversalProof(
tx.Domain,
hypergraph.VertexAtomType,
hypergraph.AddsPhaseType,
roots[0],
tx.TraversalProof,
)
if err != nil || !valid {
return false, errors.Wrap(errors.New("invalid traversal proof"), "verify")
return false, errors.Wrap(errors.New(
fmt.Sprintf("invalid traversal proof: %v", err),
), "verify")
}
if !tx.bulletproofProver.VerifyRangeProof(tx.RangeProof, commitment, 128) {

View File

@ -126,8 +126,8 @@ func TestValidPendingTransactionWithMocks(t *testing.T) {
ip.On("VerifyMultiple", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
bp.On("GenerateInputCommitmentsFromBig", mock.Anything, mock.Anything).Return([]byte("input-commit" + string(bytes.Repeat([]byte{0x00}, 56-12))))
hg.On("GetShardCommits", mock.Anything, mock.Anything).Return([][]byte{make([]byte, 64), make([]byte, 64), make([]byte, 64), make([]byte, 64)}, nil)
hg.On("CreateTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&qcrypto.TraversalProof{
Multiproof: &mocks.MockMultiproof{},
SubProofs: []qcrypto.TraversalSubProof{{
Commits: [][]byte{[]byte("valid-hg-commit" + string(bytes.Repeat([]byte{0x00}, 74-15)))},
@ -135,12 +135,12 @@ func TestValidPendingTransactionWithMocks(t *testing.T) {
Paths: [][]uint64{{0}},
}},
}, nil)
hg.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
hg.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
hg.On("GetProver").Return(ip)
ip.On("VerifyRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
bp.On("VerifyHidden", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true)
tree := &qcrypto.VectorCommitmentTree{}
tree.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, FRAME_2_1_CUTOVER+1), nil, big.NewInt(55))
tree.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(55))
tree.Insert([]byte{1 << 2}, []byte("valid-commitment"+string(bytes.Repeat([]byte{0x00}, 56-16))), nil, big.NewInt(56))
tree.Insert([]byte{2 << 2}, []byte("one-time-key"+string(bytes.Repeat([]byte{0x00}, 56-12))), nil, big.NewInt(56))
tree.Insert([]byte{3 << 2}, []byte("verification-key"+string(bytes.Repeat([]byte{0x00}, 56-16))), nil, big.NewInt(56))
@ -184,11 +184,11 @@ func TestValidPendingTransactionWithMocks(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(FRAME_2_1_CUTOVER + 1); err != nil {
if err := tx.Prove(FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
if valid, err := tx.Verify(FRAME_2_1_CUTOVER + 1); !valid {
if valid, err := tx.Verify(FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}

View File

@ -28,12 +28,13 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tests"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
thypergraph "source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
"source.quilibrium.com/quilibrium/monorepo/verenc"
)
@ -239,7 +240,7 @@ func TestValidMintWithProofOfMeaningfulWorkTransaction(t *testing.T) {
rand.Read(rand1[1:])
rand.Read(rand2)
rand.Read(rand3)
tree := &qcrypto.VectorCommitmentTree{}
tree := &tries.VectorCommitmentTree{}
tree.Insert([]byte{0}, proveraddr.FillBytes(make([]byte, 32)), nil, big.NewInt(0))
tree.Insert([]byte{1 << 2}, big.NewInt(10000).FillBytes(make([]byte, 32)), nil, big.NewInt(0))
vert := hypergraph.NewVertex([32]byte(intrinsics.GLOBAL_INTRINSIC_ADDRESS), [32]byte(rewardaddr.FillBytes(make([]byte, 32))), tree.Commit(ip, false), big.NewInt(74))
@ -250,7 +251,7 @@ func TestValidMintWithProofOfMeaningfulWorkTransaction(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(rand1), nil, big.NewInt(74)))
err = txn.Commit()
assert.NoError(t, err)
hg.Commit()
roots, err := hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
address1 := [64]byte{}
copy(address1[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address1[32:])
@ -266,6 +267,16 @@ func TestValidMintWithProofOfMeaningfulWorkTransaction(t *testing.T) {
tokenconfig := token.QUIL_TOKEN_CONFIGURATION
clockStore := store.NewPebbleClockStore(s, zap.L())
tx, _ := clockStore.NewTransaction(false)
clockStore.PutGlobalClockFrame(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
FrameNumber: token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1,
ProverTreeCommitment: roots[tries.ShardKey{L1: [3]byte{0, 0, 0}, L2: [32]byte(slices.Repeat([]byte{0xff}, 32))}][0],
},
}, tx)
tx.Commit()
// Create RDF multiprover for testing
rdfSchema, _ := prepareRDFSchemaFromConfig(token.QUIL_TOKEN_ADDRESS, tokenconfig)
parser := &schema.TurtleRDFParser{}
@ -285,12 +296,13 @@ func TestValidMintWithProofOfMeaningfulWorkTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
clockStore,
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -344,7 +356,7 @@ func TestValidMintWithVerkleMultiproofSignatureTransaction(t *testing.T) {
rand.Read(rand2)
rand.Read(rand3)
proofTree := &qcrypto.VectorCommitmentTree{}
proofTree := &tries.VectorCommitmentTree{}
proofTree.Insert([]byte{0x00, 0x00}, message, nil, big.NewInt(int64(len(message))))
proofTree.Insert([]byte{0x00, 0x01}, rand1, nil, big.NewInt(int64(len(message))))
proofTree.Insert([]byte{0x00, 0x02}, rand2, nil, big.NewInt(int64(len(message))))
@ -397,12 +409,13 @@ func TestValidMintWithVerkleMultiproofSignatureTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -500,12 +513,13 @@ func TestValidMintWithAuthorityTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -601,12 +615,13 @@ func TestValidMintWithSignatureTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -718,12 +733,13 @@ func TestValidMintWithPaymentZeroFeeBasisTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -780,7 +796,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisValidQuantityTransaction
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
otk1, _ := dc.New()
c1, _ := dc.New()
comm1 := bp.GenerateInputCommitmentsFromBig([]*big.Int{big.NewInt(8000000004)}, c1.Private())
@ -808,20 +824,21 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisValidQuantityTransaction
}
verifkey1, _ := a1.Add(psk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
tree1.Insert([]byte{4 << 2}, maskedCoinBalanceBytes1, nil, big.NewInt(56))
tree1.Insert([]byte{5 << 2}, mask1, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree1.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
txn, _ := hg.NewTransaction(false)
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address1[32:]), tree1.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address1, tree1)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -860,7 +877,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisValidQuantityTransaction
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 2); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
@ -871,7 +888,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisValidQuantityTransaction
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, false), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 2); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
@ -922,12 +939,13 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisValidQuantityTransaction
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -974,7 +992,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisInvalidQuantityTransacti
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
otk1, _ := dc.New()
c1, _ := dc.New()
comm1 := bp.GenerateInputCommitmentsFromBig([]*big.Int{big.NewInt(8000000002)}, c1.Private())
@ -1002,20 +1020,21 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisInvalidQuantityTransacti
}
verifkey1, _ := a1.Add(psk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
tree1.Insert([]byte{4 << 2}, maskedCoinBalanceBytes1, nil, big.NewInt(56))
tree1.Insert([]byte{5 << 2}, mask1, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree1.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
txn, _ := hg.NewTransaction(false)
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address1[32:]), tree1.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address1, tree1)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1054,7 +1073,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisInvalidQuantityTransacti
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 2); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
@ -1065,7 +1084,7 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisInvalidQuantityTransacti
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 2); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
@ -1112,12 +1131,13 @@ func TestValidMintWithPaymentNonDivisibleNonZeroFeeBasisInvalidQuantityTransacti
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.Error(t, err)
assert.False(t, valid)
}
@ -1163,7 +1183,7 @@ func TestValidMintWithPaymentNonZeroFeeBasisTransaction(t *testing.T) {
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
otk1, _ := dc.New()
c1, _ := dc.New()
comm1 := bp.GenerateInputCommitmentsFromBig([]*big.Int{big.NewInt(8000000002)}, c1.Private())
@ -1191,20 +1211,21 @@ func TestValidMintWithPaymentNonZeroFeeBasisTransaction(t *testing.T) {
}
verifkey1, _ := a1.Add(psk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
tree1.Insert([]byte{4 << 2}, maskedCoinBalanceBytes1, nil, big.NewInt(56))
tree1.Insert([]byte{5 << 2}, mask1, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree1.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
txn, _ := hg.NewTransaction(false)
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address1[32:]), tree1.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address1, tree1)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1243,7 +1264,7 @@ func TestValidMintWithPaymentNonZeroFeeBasisTransaction(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 2); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
@ -1254,7 +1275,7 @@ func TestValidMintWithPaymentNonZeroFeeBasisTransaction(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 2); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
@ -1301,12 +1322,13 @@ func TestValidMintWithPaymentNonZeroFeeBasisTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = minttx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = minttx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
valid, err := minttx.Verify(token.FRAME_2_1_CUTOVER + 2)
valid, err := minttx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
assert.True(t, valid)
}
@ -1352,8 +1374,8 @@ func TestValidPendingTransaction(t *testing.T) {
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree2 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
tree2 := &tries.VectorCommitmentTree{}
otk1, _ := dc.New()
otk2, _ := dc.New()
c1, _ := dc.New()
@ -1403,21 +1425,21 @@ func TestValidPendingTransaction(t *testing.T) {
}
verifkey1, _ := a1.Add(psk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
tree1.Insert([]byte{4 << 2}, maskedCoinBalanceBytes1, nil, big.NewInt(56))
tree1.Insert([]byte{5 << 2}, mask1, nil, big.NewInt(56))
verifkey2, _ := a2.Add(psk.Public())
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree2.Insert([]byte{1 << 2}, comm2, nil, big.NewInt(56))
tree2.Insert([]byte{2 << 2}, otk2.Public(), nil, big.NewInt(56))
tree2.Insert([]byte{3 << 2}, verifkey2, nil, big.NewInt(56))
tree2.Insert([]byte{4 << 2}, maskedCoinBalanceBytes2, nil, big.NewInt(56))
tree2.Insert([]byte{5 << 2}, mask2, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree1.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
tree2.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
@ -1427,6 +1449,7 @@ func TestValidPendingTransaction(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address2[32:]), tree2.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1464,7 +1487,7 @@ func TestValidPendingTransaction(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 2); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
@ -1475,7 +1498,7 @@ func TestValidPendingTransaction(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, false), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 2); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -1517,8 +1540,8 @@ func TestValidPendingTransactionFeeOnly(t *testing.T) {
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree2 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
tree2 := &tries.VectorCommitmentTree{}
otk1, _ := dc.New()
otk2, _ := dc.New()
c1, _ := dc.New()
@ -1568,21 +1591,21 @@ func TestValidPendingTransactionFeeOnly(t *testing.T) {
}
verifkey1, _ := a1.Add(psk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, verifkey1, nil, big.NewInt(56))
tree1.Insert([]byte{4 << 2}, maskedCoinBalanceBytes1, nil, big.NewInt(56))
tree1.Insert([]byte{5 << 2}, mask1, nil, big.NewInt(56))
verifkey2, _ := a2.Add(psk.Public())
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree2.Insert([]byte{1 << 2}, comm2, nil, big.NewInt(56))
tree2.Insert([]byte{2 << 2}, otk2.Public(), nil, big.NewInt(56))
tree2.Insert([]byte{3 << 2}, verifkey2, nil, big.NewInt(56))
tree2.Insert([]byte{4 << 2}, maskedCoinBalanceBytes2, nil, big.NewInt(56))
tree2.Insert([]byte{5 << 2}, mask2, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree1.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
tree2.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
@ -1592,6 +1615,7 @@ func TestValidPendingTransactionFeeOnly(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address2[32:]), tree2.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1629,7 +1653,7 @@ func TestValidPendingTransactionFeeOnly(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 2); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
@ -1640,7 +1664,7 @@ func TestValidPendingTransactionFeeOnly(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 2); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -1652,11 +1676,11 @@ func TestValidPendingTransactionMixed(t *testing.T) {
rvk, _ := dc.New()
rsk, _ := dc.New()
out1, err := token.NewPendingTransactionOutput(big.NewInt(7), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_CUTOVER+3)
out1, err := token.NewPendingTransactionOutput(big.NewInt(7), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3)
if err != nil {
t.Fatal(err)
}
out2, err := token.NewPendingTransactionOutput(big.NewInt(2), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_CUTOVER+3)
out2, err := token.NewPendingTransactionOutput(big.NewInt(2), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3)
if err != nil {
t.Fatal(err)
}
@ -1703,7 +1727,7 @@ func TestValidPendingTransactionMixed(t *testing.T) {
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree2 := &qcrypto.VectorCommitmentTree{}
tree2 := &tries.VectorCommitmentTree{}
otk2, _ := dc.New()
c2, _ := dc.New()
comm2 := bp.GenerateInputCommitmentsFromBig([]*big.Int{big.NewInt(6)}, c2.Private())
@ -1730,14 +1754,14 @@ func TestValidPendingTransactionMixed(t *testing.T) {
}
verifkey2, _ := a2.Add(psk.Public())
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree2.Insert([]byte{1 << 2}, comm2, nil, big.NewInt(56))
tree2.Insert([]byte{2 << 2}, otk2.Public(), nil, big.NewInt(56))
tree2.Insert([]byte{3 << 2}, verifkey2, nil, big.NewInt(56))
tree2.Insert([]byte{4 << 2}, maskedCoinBalanceBytes2, nil, big.NewInt(56))
tree2.Insert([]byte{5 << 2}, mask2, nil, big.NewInt(56))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
typeAddr, _ := hex.DecodeString("096de9a09f693f92cfa9cf3349bab2b3baee09f3e4f9c596514ecb3e8b0dff8f")
tree2.Insert(bytes.Repeat([]byte{0xff}, 32), typeAddr, nil, big.NewInt(32))
txn, _ := hg.NewTransaction(false)
@ -1747,6 +1771,7 @@ func TestValidPendingTransactionMixed(t *testing.T) {
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1784,7 +1809,7 @@ func TestValidPendingTransactionMixed(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 3); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); err != nil {
t.Fatal(err)
}
@ -1795,7 +1820,7 @@ func TestValidPendingTransactionMixed(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 3); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -1818,11 +1843,11 @@ func TestValidPendingTransactionLegacyOnly(t *testing.T) {
rvk, _ := dc.FromBytes(rvkpriv, rvkpub)
rsk, _ := dc.FromBytes(rskpriv, rskpub)
out1, err := token.NewPendingTransactionOutput(big.NewInt(9), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_CUTOVER+3)
out1, err := token.NewPendingTransactionOutput(big.NewInt(9), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3)
if err != nil {
t.Fatal(err)
}
// out2, err := token.NewPendingTransactionOutput(big.NewInt(2), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_CUTOVER+3)
// out2, err := token.NewPendingTransactionOutput(big.NewInt(2), vk.Public(), sk.Public(), rvk.Public(), rsk.Public(), token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3)
// if err != nil {
// t.Fatal(err)
// }
@ -1866,6 +1891,7 @@ func TestValidPendingTransactionLegacyOnly(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address1[32:]), vertTree.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address1, vertTree)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
// simulate input as commitment to total
input1, _ := token.NewPendingTransactionInput(address1[:])
@ -1896,7 +1922,7 @@ func TestValidPendingTransactionLegacyOnly(t *testing.T) {
nil,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 3); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); err != nil {
t.Fatal(err)
}
@ -1909,7 +1935,7 @@ func TestValidPendingTransactionLegacyOnly(t *testing.T) {
comms = append(comms, newTx.Outputs[0].Commitment)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 3); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -1964,8 +1990,8 @@ func TestValidTransaction(t *testing.T) {
copy(address2[:32], token.QUIL_TOKEN_ADDRESS)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree2 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
tree2 := &tries.VectorCommitmentTree{}
otk1a, _ := dc.New()
otk1b, _ := dc.New()
otk2a, _ := dc.New()
@ -2061,7 +2087,7 @@ func TestValidTransaction(t *testing.T) {
verifkey1a, _ := a1.Add(psk.Public())
verifkey1b, _ := b1.Add(othersk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1a.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, otk1b.Public(), nil, big.NewInt(56))
@ -2071,10 +2097,10 @@ func TestValidTransaction(t *testing.T) {
tree1.Insert([]byte{7 << 2}, maskedCoinBalanceBytes1b, nil, big.NewInt(56))
tree1.Insert([]byte{8 << 2}, mask1a, nil, big.NewInt(56))
tree1.Insert([]byte{9 << 2}, mask1b, nil, big.NewInt(56))
tree1.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+3), nil, big.NewInt(8))
tree1.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3), nil, big.NewInt(8))
verifkey2a, _ := a2.Add(othersk.Public())
verifkey2b, _ := b2.Add(psk.Public())
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree2.Insert([]byte{1 << 2}, comm2, nil, big.NewInt(56))
tree2.Insert([]byte{2 << 2}, otk2a.Public(), nil, big.NewInt(56))
tree2.Insert([]byte{3 << 2}, otk2b.Public(), nil, big.NewInt(56))
@ -2084,9 +2110,9 @@ func TestValidTransaction(t *testing.T) {
tree2.Insert([]byte{7 << 2}, maskedCoinBalanceBytes2b, nil, big.NewInt(56))
tree2.Insert([]byte{8 << 2}, mask2a, nil, big.NewInt(56))
tree2.Insert([]byte{9 << 2}, mask2b, nil, big.NewInt(56))
tree2.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+3), nil, big.NewInt(8))
tree2.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3), nil, big.NewInt(8))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
pendingTypeBI, _ := poseidon.HashBytes(
slices.Concat(token.QUIL_TOKEN_ADDRESS, []byte("pending:PendingTransaction")),
@ -2101,6 +2127,7 @@ func TestValidTransaction(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(token.QUIL_TOKEN_ADDRESS), [32]byte(address2[32:]), tree2.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
// simulate input as commitment to total
input1, _ := token.NewTransactionInput(address1[:])
@ -2138,7 +2165,7 @@ func TestValidTransaction(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 3); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); err != nil {
t.Fatal(err)
}
@ -2149,7 +2176,7 @@ func TestValidTransaction(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 3); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -2219,8 +2246,8 @@ func TestValidAltTransaction(t *testing.T) {
copy(address2[:32], domain)
rand.Read(address2[32:])
tree1 := &qcrypto.VectorCommitmentTree{}
tree2 := &qcrypto.VectorCommitmentTree{}
tree1 := &tries.VectorCommitmentTree{}
tree2 := &tries.VectorCommitmentTree{}
otk1a, _ := dc.New()
otk1b, _ := dc.New()
otk2a, _ := dc.New()
@ -2316,7 +2343,7 @@ func TestValidAltTransaction(t *testing.T) {
verifkey1a, _ := a1.Add(psk.Public())
verifkey1b, _ := b1.Add(othersk.Public())
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree1.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree1.Insert([]byte{1 << 2}, comm1, nil, big.NewInt(56))
tree1.Insert([]byte{2 << 2}, otk1a.Public(), nil, big.NewInt(56))
tree1.Insert([]byte{3 << 2}, otk1b.Public(), nil, big.NewInt(56))
@ -2326,10 +2353,10 @@ func TestValidAltTransaction(t *testing.T) {
tree1.Insert([]byte{7 << 2}, maskedCoinBalanceBytes1b, nil, big.NewInt(56))
tree1.Insert([]byte{8 << 2}, mask1a, nil, big.NewInt(56))
tree1.Insert([]byte{9 << 2}, mask1b, nil, big.NewInt(56))
tree1.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+3), nil, big.NewInt(8))
tree1.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3), nil, big.NewInt(8))
verifkey2a, _ := a2.Add(othersk.Public())
verifkey2b, _ := b2.Add(psk.Public())
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+1), nil, big.NewInt(8))
tree2.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(8))
tree2.Insert([]byte{1 << 2}, comm2, nil, big.NewInt(56))
tree2.Insert([]byte{2 << 2}, otk2a.Public(), nil, big.NewInt(56))
tree2.Insert([]byte{3 << 2}, otk2b.Public(), nil, big.NewInt(56))
@ -2339,9 +2366,9 @@ func TestValidAltTransaction(t *testing.T) {
tree2.Insert([]byte{7 << 2}, maskedCoinBalanceBytes2b, nil, big.NewInt(56))
tree2.Insert([]byte{8 << 2}, mask2a, nil, big.NewInt(56))
tree2.Insert([]byte{9 << 2}, mask2b, nil, big.NewInt(56))
tree2.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_CUTOVER+3), nil, big.NewInt(8))
tree2.Insert([]byte{10 << 2}, binary.BigEndian.AppendUint64(nil, token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3), nil, big.NewInt(8))
// qcrypto.DebugNonLazyNode(tree.Root, 0, "")
// tries.DebugNonLazyNode(tree.Root, 0, "")
pendingTypeBI, _ := poseidon.HashBytes(
slices.Concat(domain, []byte("pending:PendingTransaction")),
@ -2356,6 +2383,7 @@ func TestValidAltTransaction(t *testing.T) {
hg.AddVertex(txn, hypergraph.NewVertex([32]byte(domain), [32]byte(address2[32:]), tree2.Commit(ip, false), big.NewInt(55*26)))
hg.SetVertexData(txn, address2, tree2)
txn.Commit()
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3)
// simulate input as commitment to total
input1, _ := token.NewTransactionInput(address1[:])
@ -2382,7 +2410,7 @@ func TestValidAltTransaction(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(token.FRAME_2_1_CUTOVER + 3); err != nil {
if err := tx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3); err != nil {
t.Fatal(err)
}
@ -2393,7 +2421,7 @@ func TestValidAltTransaction(t *testing.T) {
err = newTx.FromBytes(output, tokenconfig, hg, bp, ip, ve, dc, keys.ToKeyRing(km, true), "", rdfMultiprover)
assert.NoError(t, err)
if valid, err := newTx.Verify(token.FRAME_2_1_CUTOVER + 3); !valid {
if valid, err := newTx.Verify(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 4); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}
@ -2425,7 +2453,7 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
rewardAddress, err := poseidon.HashBytes(slices.Concat(token.QUIL_TOKEN_ADDRESS[:], proveraddr.FillBytes(make([]byte, 32))))
assert.NoError(t, err)
proverTree := &qcrypto.VectorCommitmentTree{}
proverTree := &tries.VectorCommitmentTree{}
proverTree.Insert([]byte{0}, proveraddr.FillBytes(make([]byte, 32)), nil, big.NewInt(0))
proverTree.Insert([]byte{1 << 2}, big.NewInt(10000).FillBytes(make([]byte, 32)), nil, big.NewInt(0))
@ -2441,9 +2469,10 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
assert.NoError(t, err)
err = hg.SetVertexData(txn, vert.GetID(), proverTree)
assert.NoError(t, err)
hg.Commit()
err = txn.Commit()
assert.NoError(t, err)
roots, err := hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
hgs := hgstate.NewHypergraphState(hg)
@ -2479,6 +2508,15 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
senderSpendKey.Public(),
)
assert.NoError(t, err)
clockStore := store.NewPebbleClockStore(s, zap.L())
trx, _ := clockStore.NewTransaction(false)
clockStore.PutGlobalClockFrame(&protobufs.GlobalFrame{
Header: &protobufs.GlobalFrameHeader{
FrameNumber: token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1,
ProverTreeCommitment: roots[tries.ShardKey{L1: [3]byte{}, L2: [32]byte(intrinsics.GLOBAL_INTRINSIC_ADDRESS)}][0],
},
}, trx)
trx.Commit()
mintTx := token.NewMintTransaction(
[32]byte(token.QUIL_TOKEN_ADDRESS),
@ -2494,9 +2532,10 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
clockStore,
)
err = mintTx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = mintTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
out, err := mintTx.ToBytes()
assert.NoError(t, err)
@ -2509,10 +2548,11 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
bp,
ip,
km,
clockStore,
)
assert.NoError(t, err)
nhgs, err := intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+2, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err := intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+2, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
mintedAddress := nhgs.Changeset()[1].Address
@ -2523,6 +2563,7 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
err = hgs.Commit()
assert.NoError(t, err)
hgs = hgstate.NewHypergraphState(hg)
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
// Step 3: Create PendingTransaction
pendingInput, err := token.NewPendingTransactionInput(slices.Concat(token.QUIL_TOKEN_ADDRESS, mintedAddress))
@ -2555,12 +2596,12 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
rdfMultiprover,
)
err = pendingTx.Prove(token.FRAME_2_1_CUTOVER + 3)
err = pendingTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
out, err = pendingTx.ToBytes()
assert.NoError(t, err)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+3, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
hgs = nhgs.(*hgstate.HypergraphState)
pendingAddr := hgs.Changeset()[0].Address
@ -2572,6 +2613,7 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
err = hgs.Commit()
assert.NoError(t, err)
hgs = hgstate.NewHypergraphState(hg)
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3)
// Step 4: Create Transaction to accept the pending transaction
// Receiver accepts the pending transaction
@ -2602,13 +2644,13 @@ func TestFullTokenFlow_MintPendingTransaction(t *testing.T) {
rdfMultiprover,
)
err = acceptTx.Prove(token.FRAME_2_1_CUTOVER + 4)
err = acceptTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3)
assert.NoError(t, err)
out, err = acceptTx.ToBytes()
assert.NoError(t, err)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+4, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+4, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
hgs = nhgs.(*hgstate.HypergraphState)
acceptAddr := hgs.Changeset()[0].Address
@ -2693,7 +2735,7 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
proveraddr, err := poseidon.HashBytes(prover.Public().([]byte))
assert.NoError(t, err)
proverTree := &qcrypto.VectorCommitmentTree{}
proverTree := &tries.VectorCommitmentTree{}
proverTree.Insert([]byte{0}, prover.Public().([]byte), nil, big.NewInt(0))
proverTree.Insert([]byte{1 << 2}, big.NewInt(10000).FillBytes(make([]byte, 32)), nil, big.NewInt(0))
@ -2709,9 +2751,9 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
assert.NoError(t, err)
err = hg.SetVertexData(txn, vert.GetID(), proverTree)
assert.NoError(t, err)
hg.Commit()
err = txn.Commit()
assert.NoError(t, err)
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
hgs := hgstate.NewHypergraphState(hg)
@ -2790,16 +2832,17 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
keys.ToKeyRing(km, false),
rdfSchema,
rdfMultiprover,
store.NewPebbleClockStore(s, zap.L()),
)
err = mintTx.Prove(token.FRAME_2_1_CUTOVER + 2)
err = mintTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1)
assert.NoError(t, err)
out, err := mintTx.ToBytes()
assert.NoError(t, err)
hgs = hgstate.NewHypergraphState(hg)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+2, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+2, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
mintedAddress := nhgs.Changeset()[1].Address
@ -2810,6 +2853,7 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
err = hgs.Commit()
assert.NoError(t, err)
hgs = hgstate.NewHypergraphState(hg)
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
// Step 3: Create PendingTransaction
pendingInput, err := token.NewPendingTransactionInput(slices.Concat(tokenAddress, mintedAddress))
@ -2842,18 +2886,19 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
rdfMultiprover,
)
err = pendingTx.Prove(token.FRAME_2_1_CUTOVER + 3)
err = pendingTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2)
assert.NoError(t, err)
out, err = pendingTx.ToBytes()
assert.NoError(t, err)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+3, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+3, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
hgs = nhgs.(*hgstate.HypergraphState)
pendingAddr := hgs.Changeset()[0].Address
nhgs, err = intrinsic.Commit()
assert.NoError(t, err)
hg.Commit(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3)
hgs = nhgs.(*hgstate.HypergraphState)
err = hgs.Commit()
@ -2889,13 +2934,13 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
rdfMultiprover,
)
err = acceptTx.Prove(token.FRAME_2_1_CUTOVER + 4)
err = acceptTx.Prove(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 3)
assert.NoError(t, err)
out, err = acceptTx.ToBytes()
assert.NoError(t, err)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_CUTOVER+4, out, big.NewInt(0), big.NewInt(1), hgs)
nhgs, err = intrinsic.InvokeStep(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+4, out, big.NewInt(0), big.NewInt(1), hgs)
assert.NoError(t, err)
hgs = nhgs.(*hgstate.HypergraphState)
acceptAddr := hgs.Changeset()[0].Address

View File

@ -252,7 +252,7 @@ func TestLoadTokenIntrinsic(t *testing.T) {
inclusionProver.On("CommitRaw", mock.Anything, mock.Anything).Return([]byte("mock-commitment"), nil)
t.Run("load failure - vertex not found", func(t *testing.T) {
_, err := LoadTokenIntrinsic(appAddress, mockHypergraphErr, verEnc, decafConstructor, bulletproofProver, inclusionProver, keyManager)
_, err := LoadTokenIntrinsic(appAddress, mockHypergraphErr, verEnc, decafConstructor, bulletproofProver, inclusionProver, keyManager, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "vertex not found")
@ -262,7 +262,7 @@ func TestLoadTokenIntrinsic(t *testing.T) {
t.Run("load success - valid token intrinsic", func(t *testing.T) {
inclusionProver.On("CommitRaw", mock.Anything, mock.Anything).Return([]byte("commitment"), nil)
tokenIntrinsic, err := LoadTokenIntrinsic(appAddress, mockHypergraphSuccess, verEnc, decafConstructor, bulletproofProver, inclusionProver, keyManager)
tokenIntrinsic, err := LoadTokenIntrinsic(appAddress, mockHypergraphSuccess, verEnc, decafConstructor, bulletproofProver, inclusionProver, keyManager, nil)
require.NoError(t, err)
require.NotNil(t, tokenIntrinsic)

View File

@ -26,6 +26,9 @@ const FRAME_2_1_CUTOVER = 244200
const FRAME_2_1_EXTENDED_ENROLL_END = 252840
const FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END = FRAME_2_1_EXTENDED_ENROLL_END + 360
// used to skip frame-based checks, for tests
var BEHAVIOR_PASS = false
// using ed448 derivation process of seed = [57]byte{0x00..}
var publicReadKey, _ = hex.DecodeString("2cf07ca8d9ab1a4bb0902e25a9b90759dd54d881f54d52a76a17e79bf0361c325650f12746e4337ffb5940e7665ad7bf83f44af98d964bbe")
@ -87,7 +90,7 @@ func (i *TransactionInput) Prove(tx *Transaction, index int) ([]byte, error) {
var blind []byte
if bytes.Equal(i.address[:32], QUIL_TOKEN_ADDRESS) &&
frameNumber <= FRAME_2_1_CUTOVER {
frameNumber <= FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END && !BEHAVIOR_PASS {
return nil, errors.Wrap(errors.New("invalid action"), "prove input")
}
@ -133,7 +136,13 @@ func (i *TransactionInput) Prove(tx *Transaction, index int) ([]byte, error) {
if tx.config.Behavior&Acceptable != 0 {
if !bytes.Equal(pendingTypeBytes, checkType) {
return nil, errors.Wrap(
errors.New("invalid type for address"),
errors.New(
fmt.Sprintf(
"invalid type for address: %x, expected %x",
checkType,
pendingTypeBytes,
),
),
"prove input",
)
}
@ -884,10 +893,7 @@ func (o *TransactionOutput) Verify(
frameNumber uint64,
config *TokenIntrinsicConfiguration,
) (bool, error) {
if !bytes.Equal(
binary.BigEndian.AppendUint64(nil, frameNumber),
o.FrameNumber,
) {
if frameNumber <= binary.BigEndian.Uint64(o.FrameNumber) {
return false, errors.Wrap(
errors.New("invalid frame number"),
"verify output",
@ -1235,7 +1241,8 @@ func (tx *Transaction) Prove(frameNumber uint64) error {
return err
}
if len(res.Commitment) != len(tx.Outputs)*56 {
if len(res.Commitment) != len(tx.Outputs)*56 ||
len(res.Blinding) != len(tx.Outputs)*56 {
return errors.Wrap(errors.New("invalid range proof"), "prove")
}
@ -1553,10 +1560,19 @@ func (tx *Transaction) Verify(frameNumber uint64) (bool, error) {
commitments = append(commitments, tx.Outputs[i].Commitment)
}
roots, err := tx.hypergraph.GetShardCommits(
binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber),
tx.Domain[:],
)
if err != nil {
return false, errors.Wrap(err, "verify")
}
valid, err := tx.hypergraph.VerifyTraversalProof(
tx.Domain,
hypergraph.VertexAtomType,
hypergraph.AddsPhaseType,
roots[0],
tx.TraversalProof,
)
if err != nil || !valid {

View File

@ -78,9 +78,9 @@ func TestValidTransactionWithMocks(t *testing.T) {
mp.On("FromBytes", mock.Anything).Return(nil)
ip.On("ProveMultiple", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(mp, nil)
ip.On("VerifyMultiple", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
hg.On("GetShardCommits", mock.Anything, mock.Anything).Return([][]byte{make([]byte, 64), make([]byte, 64), make([]byte, 64), make([]byte, 64)}, nil)
bp.On("GenerateInputCommitmentsFromBig", mock.Anything, mock.Anything).Return([]byte("input-commit" + string(bytes.Repeat([]byte{0x00}, 56-12))))
hg.On("CreateTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&qcrypto.TraversalProof{
Multiproof: &mocks.MockMultiproof{},
SubProofs: []qcrypto.TraversalSubProof{{
Commits: [][]byte{[]byte("valid-hg-commit" + string(bytes.Repeat([]byte{0x00}, 74-15)))},
@ -88,12 +88,12 @@ func TestValidTransactionWithMocks(t *testing.T) {
Paths: [][]uint64{{0}},
}},
}, nil)
hg.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
hg.On("VerifyTraversalProof", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
hg.On("GetProver").Return(ip)
ip.On("VerifyRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
bp.On("VerifyHidden", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true)
tree := &qcrypto.VectorCommitmentTree{}
tree.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, FRAME_2_1_CUTOVER+1), nil, big.NewInt(55))
tree.Insert([]byte{0}, binary.BigEndian.AppendUint64(nil, FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+1), nil, big.NewInt(55))
tree.Insert([]byte{1 << 2}, []byte("valid-commitment"+string(bytes.Repeat([]byte{0x00}, 56-16))), nil, big.NewInt(56))
tree.Insert([]byte{2 << 2}, []byte("one-time-key"+string(bytes.Repeat([]byte{0x00}, 56-12))), nil, big.NewInt(56))
tree.Insert([]byte{3 << 2}, []byte("verification-key"+string(bytes.Repeat([]byte{0x00}, 56-16))), nil, big.NewInt(56))
@ -137,11 +137,11 @@ func TestValidTransactionWithMocks(t *testing.T) {
rdfMultiprover,
)
if err := tx.Prove(FRAME_2_1_CUTOVER + 1); err != nil {
if err := tx.Prove(FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 1); err != nil {
t.Fatal(err)
}
if valid, err := tx.Verify(FRAME_2_1_CUTOVER + 1); !valid {
if valid, err := tx.Verify(FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END + 2); !valid {
t.Fatal("Expected transaction to verify but it failed", err)
}
}

View File

@ -38,7 +38,7 @@ func (tx *PendingTransaction) FromBytes(
}
// Convert from protobuf
converted, err := PendingTransactionFromProtobuf(pb)
converted, err := PendingTransactionFromProtobuf(pb, inclusionProver)
if err != nil {
return errors.Wrap(err, "from bytes")
}
@ -46,22 +46,8 @@ func (tx *PendingTransaction) FromBytes(
// Copy converted fields
*tx = *converted
tx.rdfHypergraphSchema = rdfHypergraphSchema
// Convert TraversalProof from protobuf if present
if pb.TraversalProof != nil {
tp, err := TraversalProofFromProtobuf(pb.TraversalProof, inclusionProver)
if err != nil {
return errors.Wrap(err, "deserializing traversal proof")
}
tx.TraversalProof = tp
if tx.TraversalProof != nil && len(tx.TraversalProof.SubProofs) == 0 {
return errors.Wrap(errors.New("invalid payload"), "from bytes")
}
}
// Set injected values
tx.rdfHypergraphSchema = rdfHypergraphSchema
tx.hypergraph = hypergraph
tx.bulletproofProver = bulletproofProver
tx.inclusionProver = inclusionProver
@ -103,7 +89,7 @@ func (tx *Transaction) FromBytes(
}
// Convert from protobuf
converted, err := TransactionFromProtobuf(pb)
converted, err := TransactionFromProtobuf(pb, inclusionProver)
if err != nil {
return errors.Wrap(err, "from bytes")
}
@ -114,15 +100,6 @@ func (tx *Transaction) FromBytes(
// Load intrinsic for RDF schema
tx.rdfHypergraphSchema = rdfHypergraphSchema
// Convert TraversalProof from protobuf if present
if pb.TraversalProof != nil {
tp, err := TraversalProofFromProtobuf(pb.TraversalProof, inclusionProver)
if err != nil {
return errors.Wrap(err, "deserializing traversal proof")
}
tx.TraversalProof = tp
}
// Set injected values
tx.hypergraph = hypergraph
tx.bulletproofProver = bulletproofProver

View File

@ -20,6 +20,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/compiler"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
@ -46,6 +47,9 @@ type ExecutionEngineManager struct {
decafConstructor crypto.DecafConstructor
compiler compiler.CircuitCompiler
frameProver crypto.FrameProver
rewardIssuance consensus.RewardIssuance
proverRegistry consensus.ProverRegistry
blsConstructor crypto.BlsConstructor
includeGlobal bool
quit chan struct{}
wg sync.WaitGroup
@ -65,6 +69,9 @@ func NewExecutionEngineManager(
decafConstructor crypto.DecafConstructor,
compiler compiler.CircuitCompiler,
frameProver crypto.FrameProver,
rewardIssuance consensus.RewardIssuance,
proverRegistry consensus.ProverRegistry,
blsConstructor crypto.BlsConstructor,
includeGlobal bool,
) (*ExecutionEngineManager, error) {
return &ExecutionEngineManager{
@ -83,6 +90,9 @@ func NewExecutionEngineManager(
decafConstructor: decafConstructor,
compiler: compiler,
frameProver: frameProver,
rewardIssuance: rewardIssuance,
proverRegistry: proverRegistry,
blsConstructor: blsConstructor,
includeGlobal: includeGlobal,
quit: make(chan struct{}),
}, nil
@ -106,6 +116,9 @@ func (m *ExecutionEngineManager) InitializeEngines() error {
m.decafConstructor,
m.compiler,
m.frameProver,
m.rewardIssuance,
m.proverRegistry,
m.blsConstructor,
m.includeGlobal,
)
if err != nil {
@ -627,13 +640,13 @@ func (m *ExecutionEngineManager) Lock(
frameNumber uint64,
address []byte,
message []byte,
) error {
) ([][]byte, error) {
m.enginesMu.RLock()
defer m.enginesMu.RUnlock()
engine := m.selectEngine(address)
if engine == nil {
return errors.Errorf("no execution engine found for address: %x", address)
return nil, errors.Errorf("no execution engine found for address: %x", address)
}
return engine.Lock(frameNumber, address, message)

Some files were not shown because too many files have changed in this diff Show More