mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git
synced 2026-02-21 10:27:26 +08:00

Merge branch 'develop-2.1-milestone3-fork-test' into develop-2.1

commit df81a35cdb
25 Cargo.lock generated

@@ -1517,6 +1517,19 @@ dependencies = [
 "opaque-debug 0.2.3",
]

[[package]]
name = "sha2"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800"
dependencies = [
 "block-buffer 0.9.0",
 "cfg-if",
 "cpufeatures",
 "digest 0.9.0",
 "opaque-debug 0.3.1",
]

[[package]]
name = "sha2"
version = "0.10.8"

@@ -1834,6 +1847,18 @@ dependencies = [
 "uniffi",
]

[[package]]
name = "verenc"
version = "0.1.0"
dependencies = [
 "ed448-goldilocks-plus",
 "hex 0.4.3",
 "rand",
 "serde",
 "sha2 0.9.9",
 "uniffi",
]

[[package]]
name = "version_check"
version = "0.9.4"
@@ -20,6 +20,7 @@ members = [
  "crates/bls48581",
  "crates/ed448-rust",
  "crates/rpm",
  "crates/verenc",
]

[profile.release]
@@ -76,6 +76,10 @@ RUN ./generate.sh
WORKDIR /opt/ceremonyclient/bls48581
RUN ./generate.sh

## Generate Rust bindings for VerEnc
WORKDIR /opt/ceremonyclient/verenc
RUN ./generate.sh

# Build and install the node
WORKDIR /opt/ceremonyclient/node
@@ -36,6 +36,7 @@ tasks:
    cmds:
      - vdf/generate.sh
      - bls48581/generate.sh
      - verenc/generate.sh
      - node/build.sh -o build/arm64_macos/node

  build_qclient_arm64_macos:
@@ -19,14 +19,14 @@ case "$os_type" in
    # Check if the architecture is ARM
    if [[ "$(uname -m)" == "arm64" ]]; then
      # MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
      go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -L/opt/homebrew/Cellar/mpfr/4.2.1/lib -I/opt/homebrew/Cellar/mpfr/4.2.1/include -L/opt/homebrew/Cellar/gmp/6.3.0/lib -I/opt/homebrew/Cellar/gmp/6.3.0/include -L/opt/homebrew/Cellar/flint/3.1.3-p1/lib -I/opt/homebrew/Cellar/flint/3.1.3-p1/include -lbls48581 -lstdc++ -lvdf -ldl -lm -lflint -lgmp -lmpfr'" "$@"
      go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lbls48581 -lstdc++ -lvdf -lverenc -ldl -lm -lflint -lgmp -lmpfr'" "$@"
    else
      echo "Unsupported platform"
      exit 1
    fi
    ;;
  "Linux")
    go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -Wl,-Bstatic -lvdf -lbls48581 -Wl,-Bdynamic -lstdc++ -ldl -lm -lflint -lgmp -lmpfr'" "$@"
    go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -Wl,-Bstatic -lvdf -lbls48581 -lverenc -Wl,-Bdynamic -lstdc++ -ldl -lm -lflint -lgmp -lmpfr'" "$@"
    ;;
  *)
    echo "Unsupported platform"
@@ -10,6 +10,8 @@ replace source.quilibrium.com/quilibrium/monorepo/bls48581 => ../bls48581

replace source.quilibrium.com/quilibrium/monorepo/vdf => ../vdf

replace source.quilibrium.com/quilibrium/monorepo/verenc => ../verenc

replace github.com/multiformats/go-multiaddr => ../go-multiaddr

replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns

@@ -176,6 +178,7 @@ require (
	source.quilibrium.com/quilibrium/monorepo/bls48581 v0.0.0-00010101000000-000000000000 // indirect
	source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000 // indirect
	source.quilibrium.com/quilibrium/monorepo/vdf v0.0.0-00010101000000-000000000000 // indirect
	source.quilibrium.com/quilibrium/monorepo/verenc v0.0.0-00010101000000-000000000000 // indirect
)

require (
10 crates/verenc/.gitignore vendored Normal file

@@ -0,0 +1,10 @@
# Generated by Cargo
# will have compiled files and executables
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk
22 crates/verenc/Cargo.toml Normal file

@@ -0,0 +1,22 @@
[package]
name = "verenc"
version = "0.1.0"
authors = ["Anonymous conference submission", "Cassandra Heart"]
edition = "2018"
description = "Implementation of verifiable encryption of discrete logarithms with the DKG-in-the-head approach, adapted to Curve448"
license = "MIT"

[lib]
crate-type = ["lib", "staticlib"]
name = "verenc"

[dependencies]
sha2 = "0.9.0"
hex = "0.4.0"
rand = "0.8"
ed448-goldilocks-plus = "0.11.2"
uniffi = { version = "0.25", features = ["cli"] }
serde = "1.0.208"

[build-dependencies]
uniffi = { version = "0.25", features = ["build"] }
28 crates/verenc/README.md Normal file

@@ -0,0 +1,28 @@

## Introduction
Implementation of the DKG-in-the-head (DKGitH) and Robust DKG-in-the-head (RDKGitH) verifiable encryption schemes.

## Description
These verifiable encryption (VE) schemes allow one to encrypt a discrete logarithm instance under an Elgamal public key and prove to anyone that the correct value is encrypted.

The original implementation uses the elliptic curve arithmetic of [`arkworks`](https://github.com/arkworks-rs) and defaults to the `secp256r1` curve, but it is generic over the choice of curve and can easily be modified to use other curves; this adaptation targets Curve448 via the [`ed448-goldilocks-plus`](https://crates.io/crates/ed448-goldilocks-plus) crate.

Hashing is done with SHA512, using the Rust [`sha2`](https://docs.rs/sha2/latest/sha2/) crate.

Our seed tree implementation is inspired by the one in the C implementation of the [LegRoast](https://github.com/WardBeullens/LegRoast) signature scheme.

## Running Tests and Benchmarks
To run unit tests, type `cargo test --release`.

Sizes of the proofs and ciphertexts for the two schemes are computed in unit tests; use the script `run_size_benchmarks.sh` to run the tests and display the output.

Benchmarks of the time required to run the main VE operations `Prove()`, `Verify()`, `Compress()` and `Recover()` are also provided, and can be run with `cargo bench`. To run only the DKGitH benchmarks, use
```
cargo bench -- "^DKGitH"
```
and to run only the RDKGitH benchmarks use
```
cargo bench -- "^RDKGitH"
```
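
A minimal end-to-end usage sketch of the Rust API itself, mirroring the crate's own `test_verenc` unit test (the 56-byte input is a Curve448 scalar encoding, such as the chunks produced by `chunk_data_for_verenc`):

```rust
use verenc::{new_verenc_proof, verenc_verify, verenc_compress, verenc_recover,
             VerencProof, VerencDecrypt};

fn main() {
    // 56-byte witness (a Curve448 scalar encoding); here: zero-padded "hello".
    let mut data = vec![0u8; 56];
    data[1..6].copy_from_slice(b"hello");

    // Prove: generates blinding/decryption keys, the statement, and the proof.
    let p = new_verenc_proof(data.clone());

    // Verify using only the public parts of the proof.
    let public = VerencProof {
        blinding_pubkey: p.blinding_pubkey.clone(),
        encryption_key: p.encryption_key.clone(),
        statement: p.statement.clone(),
        challenge: p.challenge,
        polycom: p.polycom,
        ctexts: p.ctexts,
        shares_rands: p.shares_rands,
    };
    assert!(verenc_verify(public.clone()));

    // Compress to a ciphertext, then recover the witness with the decryption key.
    let compressed = verenc_compress(public);
    let recovered = verenc_recover(VerencDecrypt {
        blinding_pubkey: p.blinding_pubkey,
        decryption_key: p.decryption_key,
        statement: p.statement,
        ciphertexts: compressed,
    });
    assert_eq!(recovered, data);
}
```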
5 crates/verenc/build.rs Normal file

@@ -0,0 +1,5 @@
fn main() {
    println!("cargo:rerun-if-changed=build.rs");

    uniffi::generate_scaffolding("src/lib.udl").expect("uniffi generation failed");
}
7 crates/verenc/run_size_benchmarks.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/bash

echo "------------------------------------";
echo "Printing sizes for RDKGitH instances:";
echo "------------------------------------";
cargo test --release -- --exact --nocapture rdkgith::tests::test_ve_print_sizes;
526 crates/verenc/src/lib.rs Normal file

@@ -0,0 +1,526 @@
pub mod rdkgith;
pub mod utils;
pub mod pke;
pub mod ve;
pub mod seed_tree;

use std::convert::TryFrom;
use std::convert::TryInto;

use ed448_goldilocks_plus::CompressedEdwardsY;
use ed448_goldilocks_plus::EdwardsPoint;
use ed448_goldilocks_plus::Scalar;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};

pub use crate::rdkgith::*;
pub use crate::utils::*;
pub use crate::pke::*;
pub use crate::ve::*;
pub use crate::seed_tree::*;

uniffi::include_scaffolding!("lib");

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct VerencCiphertext {
    pub c1: Vec<u8>,
    pub c2: Vec<u8>,
    pub i: u64,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct VerencShare {
    pub s1: Vec<u8>,
    pub s2: Vec<u8>,
    pub i: u64,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct VerencProofAndBlindingKey {
    pub blinding_key: Vec<u8>,
    pub blinding_pubkey: Vec<u8>,
    pub decryption_key: Vec<u8>,
    pub encryption_key: Vec<u8>,
    pub statement: Vec<u8>,
    pub challenge: Vec<u8>,
    pub polycom: Vec<Vec<u8>>,
    pub ctexts: Vec<VerencCiphertext>,
    pub shares_rands: Vec<VerencShare>,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct VerencDecrypt {
    pub blinding_pubkey: Vec<u8>,
    pub decryption_key: Vec<u8>,
    pub statement: Vec<u8>,
    pub ciphertexts: CompressedCiphertext,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct VerencProof {
    pub blinding_pubkey: Vec<u8>,
    pub encryption_key: Vec<u8>,
    pub statement: Vec<u8>,
    pub challenge: Vec<u8>,
    pub polycom: Vec<Vec<u8>>,
    pub ctexts: Vec<VerencCiphertext>,
    pub shares_rands: Vec<VerencShare>,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct CompressedCiphertext {
    pub ctexts: Vec<VerencCiphertext>,
    pub aux: Vec<Vec<u8>>,
}

pub fn new_verenc_proof(data: Vec<u8>) -> VerencProofAndBlindingKey {
    if data.len() != 56 {
        return VerencProofAndBlindingKey {
            blinding_key: vec![],
            blinding_pubkey: vec![],
            decryption_key: vec![],
            encryption_key: vec![],
            statement: vec![],
            challenge: vec![],
            polycom: vec![],
            ctexts: vec![],
            shares_rands: vec![],
        };
    }

    let blind = Scalar::random(&mut OsRng);
    let params = CurveParams::init(EdwardsPoint::GENERATOR * blind);
    let pke = Elgamal::setup(&params);
    let (N, t, n) = RVE_PARAMS[0];
    let vparams = RDkgithParams { N, t, n };
    let mut ve = RDkgith::setup(&params, &vparams, pke.clone());

    let dk = ve.kgen();
    let (stm, wit) = ve.igen(&data.try_into().unwrap());
    let pi = ve.prove(&stm, &wit);

    return VerencProofAndBlindingKey {
        blinding_key: blind.to_bytes().to_vec(),
        blinding_pubkey: (EdwardsPoint::GENERATOR * blind).compress().to_bytes().to_vec(),
        decryption_key: dk.to_bytes().to_vec(),
        encryption_key: (EdwardsPoint::GENERATOR * dk).compress().to_bytes().to_vec(),
        statement: stm.compress().to_bytes().to_vec(),
        challenge: pi.challenge,
        polycom: pi.polycom.iter().map(|p| p.compress().to_bytes().to_vec()).collect(),
        ctexts: pi.ctexts.iter().map(|c| VerencCiphertext {
            c1: c.0.c1.compress().to_bytes().to_vec(),
            c2: c.0.c2.to_bytes().to_vec(),
            i: c.1 as u64,
        }).collect(),
        shares_rands: pi.shares_rands.iter().map(|s| VerencShare {
            s1: s.0.to_bytes().to_vec(),
            s2: s.1.to_bytes().to_vec(),
            i: s.2 as u64,
        }).collect(),
    };
}

pub fn new_verenc_proof_encrypt_only(data: Vec<u8>, encryption_key_bytes: Vec<u8>) -> VerencProofAndBlindingKey {
    if data.len() != 56 {
        return VerencProofAndBlindingKey {
            blinding_key: vec![],
            blinding_pubkey: vec![],
            decryption_key: vec![],
            encryption_key: vec![],
            statement: vec![],
            challenge: vec![],
            polycom: vec![],
            ctexts: vec![],
            shares_rands: vec![],
        };
    }

    let encryption_key = point_from_bytes(encryption_key_bytes.clone());
    if encryption_key.is_none() {
        return VerencProofAndBlindingKey {
            blinding_key: vec![],
            blinding_pubkey: vec![],
            decryption_key: vec![],
            encryption_key: vec![],
            statement: vec![],
            challenge: vec![],
            polycom: vec![],
            ctexts: vec![],
            shares_rands: vec![],
        };
    }

    let blind = Scalar::random(&mut OsRng);
    let params = CurveParams::init(EdwardsPoint::GENERATOR * blind);
    let pke = Elgamal::setup(&params);
    let (N, t, n) = RVE_PARAMS[0];
    let vparams = RDkgithParams { N, t, n };
    let mut ve = RDkgith::setup(&params, &vparams, pke.clone());

    ve.set_ek(PKEPublicKey {
        ek: encryption_key.unwrap(),
    });
    let (stm, wit) = ve.igen(&data.try_into().unwrap());
    let pi = ve.prove(&stm, &wit);

    return VerencProofAndBlindingKey {
        blinding_key: blind.to_bytes().to_vec(),
        blinding_pubkey: (EdwardsPoint::GENERATOR * blind).compress().to_bytes().to_vec(),
        decryption_key: vec![],
        encryption_key: encryption_key_bytes.clone(),
        statement: stm.compress().to_bytes().to_vec(),
        challenge: pi.challenge,
        polycom: pi.polycom.iter().map(|p| p.compress().to_bytes().to_vec()).collect(),
        ctexts: pi.ctexts.iter().map(|c| VerencCiphertext {
            c1: c.0.c1.compress().to_bytes().to_vec(),
            c2: c.0.c2.to_bytes().to_vec(),
            i: c.1 as u64,
        }).collect(),
        shares_rands: pi.shares_rands.iter().map(|s| VerencShare {
            s1: s.0.to_bytes().to_vec(),
            s2: s.1.to_bytes().to_vec(),
            i: s.2 as u64,
        }).collect(),
    };
}

fn point_from_bytes(bytes: Vec<u8>) -> Option<EdwardsPoint> {
    if bytes.len() != 57 {
        return None;
    }

    let key_bytes: Result<[u8; 57], _> = bytes.try_into();
    if key_bytes.is_err() {
        return None;
    }

    let compressed_key = CompressedEdwardsY::try_from(key_bytes.unwrap());
    if compressed_key.is_err() {
        return None;
    }

    let key = compressed_key.unwrap().decompress();
    if key.is_none().into() {
        return None;
    }

    return Some(key.unwrap());
}

pub fn verenc_verify(proof: VerencProof) -> bool {
    let blinding_key = point_from_bytes(proof.blinding_pubkey);
    if blinding_key.is_none() {
        return false;
    }

    let statement = point_from_bytes(proof.statement);
    if statement.is_none() {
        return false;
    }

    let encryption_key = point_from_bytes(proof.encryption_key);
    if encryption_key.is_none() {
        return false;
    }

    let mut polycom: Vec<EdwardsPoint> = Vec::new();
    for p in proof.polycom {
        let com = point_from_bytes(p);
        if com.is_none() {
            return false;
        }

        polycom.push(com.unwrap());
    }

    let mut ctexts: Vec<(PKECipherText, usize)> = Vec::new();
    for c in proof.ctexts {
        let c1 = point_from_bytes(c.c1);
        if c1.is_none() {
            return false;
        }

        if c.c2.len() != 56 {
            return false;
        }

        let c2 = Scalar::from_bytes(&c.c2.try_into().unwrap());
        ctexts.push((PKECipherText { c1: c1.unwrap(), c2: c2 }, c.i as usize));
    }

    let mut shares: Vec<(Scalar, Scalar, usize)> = Vec::new();
    for s in proof.shares_rands {
        if s.s1.len() != 56 {
            return false;
        }

        if s.s2.len() != 56 {
            return false;
        }

        let s1 = Scalar::from_bytes(&s.s1.try_into().unwrap());
        let s2 = Scalar::from_bytes(&s.s2.try_into().unwrap());
        shares.push((s1, s2, s.i as usize));
    }

    let params = CurveParams::init(blinding_key.unwrap());
    let pke = Elgamal::setup(&params);
    let (N, t, n) = RVE_PARAMS[0];
    let vparams = RDkgithParams { N, t, n };
    let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
    ve.set_ek(PKEPublicKey {
        ek: encryption_key.unwrap(),
    });

    return ve.verify(&statement.unwrap(), &RDkgithProof {
        challenge: proof.challenge,
        polycom: polycom,
        ctexts: ctexts,
        shares_rands: shares,
    });
}

pub fn verenc_compress(proof: VerencProof) -> CompressedCiphertext {
    let blinding_key = point_from_bytes(proof.blinding_pubkey);
    if blinding_key.is_none() {
        return CompressedCiphertext {
            ctexts: vec![],
            aux: vec![],
        };
    }

    let statement = point_from_bytes(proof.statement);
    if statement.is_none() {
        return CompressedCiphertext {
            ctexts: vec![],
            aux: vec![],
        };
    }

    let encryption_key = point_from_bytes(proof.encryption_key);
    if encryption_key.is_none() {
        return CompressedCiphertext {
            ctexts: vec![],
            aux: vec![],
        };
    }

    let mut polycom: Vec<EdwardsPoint> = Vec::new();
    for p in proof.polycom {
        let com = point_from_bytes(p);
        if com.is_none() {
            return CompressedCiphertext {
                ctexts: vec![],
                aux: vec![],
            };
        }

        polycom.push(com.unwrap());
    }

    let mut ctexts: Vec<(PKECipherText, usize)> = Vec::new();
    for c in proof.ctexts {
        let c1 = point_from_bytes(c.c1);
        if c1.is_none() {
            return CompressedCiphertext {
                ctexts: vec![],
                aux: vec![],
            };
        }

        if c.c2.len() != 56 {
            return CompressedCiphertext {
                ctexts: vec![],
                aux: vec![],
            };
        }

        let c2 = Scalar::from_bytes(&c.c2.try_into().unwrap());
        ctexts.push((PKECipherText { c1: c1.unwrap(), c2: c2 }, c.i as usize));
    }

    let mut shares: Vec<(Scalar, Scalar, usize)> = Vec::new();
    for s in proof.shares_rands {
        if s.s1.len() != 56 {
            return CompressedCiphertext {
                ctexts: vec![],
                aux: vec![],
            };
        }

        if s.s2.len() != 56 {
            return CompressedCiphertext {
                ctexts: vec![],
                aux: vec![],
            };
        }

        let s1 = Scalar::from_bytes(&s.s1.try_into().unwrap());
        let s2 = Scalar::from_bytes(&s.s2.try_into().unwrap());
        shares.push((s1, s2, s.i as usize));
    }

    let params = CurveParams::init(blinding_key.unwrap());
    let pke = Elgamal::setup(&params);
    let (N, t, n) = RVE_PARAMS[0];
    let vparams = RDkgithParams { N, t, n };
    let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
    ve.set_ek(PKEPublicKey {
        ek: encryption_key.unwrap(),
    });
    let ve_ct = ve.compress(&statement.unwrap(), &RDkgithProof {
        challenge: proof.challenge,
        polycom: polycom,
        ctexts: ctexts,
        shares_rands: shares,
    });
    return CompressedCiphertext {
        ctexts: ve_ct.ctexts.iter().map(|v| VerencCiphertext {
            c1: v.c1.compress().to_bytes().to_vec(),
            c2: v.c2.to_bytes().to_vec(),
            i: 0,
        }).collect(),
        aux: ve_ct.aux.iter().map(|a| a.to_bytes().to_vec()).collect(),
    };
}

pub fn verenc_recover(recovery: VerencDecrypt) -> Vec<u8> {
    let blinding_key = point_from_bytes(recovery.blinding_pubkey);
    if blinding_key.is_none() {
        return vec![];
    }

    let statement = point_from_bytes(recovery.statement);
    if statement.is_none() {
        return vec![];
    }

    if recovery.decryption_key.len() != 56 {
        return vec![];
    }

    let decryption_key = Scalar::from_bytes(&recovery.decryption_key.try_into().unwrap());
    let mut ctexts: Vec<PKECipherText> = Vec::new();
    let mut aux: Vec<Scalar> = Vec::new();
    for c in recovery.ciphertexts.ctexts {
        let c1 = point_from_bytes(c.c1);
        if c1.is_none() {
            return vec![];
        }

        if c.c2.len() != 56 {
            return vec![];
        }

        let c2 = Scalar::from_bytes(&c.c2.try_into().unwrap());
        ctexts.push(PKECipherText { c1: c1.unwrap(), c2: c2 });
    }

    for c in recovery.ciphertexts.aux {
        if c.len() != 56 {
            return vec![];
        }

        let a = Scalar::from_bytes(&c.try_into().unwrap());
        aux.push(a);
    }

    let ve_ct = RDkgithCipherText {
        ctexts: ctexts,
        aux: aux,
    };

    let params = CurveParams::init(blinding_key.unwrap());
    let pke = Elgamal::setup(&params);
    let (N, t, n) = RVE_PARAMS[0];
    let vparams = RDkgithParams { N, t, n };
    let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
    let wit_recover = ve.recover(&statement.unwrap(), &decryption_key, &ve_ct);
    return wit_recover.to_bytes().to_vec();
}

pub fn chunk_data_for_verenc(data: Vec<u8>) -> Vec<Vec<u8>> {
    return encode_to_curve448_scalars(&data);
}

pub fn combine_chunked_data(chunks: Vec<Vec<u8>>) -> Vec<u8> {
    return decode_from_curve448_scalars(&chunks);
}
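
// Each scalar produced by chunk_data_for_verenc carries 54 payload bytes (432 bits;
// see utils::encode_to_curve448_scalars), so a chunk-then-combine round trip returns
// the input zero-padded to a multiple of 54 bytes, as test_chunking below relies on.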

#[cfg(test)]
mod tests {
    use rand::RngCore;

    use super::*;

    #[test]
    fn test_verenc() {
        let data = vec![0, 'h' as u8, 'e' as u8, 'l' as u8, 'l' as u8, 'o' as u8, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0];
        let proof = new_verenc_proof(data.clone());
        let proofdata = proof.clone();
        let pubproof = VerencProof { blinding_pubkey: proof.blinding_pubkey, encryption_key: proof.encryption_key, statement: proof.statement, challenge: proof.challenge, polycom: proof.polycom, ctexts: proof.ctexts, shares_rands: proof.shares_rands };
        assert!(verenc_verify(pubproof.clone()));
        let compressed = verenc_compress(pubproof);
        let result = verenc_recover(VerencDecrypt {
            blinding_pubkey: proofdata.blinding_pubkey,
            decryption_key: proof.decryption_key,
            statement: proofdata.statement,
            ciphertexts: compressed,
        });

        assert!(data == result);
    }

    #[test]
    fn test_chunking() {
        for _ in 0..1000 {
            let mut data: [u8; 1300] = [0u8; 1300];
            OsRng::fill_bytes(&mut OsRng, &mut data);
            let chunks = chunk_data_for_verenc(data.to_vec());
            for chunk in chunks.clone() {
                let scalar_chunk = Scalar::from_bytes(&chunk.clone().try_into().unwrap());
                assert!(scalar_chunk.to_bytes().to_vec() == chunk)
            }
            let result = combine_chunked_data(chunks);
            let mut padded_data = data.to_vec();
            while result.len() > padded_data.len() {
                padded_data.push(0);
            }
            assert!(padded_data == result);
        }
    }

    #[test]
    fn test_full_verenc() {
        let mut data: [u8; 128] = [0u8; 128];
        OsRng::fill_bytes(&mut OsRng, &mut data);
        let chunks = chunk_data_for_verenc(data.to_vec());
        for chunk in chunks.clone() {
            let proof = new_verenc_proof(chunk.clone());
            let proofdata = proof.clone();
            let pubproof = VerencProof { blinding_pubkey: proof.blinding_pubkey, encryption_key: proof.encryption_key, statement: proof.statement, challenge: proof.challenge, polycom: proof.polycom, ctexts: proof.ctexts, shares_rands: proof.shares_rands };
            assert!(verenc_verify(pubproof.clone()));
            let compressed = verenc_compress(pubproof);
            let result = verenc_recover(VerencDecrypt {
                blinding_pubkey: proofdata.blinding_pubkey,
                decryption_key: proof.decryption_key,
                statement: proofdata.statement,
                ciphertexts: compressed,
            });

            assert!(chunk == result);
        }
        let result = combine_chunked_data(chunks);
        let mut padded_data = data.to_vec();
        while result.len() > padded_data.len() {
            padded_data.push(0);
        }
        assert!(padded_data != result);
    }
}
55 crates/verenc/src/lib.udl Normal file

@@ -0,0 +1,55 @@
namespace verenc {
  VerencProofAndBlindingKey new_verenc_proof(sequence<u8> data);
  VerencProofAndBlindingKey new_verenc_proof_encrypt_only(sequence<u8> data, sequence<u8> encryption_key_bytes);
  boolean verenc_verify(VerencProof proof);
  CompressedCiphertext verenc_compress(VerencProof proof);
  sequence<u8> verenc_recover(VerencDecrypt recovery);
  sequence<sequence<u8>> chunk_data_for_verenc(sequence<u8> data);
  sequence<u8> combine_chunked_data(sequence<sequence<u8>> chunks);
};

dictionary VerencCiphertext {
  sequence<u8> c1;
  sequence<u8> c2;
  u64 i;
};

dictionary VerencShare {
  sequence<u8> s1;
  sequence<u8> s2;
  u64 i;
};

dictionary VerencProofAndBlindingKey {
  sequence<u8> blinding_key;
  sequence<u8> blinding_pubkey;
  sequence<u8> decryption_key;
  sequence<u8> encryption_key;
  sequence<u8> statement;
  sequence<u8> challenge;
  sequence<sequence<u8>> polycom;
  sequence<VerencCiphertext> ctexts;
  sequence<VerencShare> shares_rands;
};

dictionary VerencDecrypt {
  sequence<u8> blinding_pubkey;
  sequence<u8> decryption_key;
  sequence<u8> statement;
  CompressedCiphertext ciphertexts;
};

dictionary VerencProof {
  sequence<u8> blinding_pubkey;
  sequence<u8> encryption_key;
  sequence<u8> statement;
  sequence<u8> challenge;
  sequence<sequence<u8>> polycom;
  sequence<VerencCiphertext> ctexts;
  sequence<VerencShare> shares_rands;
};

dictionary CompressedCiphertext {
  sequence<VerencCiphertext> ctexts;
  sequence<sequence<u8>> aux;
};
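
// These declarations mirror the public functions and structs of the same names in
// src/lib.rs; build.rs runs uniffi::generate_scaffolding over this file to produce
// the scaffolding from which foreign-language bindings are generated.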
116 crates/verenc/src/pke.rs Normal file

@@ -0,0 +1,116 @@
/* Hashed Elgamal implementation */
#![allow(dead_code)]
#![allow(non_snake_case)]

use crate::utils::*;

use ed448_goldilocks_plus::elliptic_curve::Group;
use rand::rngs::OsRng;
use ed448_goldilocks_plus::EdwardsPoint as GGA;
use ed448_goldilocks_plus::Scalar as FF;

const WINDOW_SIZE: usize = 7;

#[derive(Clone)]
pub struct Elgamal {
    pub(crate) params: CurveParams,
    pub(crate) G: GGA,
}

#[derive(Copy, Clone, Default, Debug)]
pub struct PKECipherText {
    pub(crate) c1: GGA,
    pub(crate) c2: FF,
}

#[derive(Clone)]
pub struct PKEPublicKey {
    pub(crate) ek: GGA,
}

impl PKECipherText {
    pub fn zero() -> Self {
        PKECipherText { c1: GGA::IDENTITY, c2: FF::ZERO }
    }
}

impl PKECipherText {
    pub fn to_bytes(&self) -> Vec<u8> {
        let c1_bytes = self.c1.compress().to_bytes().to_vec();
        let c2_bytes = self.c2.to_bytes().to_vec();
        [c1_bytes, c2_bytes].concat()
    }
}

impl Elgamal {
    pub fn setup(params: &CurveParams) -> Self {
        // we skip a lot of precomputation utilizing edwards curves
        let precomp_G = GGA::generator();

        Elgamal { params: params.clone(), G: precomp_G }
    }

    pub fn kgen(&self) -> (PKEPublicKey, FF) {
        let x = FF::random(&mut OsRng);
        let Y = self.mul_G(x);

        let pk = PKEPublicKey { ek: Y };

        return (pk, x);
    }

    pub fn encrypt(&self, ek: &PKEPublicKey, msg: &FF) -> PKECipherText {
        self.encrypt_given_r(ek, msg, &FF::random(&mut OsRng))
    }

    fn mul_G(&self, scalar: FF) -> GGA {
        self.G * scalar
    }

    fn mul_ek(ek: &GGA, scalar: FF) -> GGA {
        ek * scalar
    }

    pub fn encrypt_given_r(&self, ek: &PKEPublicKey, msg: &FF, r: &FF) -> PKECipherText {
        let c1 = self.mul_G(*r);
        self.encrypt_given_c1(ek, msg, r, c1)
    }

    // Encryption where c1 = G^r is given
    pub fn encrypt_given_c1(&self, ek: &PKEPublicKey, msg: &FF, r: &FF, c1: GGA) -> PKECipherText {
        let keyseed = Self::mul_ek(&ek.ek, *r);
        let hash = hash_to_FF(&keyseed);
        let c2 = hash + msg;
        PKECipherText { c1, c2 }
    }

    pub fn decrypt(&self, dk: &FF, ct: &PKECipherText) -> FF {
        let pt = ct.c1 * dk;
        let hash = hash_to_FF(&pt);
        ct.c2 - hash
    }
}
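
// Scheme summary (restating the code above): for key pair (dk, ek = dk*G) and message
// scalar m, encryption samples a random scalar r and outputs
//     (c1, c2) = (r*G, H(r*ek) + m),
// and decryption computes m = c2 - H(dk*c1). Correctness holds because
// dk*c1 = dk*(r*G) = r*ek, so encryptor and decryptor derive the same hash mask.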

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pke_kgen() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        let (ek, dk) = pke.kgen();
        assert_eq!(params.G * dk, ek.ek);
    }

    #[test]
    fn test_pke_enc_dec() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        let (ek, dk) = pke.kgen();
        let m = FF::random(&mut OsRng);
        let ct = pke.encrypt(&ek, &m);
        let pt = pke.decrypt(&dk, &ct);
        assert_eq!(m, pt);
    }
}
531 crates/verenc/src/rdkgith.rs Normal file

@@ -0,0 +1,531 @@
#![allow(non_snake_case)]

use crate::utils::*;
use crate::pke::*;
use crate::ve::*;

use rand::rngs::OsRng;
use rand::seq::IteratorRandom;
use sha2::{Digest, Sha512};

use ed448_goldilocks_plus::elliptic_curve::{Group, PrimeField};
use ed448_goldilocks_plus::EdwardsPoint as GGA;
use ed448_goldilocks_plus::Scalar as FF;

pub const RVE_PARAMS: [(usize, usize, usize); 1] = [(64, 22, 3)];

pub const WINDOW_SIZE: usize = 7;
pub const FIELD_ELT_BYTES: usize = ((FF::NUM_BITS + 7) / 8) as usize;

#[derive(Clone)]
pub struct RDkgithParams {
    pub N: usize, // number of parties
    pub t: usize, // number of parallel repetitions
    pub n: usize, // size of random subset
}

#[derive(Clone, Debug)]
pub struct RDkgithProof {
    pub(crate) challenge: Vec<u8>,
    pub(crate) polycom: Vec<GGA>,                   // A_1, ..., A_t
    pub(crate) ctexts: Vec<(PKECipherText, usize)>, // unopened ciphertexts ct_i
    //pub(crate) shares: Vec<Vec<FF>>,              // opened (s_i)_{i\in I}
    //pub(crate) rands: Vec<Vec<FF>>,               // opened (r_i)_{i\in I}
    pub(crate) shares_rands: Vec<(FF, FF, usize)>,
}

#[derive(Clone, Debug)]
pub struct RDkgithCipherText {
    pub(crate) ctexts: Vec<PKECipherText>,
    pub(crate) aux: Vec<FF>,
}

#[derive(Clone)]
pub struct RDkgith {
    pub(crate) params: CurveParams,
    pub(crate) vparams: RDkgithParams,
    pub(crate) pke: Elgamal,
    pub(crate) ek: PKEPublicKey,
    pub(crate) precomp_G: GGA,
}

impl RDkgith {
    pub fn check_instance(&self, stm: &GGA, wit: &FF) -> bool {
        if &(self.params.G * wit) == stm {
            return true;
        }
        false
    }

    pub fn expand_challenge(&self, challenge: &Vec<u8>) -> Vec<usize> {
        let length_required = self.vparams.N - self.vparams.t;
        let mut output = Vec::<usize>::new();
        let mut c = challenge.clone();
        while output.len() < length_required {

            let ints = bytes_to_u32(&c);
            for i in 0..ints.len() {
                let idx = (ints[i] as usize) % self.vparams.N;
                if !output.contains(&idx) {
                    output.push(idx);
                }
                if output.len() == length_required {
                    break;
                }
            }

            if output.len() != length_required {
                c = hash_SHA512(c.as_slice());
            }
        }
        output.sort();
        output
    }
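
    // Note on expand_challenge (above): with the default RVE_PARAMS (N = 64, t = 22)
    // it derives N - t = 42 distinct indices in [0, 64) by reading the challenge as
    // little-endian u32s modulo N, re-hashing with SHA512 whenever the current bytes
    // run out before enough distinct indices have been found.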

    fn mul_G(&self, scalar: FF) -> GGA {
        self.precomp_G * scalar
    }
}

impl VerEnc for RDkgith {
    type SystemParams = CurveParams;
    type Statement = GGA;
    type Witness = FF;
    type PKE = Elgamal;
    type EncKey = PKEPublicKey;
    type DecKey = FF;
    type VEParams = RDkgithParams;
    type VEProof = RDkgithProof;
    type VECipherText = RDkgithCipherText;

    fn setup(params: &CurveParams, vparams: &Self::VEParams, pke: Self::PKE) -> Self {
        let precomp_G = GGA::generator();
        RDkgith {
            params: params.clone(),
            vparams: vparams.clone(),
            pke,
            ek: PKEPublicKey { ek: GGA::identity() },
            precomp_G,
        }
    }

    fn set_ek(&mut self, ek: PKEPublicKey) {
        self.ek = ek;
    }

    fn kgen(&mut self) -> Self::DecKey {
        let (ek, dk) = self.pke.kgen();
        self.ek = ek;
        return dk;
    }

    fn get_public_key(&self) -> &Self::EncKey {
        &self.ek
    }

    fn igen(&self, w: &[u8; 56]) -> (Self::Statement, Self::Witness) {
        let x = FF::from_bytes(w);
        let Y = self.params.G * x;
        return (Y, x);
    }

    fn prove(&self, stm: &Self::Statement, wit: &Self::Witness) -> Self::VEProof {
        let N = self.vparams.N;
        let t = self.vparams.t;
        let mut hasher = Sha512::new();

        let mut coeffs = Vec::<FF>::with_capacity(t + 1);
        let mut polycom = Vec::<GGA>::with_capacity(t + 1);

        let mut ctexts = Vec::<PKECipherText>::with_capacity(N);
        let mut shares = Vec::<FF>::with_capacity(N);
        let mut rands = Vec::<FF>::with_capacity(N);
        let mut ret_ctexts = Vec::<(PKECipherText, usize)>::with_capacity(N - t);
        let mut ret_shares_rands = Vec::<(FF, FF, usize)>::with_capacity(t);

        /* Sample and commit to polynomial */
        for j in 0..t + 1 {
            let aj = if j == 0 {
                wit.clone()
            } else {
                FF::random(&mut OsRng)
            };
            let Aj = self.mul_G(aj);

            coeffs.insert(j, aj);
            polycom.insert(j, Aj);

            // hash
            let Aj_bytes = Aj.compress().to_bytes().to_vec();
            hasher.update(Aj_bytes);
        }

        for i in 0..N {
            let mut s = coeffs[0];
            let x = FF::from((i + 1) as u32);
            let mut xi = x;

            for j in 1..coeffs.len() {
                let term = coeffs[j] * xi;
                xi *= x;
                s += term;
            }
            let r = FF::random(&mut OsRng);
            let ct = self.pke.encrypt_given_r(&self.ek, &s, &r);
            shares.insert(i, s);
            rands.insert(i, r);
            ctexts.insert(i, ct);

            // hash
            hasher.update(ct.to_bytes());
        }

        // Hash stm and ek
        let stm_bytes = stm.compress().to_bytes().to_vec();
        let ek_bytes = self.ek.ek.compress().to_bytes().to_vec();
        hasher.update(stm_bytes);
        hasher.update(ek_bytes);

        let chal = hasher.finalize().to_vec();
        let p_indices = self.expand_challenge(&chal);

        // construct proof
        for i in 0..N {
            if p_indices.contains(&i) {
                ret_ctexts.push((ctexts[i], i));
            } else {
                ret_shares_rands.push((shares[i], rands[i], i));
            }
        }

        RDkgithProof {
            challenge: chal,
            polycom,
            ctexts: ret_ctexts,
            shares_rands: ret_shares_rands,
        }
    }

    fn verify(&self, stm: &Self::Statement, pi: &Self::VEProof) -> bool {
        let N = self.vparams.N;
        let t = self.vparams.t;
        let mut hasher = Sha512::new();

        // index of hidden parties
        let p_indices = self.expand_challenge(&pi.challenge);

        // hash polycom
        for j in 0..t + 1 {
            let Aj = pi.polycom[j];
            let Aj_bytes = Aj.compress().to_bytes().to_vec();
            hasher.update(Aj_bytes);
        }

        // check input format
        if pi.ctexts.len() != N - t || pi.shares_rands.len() != t || p_indices.len() != N - t {
            return false;
        }
        // Reconstruct missing ciphertexts
        let mut ctr_hide = 0;
        let mut ctr_open = 0;
        for i in 0..N {
            if p_indices.contains(&i) {
                let (ct, idx) = pi.ctexts[ctr_hide];
                hasher.update(ct.to_bytes());
                if i != idx {
                    return false;
                }
                ctr_hide += 1;
            } else {
                let (s, r, idx) = pi.shares_rands[ctr_open];
                let ct = self.pke.encrypt_given_r(&self.ek, &s, &r);
                hasher.update(ct.to_bytes());
                if i != idx {
                    return false;
                }
                ctr_open += 1;
            }
        }
        // Hash stm and ek
        let stm_bytes = stm.compress().to_bytes().to_vec();
        let ek_bytes = self.ek.ek.compress().to_bytes().to_vec();
        hasher.update(stm_bytes);
        hasher.update(ek_bytes);

        // check hash
        let chal_rec = hasher.finalize().to_vec();
        if chal_rec != pi.challenge {
            return false;
        }

        // Check shares -- Batched implementation: requires computing 1 MSM with t+1 terms.
        // See the "small exponents test" from the paper:
        // Fast batch verification for modular exponentiation and digital signatures. Mihir Bellare, Juan A. Garay & Tal Rabin, EUROCRYPT'98.
        // Basically the verifier takes a random linear combination of the LHSs and RHSs.
        let mut left_scalar = FF::ZERO;
        let mut right_scalars = vec![FF::ZERO; t + 1];

        for (s, _, i) in &pi.shares_rands {
            let random_d = FF::random(&mut OsRng);
            // Compute scalars for RHS
            let i_FF = FF::from(*i as u32 + 1);
            let mut i_pow = FF::from(1u32);
            for j in 0..t + 1 {
                right_scalars[j] += i_pow * random_d;
                i_pow = i_pow * i_FF;
            }
            left_scalar += s * &random_d;
        }
        let left = self.mul_G(left_scalar);
        let mut right = GGA::identity();
        for i in 0..pi.polycom.len() {
            right += pi.polycom[i] * &right_scalars[i]
        }
        if left != right {
            return false;
        }

        true
    }

    // Lagrange coeff: product delta_i(0) = prod_{j\neq i} j/(j-i)
    // Postprocessed ciphertext for party index i^*:
    //   c1 = r * G
    //   c2 = delta_{i^*}(0) (H(r * ek) + s_{i^*}) + sum_{i\neq i^*} delta_{i}(0) s_i
    fn compress(&self, _stm: &Self::Statement, pi: &Self::VEProof) -> Self::VECipherText {
        let N = self.vparams.N;
        let t = self.vparams.t;
        let n = self.vparams.n;
        let mut new_ctexts = Vec::<PKECipherText>::with_capacity(n);
        let mut aux = Vec::<FF>::with_capacity(n);
        let hide_indices = self.expand_challenge(&pi.challenge);
        let mut open_indices = Vec::<usize>::with_capacity(t);

        let mut lagrange = vec![FF::ZERO; N];
        for i in 0..N {
            if !hide_indices.contains(&i) {
                open_indices.push(i);
            }
        }

        assert_eq!(open_indices.len(), t);

        // preprocess lagrange
        for i in open_indices.iter() {
            let i_FF = FF::from(*i as u32 + 1);
            let mut prod = FF::from(1u32);
            let mut denom = FF::from(1u32);
            for j in open_indices.iter() {
                if j != i {
                    let j_FF = FF::from(*j as u32 + 1);
                    prod = prod * j_FF;
                    denom = denom * (j_FF - i_FF);
                }
            }
            lagrange[*i] = prod * denom.invert();
        }

        // sample random subset of size n
        let subset = hide_indices.iter().choose_multiple(&mut OsRng, n);

        let mut ctr_hide = 0;
        // process each ciphertext
        for i_hide in hide_indices.iter() {
            if !subset.contains(&i_hide) {
                ctr_hide += 1;
                continue;
            }

            let (ct, _idx) = pi.ctexts[ctr_hide];
            let c1_new = ct.c1;
            let mut c2_new = ct.c2;
            let i_hide_FF = FF::from(*i_hide as u32 + 1);
            let mut prod = FF::from(1u32);

            // multiply c2 by i_hide's lagrange
            for j in open_indices.iter() {
                if j != i_hide {
                    let j_FF = FF::from(*j as u32 + 1);
                    prod = (prod * j_FF) * (j_FF - i_hide_FF).invert();
                }
            }
            c2_new = c2_new * prod;

            // add sum of lagrange * s_i to c2
            let mut ctr_open = 0;
            for i in open_indices.iter() {
                let i_FF = FF::from(*i as u32 + 1);
                let mut delta_i = lagrange[*i];
                delta_i = (delta_i * i_hide_FF) * (i_hide_FF - i_FF).invert(); // update delta_i using i_hide
                let (s, _, _) = pi.shares_rands[ctr_open];
                c2_new = c2_new + delta_i * s;
                ctr_open += 1;
            }

            new_ctexts.push(PKECipherText { c1: c1_new, c2: c2_new });
            aux.push(prod);

            ctr_hide += 1;
        }

        RDkgithCipherText {
            ctexts: new_ctexts,
            aux, // TODO: maybe receiver can recompute this from party indices
        }
    }

    fn recover(&self, stm: &Self::Statement, dk: &Self::DecKey, ve_ct: &Self::VECipherText) -> Self::Witness {
        let n = self.vparams.n;
        for i in 0..n {
            let ct = ve_ct.ctexts[i];
            let delta = ve_ct.aux[i];
            let pt = ct.c1 * dk;
            let hash = hash_to_FF(&pt);
            let ptext = ct.c2 - hash * delta;
            if self.check_instance(stm, &ptext) {
                return ptext;
            }
        }
        print!("recovery failed!");
        FF::ZERO
    }
}

#[cfg(test)]
mod tests {
    use std::time::Instant;

    use ed448_goldilocks_plus::Scalar;

    use super::*;

    #[test]
    fn test_ve_kgen() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        let vparams = RDkgithParams { N: 8, t: 4, n: 4 };
        let mut ve = RDkgith::setup(&params, &vparams, pke);
        let dk = ve.kgen();

        assert_eq!(params.G * dk, ve.get_public_key().ek);
        assert_eq!(params.G * dk, ve.get_public_key().ek);
    }

    #[test]
    fn test_ve_igen() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        let vparams = RDkgithParams { N: 8, t: 4, n: 4 };
        let ve = RDkgith::setup(&params, &vparams, pke);
        let w = Scalar::random(&mut OsRng);
        let (stm, wit) = ve.igen(&w.to_bytes());
        assert_eq!(params.G * wit, stm)
    }

    #[test]
    fn test_ve_prove_verify() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        for (N, t, n) in &RVE_PARAMS[0..1] {
            let vparams = RDkgithParams { N: *N, t: *t, n: *n };
            let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
            let _dk = ve.kgen();
            let w = Scalar::random(&mut OsRng);
            let (stm, wit) = ve.igen(&w.to_bytes());
            let pi = ve.prove(&stm, &wit);
            println!("proof generated");
            let result = ve.verify(&stm, &pi);
            println!("proof verified");
            assert!(result);
        }
    }

    #[test]
    fn test_ve_prove_compress_recover() {
        let params = CurveParams::init(GGA::generator());
        let pke = Elgamal::setup(&params);
        for (N, t, n) in &RVE_PARAMS[0..1] {
            let vparams = RDkgithParams { N: *N, t: *t, n: *n };
            let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
            let dk = ve.kgen();
            let w = Scalar::random(&mut OsRng);
            let (stm, wit) = ve.igen(&w.to_bytes());
            let pi = ve.prove(&stm, &wit);
            println!("proof generated");
            let ve_ct = ve.compress(&stm, &pi);
            println!("VE ciphertext generated");
            let wit_recover = ve.recover(&stm, &dk, &ve_ct);
            assert_eq!(wit_recover, wit);
        }
    }

    pub fn proof_size(pi: &RDkgithProof) -> usize {
        let group_elt_bytes = 57;

        let mut size = pi.challenge.len();
        size += pi.polycom.len() * group_elt_bytes;
        size += pi.ctexts.len() * (pke_ctext_size(&pi.ctexts[0].0) + 8);
        size += pi.shares_rands.len() * (2 * FIELD_ELT_BYTES + 8);

        size
    }

    pub fn ctext_size(ctext: &RDkgithCipherText) -> usize {
        let mut size = ctext.ctexts.len() * pke_ctext_size(&ctext.ctexts[0]);
        size += ctext.aux.len() * FIELD_ELT_BYTES;

        size
    }

    pub fn pke_ctext_size(_ctext: &PKECipherText) -> usize {
        let group_elt_bytes = 57;
        let size = group_elt_bytes + FIELD_ELT_BYTES;

        size
    }

    #[test]
    fn test_ve_print_sizes() {
        let blind = GGA::random(&mut OsRng);
        let params = CurveParams::init(blind);
        let pke = Elgamal::setup(&params);

        for (N, t, n) in RVE_PARAMS {
            let vparams = RDkgithParams { N, t, n };
            let mut ve = RDkgith::setup(&params, &vparams, pke.clone());
            let dk = ve.kgen();
            let w = Scalar::random(&mut OsRng);
            let (stm, wit) = ve.igen(&w.to_bytes());
            let start = Instant::now();
            let pi = ve.prove(&stm, &wit);
            let duration = start.elapsed();
            print!("\nN = {}, t = {}, n = {}\n", N, t, n);
            print!("Proof size : {}, duration : {:?}\n", proof_size(&pi), duration);
            let start = Instant::now();
            assert!(ve.verify(&stm, &pi));
            let duration = start.elapsed();
            print!("verification duration : {:?}\n", duration);
            let start = Instant::now();
            let ve_ct = ve.compress(&stm, &pi);
            let duration = start.elapsed();
            print!("Ctext size : {}\n", (N - t) * (pke_ctext_size(&ve_ct.ctexts[0]) + FIELD_ELT_BYTES));
            print!("Ctext size (RS): {}, compression duration : {:?}\n", ctext_size(&ve_ct), duration);
            let wit_recover = ve.recover(&stm, &dk, &ve_ct);

            assert_eq!(wit_recover, wit);
        }
    }
}
199 crates/verenc/src/seed_tree.rs Normal file

@@ -0,0 +1,199 @@
#![allow(non_snake_case)]

use std::convert::TryInto;
use sha2::{Digest, Sha256};
use rand::RngCore;
use rand::rngs::OsRng;

// Implementation of the seed tree optimization.
// Port of the LegRoast C implementation to Rust
// (https://github.com/WardBeullens/LegRoast, main branch at cac7406).
// See merkletree.c.

// To convert seeds to finite field elements see utils::seed_to_FF

pub const SEED_BYTES: usize = 16;
pub type Seed = [u8; SEED_BYTES];

pub struct SeedTree {
    seeds: Vec<Seed>,  // length must be (2*PARTIES-1)
    depth: usize,      // log_2(N)
    num_leaves: usize, // N
}

impl SeedTree {

    fn expand(salt: &[u8], rep_index: u16, seed_index: u16, seed: &Seed) -> (Seed, Seed) {
        let mut hasher = Sha256::new();
        hasher.update(salt);
        hasher.update(rep_index.to_le_bytes());
        hasher.update(seed_index.to_le_bytes());
        hasher.update(seed);
        let digest = hasher.finalize();

        (
            digest[0..SEED_BYTES].try_into().expect("Hash digest too short, needs to be twice the seed length"),
            digest[SEED_BYTES..].try_into().expect("Hash digest too short, needs to be twice the seed length"),
        )
    }

    fn left_child(i: usize) -> usize {
        2 * i + 1
    }

    fn right_child(i: usize) -> usize {
        2 * i + 2
    }

    fn parent(i: usize) -> usize {
        (i - 1) / 2
    }

    fn sibling(i: usize) -> usize {
        // ((i)%2)? i+1 : i-1
        if i % 2 == 1 {
            i + 1
        } else {
            i - 1
        }
    }

    pub fn zero_seed() -> Seed {
        [0; SEED_BYTES]
    }

    pub fn random_seed() -> Seed {
        let mut random_vector = [0u8; SEED_BYTES];
        OsRng.fill_bytes(&mut random_vector);
        random_vector
    }

    pub fn create(root_seed: &Seed, depth: usize, salt: &[u8], rep_index: usize) -> Self {
        let num_leaves = 1 << depth;
        let mut seeds = vec![Self::zero_seed(); 2 * num_leaves - 1];
        seeds[0] = root_seed.clone();
        let rep_index = rep_index as u16;

        for i in 0..num_leaves - 1 {
            let i_u16: u16 = i.try_into().unwrap();
            let (left, right) = Self::expand(salt, rep_index, i_u16, &seeds[i]);
            seeds[Self::left_child(i)] = left;
            seeds[Self::right_child(i)] = right;
        }

        SeedTree { seeds, depth, num_leaves }
    }

    // Unopened party index is given in [0, .., N-1]
    pub fn open_seeds(&self, unopened_index: usize) -> Vec<Seed> {
        let mut unopened_index = unopened_index + (1 << self.depth) - 1;
        let mut out = Vec::new();
        let mut to_reveal = 0;
        while to_reveal < self.depth {
            out.push(self.seeds[Self::sibling(unopened_index)]);
            unopened_index = Self::parent(unopened_index);
            to_reveal += 1;
        }

        out
    }
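
    // Example (see the tests below): with depth = 3 the tree has 8 leaves, so
    // open_seeds returns exactly 3 sibling seeds, one per level on the path from the
    // unopened leaf to the root, from which reconstruct_tree re-derives every leaf
    // seed except the unopened one.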

    // Callers must ensure that revealed.size() == depth
    pub fn reconstruct_tree(depth: usize, salt: &[u8], rep_index: usize, unopened_index: usize, revealed: &Vec<Seed>) -> Self {
        let num_leaves = 1 << depth;
        let mut unopened_index = unopened_index + num_leaves - 1;
        let mut seeds = vec![Self::zero_seed(); 2 * num_leaves - 1];
        let mut next_insert = 0;
        assert!(revealed.len() == depth);
        while next_insert < depth {
            seeds[Self::sibling(unopened_index)] = revealed[next_insert];
            unopened_index = Self::parent(unopened_index);
            next_insert += 1;
        }

        let zero_seed = seeds[0]; // we'll never have the root
        for i in 0..num_leaves - 1 {
            if seeds[i] != zero_seed {
                let (left, right) = Self::expand(salt, rep_index as u16, i as u16, &seeds[i]);
                seeds[Self::left_child(i)] = left;
                seeds[Self::right_child(i)] = right;
            }
        }

        SeedTree { seeds, depth, num_leaves }
    }

    pub fn get_leaf(&self, i: usize) -> Seed {
        assert!(i < self.num_leaves, "get_leaf: leaf index too large"); // Caller bug

        self.seeds[self.num_leaves - 1 + i]
    }

    pub fn print_tree(&self, label: &str) {
        print!("Tree {}:\n", label);
        for i in 0..self.seeds.len() {
            print!("seed {} = {}\n", i, hex::encode_upper(self.seeds[i]));
            if i == self.num_leaves - 2 {
                print!("---- leaves follow ----\n")
            }
        }
    }

}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::rngs::OsRng;
    use rand::RngCore;

    fn random_vec(len: usize) -> Vec<u8> {
        let mut random_vector = vec![0u8; len];
        OsRng.fill_bytes(&mut random_vector);
        random_vector
    }

    #[test]
    fn test_seed_tree_create() {
        let N = 8;
        let logN = 3;
        let root_seed = SeedTree::random_seed();
        let salt = random_vec(32);
        let rep_index = 5;

        let tree = SeedTree::create(&root_seed, logN, salt.as_slice(), rep_index);
        assert!(tree.num_leaves == N);
        for i in 0..tree.num_leaves {
            let leaf_seed_i = tree.get_leaf(i);
            assert!(leaf_seed_i != SeedTree::zero_seed());
        }
    }

    #[test]
    fn test_seed_tree_roundtrip() {
        let N = 8;
        let logN = 3;
        let root_seed = SeedTree::random_seed();
        let salt = random_vec(32);
        let rep_index = 5;

        let tree = SeedTree::create(&root_seed, logN, salt.as_slice(), rep_index);
        assert!(tree.num_leaves == N);

        for unopened_party in 0..N - 1 {
            let opening_data = tree.open_seeds(unopened_party);
            let tree2 = SeedTree::reconstruct_tree(logN, &salt, rep_index, unopened_party, &opening_data);
            assert!(tree2.num_leaves == N);

            for i in 0..N {
                if i != unopened_party {
                    assert!(tree.get_leaf(i) == tree2.get_leaf(i));
                } else {
                    assert!(tree2.get_leaf(i) == SeedTree::zero_seed());
                }
            }
        }
    }
}
189 crates/verenc/src/utils.rs Normal file

@@ -0,0 +1,189 @@
#![allow(non_snake_case)]

use sha2::{Digest, Sha512};
use crate::Seed;
use std::convert::TryInto;

use ed448_goldilocks_plus::EdwardsPoint as GGA;
use ed448_goldilocks_plus::Scalar as FF;

pub type Statement = GGA;
pub type Witness = FF;

#[derive(Clone, Default, PartialEq, Debug)]
pub struct CurveParams {
    pub(crate) G: GGA,
}

impl CurveParams {
    pub fn init(blind: GGA) -> Self {
        let G = blind;

        CurveParams {
            G
        }
    }
}

/* Utility functions */
pub fn hash_to_FF(point: &GGA) -> FF {
    let digest = hash_SHA512(&point.compress().to_bytes()[..]);

    FF::from_bytes(&(digest[0..56].try_into().unwrap()))
}

pub fn hash_SHA512(input: &[u8]) -> Vec<u8> {
    let mut hasher = Sha512::new();
    hasher.update(input);

    hasher.finalize().to_vec()
}

pub fn bytes_to_u32(input: &Vec<u8>) -> Vec<u32> {
    let extra = input.len() % 4;
    let mut output = Vec::<u32>::new();
    for i in (0..input.len() - extra).step_by(4) {
        let next_bytes: [u8; 4] = input[i..i + 4].try_into().unwrap();
        output.push(u32::from_le_bytes(next_bytes));
    }
    output
}

// Derive a uniformly random field element from a seed,
// assuming the bitlength of the field is less than 448 bits.
// Used to convert seeds to shares.
pub fn seed_to_FF(seed: Seed, salt: &[u8], rep_index: usize, party_index: usize, additional_input: Option<&[u8]>) -> FF {
    let rep_index = rep_index as u16;
    let party_index = party_index as u16;
    let mut hasher = Sha512::new();
    hasher.update(salt);
    hasher.update(seed);
    hasher.update(rep_index.to_le_bytes());
    hasher.update(party_index.to_le_bytes());
    if additional_input.is_some() {
        hasher.update(additional_input.unwrap());
    }

    let digest = hasher.finalize();

    FF::from_bytes(&digest[0..56].try_into().unwrap())
}

// A simple bit-reader over a byte slice.
struct BitReader<'a> {
    data: &'a [u8],
    // This counts the total number of bits read so far.
    bit_pos: usize,
}

impl<'a> BitReader<'a> {
    fn new(data: &'a [u8]) -> Self {
        Self { data, bit_pos: 0 }
    }

    // Reads a single bit from the input.
    fn read_bit(&mut self) -> Option<bool> {
        if self.bit_pos >= self.data.len() * 8 {
            return None;
        }
        // In little-endian order within a byte, the bit at position (bit_pos % 8)
        // is extracted. (This is just one valid choice; what matters is consistency.)
        let byte = self.data[self.bit_pos / 8];
        let bit = (byte >> (self.bit_pos % 8)) & 1;
        self.bit_pos += 1;
        Some(bit != 0)
    }

    // Reads `count` bits and returns them as the lower `count` bits of a u64.
    // (Assumes count <= 64.)
    fn read_bits(&mut self, count: usize) -> Option<u64> {
        let mut value = 0u64;
        for i in 0..count {
            // Each bit read is placed in position i (i.e. we're building a little-endian number).
            let bit = self.read_bit();
            if bit.is_some() && bit.unwrap() {
                value |= 1 << i;
            }
            if i == 0 && bit.is_none() {
                return None;
            }
        }
        Some(value)
    }
}

// Encodes an arbitrary byte slice into a vector of Curve448 clamped scalars.
// Each scalar encodes 432 bits of data (i.e. about 54 bytes).
//
// The mapping is as follows (little-endian view):
//  - Byte 0: empty
//  - Bytes 1..54: all 8 bits are free
//  - Byte 55: empty
//
// If the final chunk has fewer than 432 bits, it is padded with zero bits.
pub fn encode_to_curve448_scalars(input: &[u8]) -> Vec<Vec<u8>> {
    let mut reader = BitReader::new(input);
    let mut scalars = Vec::new();

    // Continue until no more bits are available.
    while let Some(_) = reader.read_bits(1) {
        // (We already advanced one bit; move back one step.)
        reader.bit_pos -= 1;

        let mut scalar = vec![0; 56];

        for i in 1..55 {
            // If there aren't enough bits, pad with 0.
            let byte = reader.read_bits(8).unwrap_or(0);
            scalar[i] = byte as u8;
        }

        scalars.push(scalar);
    }

    scalars
}
|
||||
|
||||
// Recombines a slice of 56-byte scalars (each containing 432 free bits)
|
||||
// into the original bit–stream, returned as a Vec<u8>.
|
||||
//
|
||||
// The packing was as follows (little–endian bit–order):
|
||||
// - Byte 0: empty
|
||||
// - Bytes 1..54: all 8 bits are free (432 bits total)
|
||||
// - Byte 55: empty
|
||||
pub fn decode_from_curve448_scalars(scalars: &Vec<Vec<u8>>) -> Vec<u8> {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
// We'll accumulate bits in `acc` (lowest-order bits are the oldest)
|
||||
// and keep track of how many bits we have in `bits_in_acc`.
|
||||
let mut acc: u64 = 0;
|
||||
let mut bits_in_acc: usize = 0;
|
||||
|
||||
// A helper macro to push bits into our accumulator and flush bytes when possible.
|
||||
macro_rules! push_bits {
|
||||
($value:expr, $num_bits:expr) => {{
|
||||
// Append the new bits to the accumulator.
|
||||
acc |= ($value as u64) << bits_in_acc;
|
||||
bits_in_acc += $num_bits;
|
||||
// While we have a full byte, flush it.
|
||||
while bits_in_acc >= 8 {
|
||||
output.push((acc & 0xFF) as u8);
|
||||
acc >>= 8;
|
||||
bits_in_acc -= 8;
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
for scalar in scalars {
|
||||
if scalar.len() != 56 {
|
||||
return vec![];
|
||||
}
|
||||
|
||||
for &byte in &scalar[1..55] {
|
||||
push_bits!(byte, 8);
|
||||
}
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
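The encoder/decoder pair above is byte-aligned in practice: each 56-byte clamped scalar carries exactly 54 payload bytes, with byte 0 and byte 55 left empty for clamping headroom. A minimal Go sketch of the same round trip (illustrative only, not part of this commit; the function names are hypothetical):

package main

import (
	"bytes"
	"fmt"
)

// packScalars mirrors encode_to_curve448_scalars for byte-aligned input:
// 54 payload bytes per 56-byte scalar, bytes 0 and 55 left zero.
func packScalars(input []byte) [][]byte {
	var scalars [][]byte
	for off := 0; off < len(input); off += 54 {
		scalar := make([]byte, 56)
		end := off + 54
		if end > len(input) {
			end = len(input) // final chunk is zero-padded
		}
		copy(scalar[1:], input[off:end])
		scalars = append(scalars, scalar)
	}
	return scalars
}

// unpackScalars mirrors decode_from_curve448_scalars: concatenate the
// 54 free bytes of each scalar.
func unpackScalars(scalars [][]byte) []byte {
	var out []byte
	for _, s := range scalars {
		if len(s) != 56 {
			return nil
		}
		out = append(out, s[1:55]...)
	}
	return out
}

func main() {
	msg := []byte("hello verenc")
	got := unpackScalars(packScalars(msg))
	// The decoder returns whole 54-byte chunks, so trailing zero padding
	// must be trimmed by the caller if the original length is not stored.
	fmt.Println(bytes.Equal(got[:len(msg)], msg)) // true
}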
30
crates/verenc/src/ve.rs
Normal file
@ -0,0 +1,30 @@
pub trait VerEnc {
    type SystemParams;
    type Statement;
    type Witness;
    type PKE;
    type EncKey;
    type DecKey;
    type VEParams;
    type VEProof;
    type VECipherText;

    fn setup(params: &Self::SystemParams, vparams: &Self::VEParams, pke: Self::PKE) -> Self;

    fn set_ek(&mut self, ek: Self::EncKey);

    fn kgen(&mut self) -> Self::DecKey;

    fn get_public_key(&self) -> &Self::EncKey;

    fn igen(&self, w: &[u8; 56]) -> (Self::Statement, Self::Witness);

    fn prove(&self, stm: &Self::Statement, wit: &Self::Witness) -> Self::VEProof;

    fn verify(&self, stm: &Self::Statement, pi: &Self::VEProof) -> bool;

    fn compress(&self, stm: &Self::Statement, pi: &Self::VEProof) -> Self::VECipherText;

    fn recover(&self, stm: &Self::Statement, dk: &Self::DecKey, ve_ct: &Self::VECipherText) -> Self::Witness;
}
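The trait pins down the verifiable-encryption lifecycle: key generation (kgen, set_ek), instance generation from a 56-byte witness (igen), proving, public verification, compression of a verified proof into a ciphertext, and recovery under the decryption key. A sketch of that call order, mirrored here in Go with hypothetical reduced types (byte slices stand in for the associated types; none of this is from the commit):

package sketch

import "fmt"

// verEnc is a hypothetical Go mirror of the Rust VerEnc trait, reduced to
// byte slices so only the call order is shown.
type verEnc interface {
	kgen() (dk []byte)
	igen(w [56]byte) (statement, witness []byte)
	prove(statement, witness []byte) (proof []byte)
	verify(statement, proof []byte) bool
	compress(statement, proof []byte) (ct []byte)
	recover(statement, dk, ct []byte) (witness []byte)
}

// roundTrip exercises the intended lifecycle: prove, verify, then compress
// the now-trusted proof into a ciphertext that the key holder can open.
func roundTrip(ve verEnc, w [56]byte) ([]byte, error) {
	dk := ve.kgen()
	stm, wit := ve.igen(w)
	pi := ve.prove(stm, wit)
	if !ve.verify(stm, pi) {
		return nil, fmt.Errorf("proof rejected")
	}
	ct := ve.compress(stm, pi)
	return ve.recover(stm, dk, ct), nil
}

Note the asymmetry the trait encodes: verify needs only the statement and proof (anyone can check), while recover additionally needs the decryption key.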
@ -58,11 +58,13 @@ var storeSet = wire.NewSet(
	store.NewPebbleCoinStore,
	store.NewPebbleKeyStore,
	store.NewPebbleDataProofStore,
	store.NewPebbleHypergraphStore,
	store.NewPeerstoreDatastore,
	wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
	wire.Bind(new(store.CoinStore), new(*store.PebbleCoinStore)),
	wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
	wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)),
	wire.Bind(new(store.HypergraphStore), new(*store.PebbleHypergraphStore)),
	wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)),
)
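Note the pairing in the change: every provider added to the set (store.NewPebbleHypergraphStore) needs a matching wire.Bind so consumers that depend on the interface receive the Pebble-backed implementation. A self-contained sketch of the pattern with hypothetical types (google/wire's NewSet and Bind are the real API; everything else here is illustrative):

package di

import (
	"github.com/google/wire"
)

// HypergraphStore is a hypothetical interface standing in for the real one.
type HypergraphStore interface {
	Get(key []byte) ([]byte, error)
}

// PebbleHypergraphStore is a hypothetical concrete implementation.
type PebbleHypergraphStore struct{}

func (s *PebbleHypergraphStore) Get(key []byte) ([]byte, error) { return nil, nil }

// NewPebbleHypergraphStore is the provider wire will call.
func NewPebbleHypergraphStore() *PebbleHypergraphStore { return &PebbleHypergraphStore{} }

// StoreSet registers the provider and binds the interface to it, mirroring
// the storeSet change above: one provider entry plus one wire.Bind entry.
var StoreSet = wire.NewSet(
	NewPebbleHypergraphStore,
	wire.Bind(new(HypergraphStore), new(*PebbleHypergraphStore)),
)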
@ -47,11 +47,12 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes
	blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
	frameProver := crypto.NewCachedWesolowskiFrameProver(zapLogger)
	kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
	pebbleHypergraphStore := store.NewPebbleHypergraphStore(pebbleDB, zapLogger)
	engineConfig := configConfig.Engine
	masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, frameProver)
	inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
	pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger)
	tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, configConfig, fileKeyManager, blossomSub, frameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
	tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, configConfig, fileKeyManager, blossomSub, frameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleHypergraphStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
	masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, frameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
	node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine, pebbleDB)
	if err != nil {
@ -73,11 +74,12 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo
	blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
	frameProver := crypto.NewCachedWesolowskiFrameProver(zapLogger)
	kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
	pebbleHypergraphStore := store.NewPebbleHypergraphStore(pebbleDB, zapLogger)
	engineConfig := configConfig.Engine
	masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, frameProver)
	inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
	pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger)
	tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, configConfig, fileKeyManager, blossomSub, frameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
	tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, configConfig, fileKeyManager, blossomSub, frameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleHypergraphStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
	masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, frameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
	node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine, pebbleDB)
	if err != nil {
@ -132,7 +134,7 @@ var debugLoggerSet = wire.NewSet(

var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))

var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleCoinStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.CoinStore), new(*store.PebbleCoinStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleCoinStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPebbleHypergraphStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.CoinStore), new(*store.PebbleCoinStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.HypergraphStore), new(*store.PebbleHypergraphStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))

var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))
@ -19,14 +19,14 @@ case "$os_type" in
    # Check if the architecture is ARM
    if [[ "$(uname -m)" == "arm64" ]]; then
      # MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
      go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lbls48581 -lvdf -ldl -lm -lflint -lgmp -lmpfr'" "$@"
      go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lbls48581 -lvdf -lverenc -ldl -lm -lflint -lgmp -lmpfr'" "$@"
    else
      echo "Unsupported platform"
      exit 1
    fi
    ;;
  "Linux")
    export CGO_LDFLAGS="-L/usr/local/lib -lflint -lgmp -lmpfr -ldl -lm -L$BINARIES_DIR -lvdf -lbls48581 -static"
    export CGO_LDFLAGS="-L/usr/local/lib -lflint -lgmp -lmpfr -ldl -lm -L$BINARIES_DIR -lvdf -lverenc -lbls48581 -static"
    go build -ldflags "-linkmode 'external'" "$@"
    ;;
  *)
@ -165,7 +165,7 @@ var unlock *SignedGenesisUnlock

func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
	if network != 0 {
		unlock = &SignedGenesisUnlock{
			GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c00000000000000000000000C",
			GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c00000000000000000000000D",
			Beacon: []byte{
				0x58, 0xef, 0xd9, 0x7e, 0xdd, 0x0e, 0xb6, 0x2f,
				0x51, 0xc7, 0x5d, 0x00, 0x29, 0x12, 0x45, 0x49,
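Only the trailing counter byte of the non-mainnet genesis seed changes (…0C to …0D); the leading 25 bytes are the ASCII string "resist much, obey little|" followed by zero padding. A quick Go check (not part of the commit):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	seed, _ := hex.DecodeString(
		"726573697374206d7563682c206f626579206c6974746c657c00000000000000000000000D")
	fmt.Printf("%q\n", seed[:25])                    // "resist much, obey little|"
	fmt.Printf("counter byte: %#x\n", seed[len(seed)-1]) // 0xd
}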
@ -74,6 +74,7 @@ func (e *DataClockConsensusEngine) syncWithMesh() error {
func (e *DataClockConsensusEngine) prove(
	previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	time.Sleep(40 * time.Second)
	if e.lastProven >= previousFrame.FrameNumber && e.lastProven != 0 {
		return previousFrame, nil
	}
@ -410,22 +411,41 @@ func (e *DataClockConsensusEngine) syncWithPeer(
		zap.Uint64("frame_number", response.ClockFrame.FrameNumber),
		zap.Duration("frame_age", frametime.Since(response.ClockFrame)),
	)

	if !e.IsInProverTrie(
		response.ClockFrame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
	) {
		cooperative = false
	}

	if err := e.frameProver.VerifyDataClockFrame(
		response.ClockFrame,
	); err != nil {
		return latest, doneChs, errors.Wrap(err, "sync")
	}

	// Useful for testnet, immediately handles equivocation from multiple
	// genesis events:
	if response.ClockFrame.FrameNumber == 1 {
		genesis, _, _ := e.clockStore.GetDataClockFrame(e.filter, 0, true)
		selector, _ := genesis.GetSelector()
		if !bytes.Equal(
			response.ClockFrame.ParentSelector,
			selector.FillBytes(make([]byte, 32)),
		) {
			cooperative = false
			return latest, doneChs, errors.Wrap(errors.New("invalid frame"), "sync")
		}
	}

	doneCh, err := e.dataTimeReel.Insert(e.ctx, response.ClockFrame)
	if err != nil {
		return latest, doneChs, errors.Wrap(err, "sync")
	}

	doneChs = append(doneChs, doneCh)
	latest = response.ClockFrame

	if latest.FrameNumber >= maxFrame {
		return latest, doneChs, nil
	}
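The frame-1 check pins syncing peers to the locally stored genesis: a frame claiming number 1 must carry the genesis frame's selector, left-padded to 32 bytes, as its ParentSelector, or the peer is marked uncooperative. The padding detail, sketched in isolation (illustrative only):

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

func main() {
	// GetSelector yields a big.Int; FillBytes left-pads it into a fixed
	// 32-byte buffer, which is the form stored in ParentSelector.
	selector := big.NewInt(0xabcdef)
	padded := selector.FillBytes(make([]byte, 32))
	fmt.Println(len(padded), bytes.Equal(padded[:29], make([]byte, 29))) // 32 true
}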
@ -120,6 +120,10 @@ outer:
			continue
		}

		if head.FrameNumber <= maxFrames {
			continue
		}

		to := head.FrameNumber - maxFrames
		for i := from; i < to; i += batchSize {
			start, stop := i, min(i+batchSize, to)
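The added guard matters because the frame numbers are unsigned: without it, head.FrameNumber - maxFrames would wrap around while the head is still below the retention window, producing an enormous upper bound for the prune loop. A standalone illustration (not from the repo):

package main

import "fmt"

func main() {
	var head, maxFrames uint64 = 100, 360
	// Unsigned subtraction wraps: 100 - 360 underflows to a huge bound.
	fmt.Println(head - maxFrames) // 18446744073709551356
	// Hence the guard added above:
	if head <= maxFrames {
		fmt.Println("nothing to prune yet")
	}
}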
@ -117,7 +117,7 @@ func (e *DataClockConsensusEngine) validateTxMessage(peerID peer.ID, message *pb
	if err != nil {
		panic(err)
	}
	if frameNumber+2 < head.FrameNumber {
	if frameNumber+1 < head.FrameNumber {
		return p2p.ValidationResultIgnore
	}
}
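The tolerance tightens from two frames to one: with the head at frame N, a transaction referencing frame F is now ignored when F+1 < N, so only F >= N-1 passes. Checking the boundary (illustrative):

package main

import "fmt"

// accepted is the complement of the ignore condition above.
func accepted(frameNumber, head uint64) bool {
	return frameNumber+1 >= head
}

func main() {
	fmt.Println(accepted(98, 100)) // false: two frames behind, now ignored
	fmt.Println(accepted(99, 100)) // true: one frame behind still passes
}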
@ -458,13 +458,11 @@ func (e *DataClockConsensusEngine) handleMint(
		return nil, errors.Wrap(err, "handle mint")
	}
	returnAddr = proofAddr
	stateTree := &crypto.VectorCommitmentTree{}
	err = e.coinStore.PutPreCoinProof(
		txn,
		head.FrameNumber,
		proofAddr,
		add,
		stateTree,
	)
	if err != nil {
		txn.Abort()
@ -503,13 +501,11 @@ func (e *DataClockConsensusEngine) handleMint(
		return nil, errors.Wrap(err, "handle mint")
	}
	returnAddr = proofAddr
	stateTree := &crypto.VectorCommitmentTree{}
	err = e.coinStore.PutPreCoinProof(
		txn,
		head.FrameNumber,
		proofAddr,
		proof,
		stateTree,
	)
	if err != nil {
		txn.Abort()
@ -555,12 +551,10 @@ func (e *DataClockConsensusEngine) handleMint(
		txn.Abort()
		return nil, errors.Wrap(err, "handle mint")
	}
	stateTree := &crypto.VectorCommitmentTree{}
	e.coinStore.DeletePreCoinProof(
		txn,
		a,
		deletes[0].GetDeletedProof(),
		stateTree,
	)
}
if err := txn.Commit(); err != nil {
@ -665,31 +665,30 @@ func TestHandlePreMidnightMint(t *testing.T) {

	assert.Len(t, success.Requests, 1)
	assert.Len(t, fail.Requests, 1)
	stateTree := &qcrypto.VectorCommitmentTree{}
	txn, _ := app.CoinStore.NewTransaction(false)
	for i, o := range app.TokenOutputs.Outputs {
		switch e := o.Output.(type) {
		case *protobufs.TokenOutput_Coin:
			a, err := GetAddressOfCoin(e.Coin, 1, uint64(i))
			assert.NoError(t, err)
			err = app.CoinStore.PutCoin(txn, 1, a, e.Coin, stateTree)
			err = app.CoinStore.PutCoin(txn, 1, a, e.Coin)
			assert.NoError(t, err)
		case *protobufs.TokenOutput_DeletedCoin:
			c, err := app.CoinStore.GetCoinByAddress(nil, e.DeletedCoin.Address)
			assert.NoError(t, err)
			err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c, stateTree)
			err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
			assert.NoError(t, err)
		case *protobufs.TokenOutput_Proof:
			a, err := GetAddressOfPreCoinProof(e.Proof)
			assert.NoError(t, err)
			err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof, stateTree)
			err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof)
			assert.NoError(t, err)
		case *protobufs.TokenOutput_DeletedProof:
			a, err := GetAddressOfPreCoinProof(e.DeletedProof)
			assert.NoError(t, err)
			c, err := app.CoinStore.GetPreCoinProofByAddress(a)
			assert.NoError(t, err)
			err = app.CoinStore.DeletePreCoinProof(txn, a, c, stateTree)
			err = app.CoinStore.DeletePreCoinProof(txn, a, c)
			assert.NoError(t, err)
		}
	}
@ -7,6 +7,7 @@ import (
	"encoding/gob"
	"errors"
	"fmt"
	"math/big"

	rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
)
@ -23,38 +24,52 @@ const (
)

type VectorCommitmentNode interface {
	Commit() []byte
	Commit(recalculate bool) []byte
	GetSize() *big.Int
}

type VectorCommitmentLeafNode struct {
	Key        []byte
	Value      []byte
	HashTarget []byte
	Commitment []byte
	Size       *big.Int
}

type VectorCommitmentBranchNode struct {
	Prefix []int
	Children [BranchNodes]VectorCommitmentNode
	Commitment []byte
	Prefix        []int
	Children      [BranchNodes]VectorCommitmentNode
	Commitment    []byte
	Size          *big.Int
	LeafCount     int
	LongestBranch int
}

func (n *VectorCommitmentLeafNode) Commit() []byte {
	if n.Commitment == nil {
func (n *VectorCommitmentLeafNode) Commit(recalculate bool) []byte {
	if n.Commitment == nil || recalculate {
		h := sha512.New()
		h.Write([]byte{0})
		h.Write(n.Key)
		h.Write(n.Value)
		if len(n.HashTarget) != 0 {
			h.Write(n.HashTarget)
		} else {
			h.Write(n.Value)
		}
		n.Commitment = h.Sum(nil)
	}
	return n.Commitment
}

func (n *VectorCommitmentBranchNode) Commit() []byte {
	if n.Commitment == nil {
func (n *VectorCommitmentLeafNode) GetSize() *big.Int {
	return n.Size
}

func (n *VectorCommitmentBranchNode) Commit(recalculate bool) []byte {
	if n.Commitment == nil || recalculate {
		data := []byte{}
		for _, child := range n.Children {
			if child != nil {
				out := child.Commit()
				out := child.Commit(recalculate)
				switch c := child.(type) {
				case *VectorCommitmentBranchNode:
					h := sha512.New()
@ -84,7 +99,7 @@ func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
	if n.Commitment == nil {
		for _, child := range n.Children {
			if child != nil {
				out := child.Commit()
				out := child.Commit(false)
				switch c := child.(type) {
				case *VectorCommitmentBranchNode:
					h := sha512.New()
@ -108,7 +123,7 @@ func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
	} else {
		child := n.Children[index]
		if child != nil {
			out := child.Commit()
			out := child.Commit(false)
			switch c := child.(type) {
			case *VectorCommitmentBranchNode:
				h := sha512.New()
@ -130,11 +145,15 @@ func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
	return rbls48581.VerifyRaw(data, n.Commitment, uint64(index), proof, 64)
}

func (n *VectorCommitmentBranchNode) GetSize() *big.Int {
	return n.Size
}

func (n *VectorCommitmentBranchNode) Prove(index int) []byte {
	data := []byte{}
	for _, child := range n.Children {
		if child != nil {
			out := child.Commit()
			out := child.Commit(false)
			switch c := child.(type) {
			case *VectorCommitmentBranchNode:
				h := sha512.New()
@ -204,13 +223,44 @@ func getNibblesUntilDiverge(key1, key2 []byte, startDepth int) ([]int, int) {
	}
}

// getLastNibble returns the final nibble after applying a prefix
func getLastNibble(key []byte, prefixLen int) int {
	return getNextNibble(key, prefixLen*BranchBits)
func recalcMetadata(node VectorCommitmentNode) (
	leafCount int,
	longestBranch int,
	size *big.Int,
) {
	switch n := node.(type) {
	case *VectorCommitmentLeafNode:
		// A leaf counts as one, and its depth (from itself) is zero.
		return 1, 0, n.Size
	case *VectorCommitmentBranchNode:
		totalLeaves := 0
		maxChildDepth := 0
		size := new(big.Int)
		for _, child := range n.Children {
			if child != nil {
				cLeaves, cDepth, cSize := recalcMetadata(child)
				totalLeaves += cLeaves
				size.Add(size, cSize)
				if cDepth > maxChildDepth {
					maxChildDepth = cDepth
				}
			}
		}
		// Store the aggregated values in the branch node.
		n.LeafCount = totalLeaves
		// The branch's longest branch is one more than its deepest child.
		n.LongestBranch = maxChildDepth + 1
		n.Size = size
		return totalLeaves, n.LongestBranch, n.Size
	}
	return 0, 0, new(big.Int)
}

// Insert adds or updates a key-value pair in the tree
func (t *VectorCommitmentTree) Insert(key, value []byte) error {
func (t *VectorCommitmentTree) Insert(
	key, value, hashTarget []byte,
	size *big.Int,
) error {
	if len(key) == 0 {
		return errors.New("empty key not allowed")
	}
@ -218,14 +268,21 @@ func (t *VectorCommitmentTree) Insert(key, value []byte) error {
	var insert func(node VectorCommitmentNode, depth int) VectorCommitmentNode
	insert = func(node VectorCommitmentNode, depth int) VectorCommitmentNode {
		if node == nil {
			return &VectorCommitmentLeafNode{Key: key, Value: value}
			return &VectorCommitmentLeafNode{
				Key:        key,
				Value:      value,
				HashTarget: hashTarget,
				Size:       size,
			}
		}

		switch n := node.(type) {
		case *VectorCommitmentLeafNode:
			if bytes.Equal(n.Key, key) {
				n.Value = value
				n.HashTarget = hashTarget
				n.Commitment = nil
				n.Size = size
				return n
			}

@ -241,7 +298,12 @@ func (t *VectorCommitmentTree) Insert(key, value []byte) error {
			finalOldNibble := getNextNibble(n.Key, divergeDepth)
			finalNewNibble := getNextNibble(key, divergeDepth)
			branch.Children[finalOldNibble] = n
			branch.Children[finalNewNibble] = &VectorCommitmentLeafNode{Key: key, Value: value}
			branch.Children[finalNewNibble] = &VectorCommitmentLeafNode{
				Key:        key,
				Value:      value,
				HashTarget: hashTarget,
				Size:       size,
			}

			return branch

@ -258,20 +320,32 @@ func (t *VectorCommitmentTree) Insert(key, value []byte) error {
					// Position old branch and new leaf
					newBranch.Children[expectedNibble] = n
					n.Prefix = n.Prefix[i+1:] // remove shared prefix from old branch
					newBranch.Children[actualNibble] = &VectorCommitmentLeafNode{Key: key, Value: value}
					newBranch.Children[actualNibble] = &VectorCommitmentLeafNode{
						Key:        key,
						Value:      value,
						HashTarget: hashTarget,
						Size:       size,
					}
					recalcMetadata(newBranch)
					return newBranch
				}
			}

			// Key matches prefix, continue with final nibble
			finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
			n.Children[finalNibble] = insert(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)
			n.Children[finalNibble] = insert(
				n.Children[finalNibble],
				depth+len(n.Prefix)*BranchBits+BranchBits,
			)
			n.Commitment = nil
			recalcMetadata(n)
			return n
		} else {
			// Simple branch without prefix
			nibble := getNextNibble(key, depth)
			n.Children[nibble] = insert(n.Children[nibble], depth+BranchBits)
			n.Commitment = nil
			recalcMetadata(n)
			return n
		}
	}
@ -416,13 +490,14 @@ func (t *VectorCommitmentTree) Delete(key []byte) error {
		return errors.New("empty key not allowed")
	}

	var delete func(node VectorCommitmentNode, depth int) VectorCommitmentNode
	delete = func(node VectorCommitmentNode, depth int) VectorCommitmentNode {
	var remove func(node VectorCommitmentNode, depth int) VectorCommitmentNode
	remove = func(node VectorCommitmentNode, depth int) VectorCommitmentNode {
		if node == nil {
			return nil
		}

		switch n := node.(type) {

		case *VectorCommitmentLeafNode:
			if bytes.Equal(n.Key, key) {
				return nil
@ -430,66 +505,92 @@ func (t *VectorCommitmentTree) Delete(key []byte) error {
			return n

		case *VectorCommitmentBranchNode:
			// Check prefix match
			for i, expectedNibble := range n.Prefix {
				currentNibble := getNextNibble(key, depth+i*BranchBits)
				if currentNibble != expectedNibble {
					return n // Key doesn't match prefix, nothing to delete
					return n
				}
			}

			// Delete at final position after prefix
			finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
			n.Children[finalNibble] = delete(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)
			n.Children[finalNibble] =
				remove(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)

			n.Commitment = nil

			// Count remaining children
			childCount := 0
			var lastChild VectorCommitmentNode
			var lastIndex int
			var lastChildIndex int
			for i, child := range n.Children {
				if child != nil {
					childCount++
					lastChild = child
					lastIndex = i
					lastChildIndex = i
				}
			}

			if childCount == 0 {
				return nil
			} else if childCount == 1 {
				// If the only child is a leaf, keep structure if its path matches
				if leaf, ok := lastChild.(*VectorCommitmentLeafNode); ok {
					if lastIndex == getLastNibble(leaf.Key, len(n.Prefix)) {
						return n
					}
					return leaf
				}
				// If it's a branch, merge the prefixes
				if branch, ok := lastChild.(*VectorCommitmentBranchNode); ok {
					branch.Prefix = append(n.Prefix, branch.Prefix...)
					return branch
			var retNode VectorCommitmentNode
			switch childCount {
			case 0:
				retNode = nil
			case 1:
				if childBranch, ok := lastChild.(*VectorCommitmentBranchNode); ok {
					// Merge:
					// n.Prefix + [lastChildIndex] + childBranch.Prefix
					mergedPrefix := make([]int, 0, len(n.Prefix)+1+len(childBranch.Prefix))
					mergedPrefix = append(mergedPrefix, n.Prefix...)
					mergedPrefix = append(mergedPrefix, lastChildIndex)
					mergedPrefix = append(mergedPrefix, childBranch.Prefix...)

					childBranch.Prefix = mergedPrefix
					childBranch.Commitment = nil
					retNode = childBranch
				} else {
					retNode = lastChild
				}
			default:
				retNode = n
			}
			return n

			if branch, ok := retNode.(*VectorCommitmentBranchNode); ok {
				recalcMetadata(branch)
			}

			return retNode
		default:
			return node
		}

		return nil
	}

	t.Root = delete(t.Root, 0)
	t.Root = remove(t.Root, 0)
	return nil
}

func (t *VectorCommitmentTree) GetMetadata() (leafCount int, longestBranch int) {
	switch root := t.Root.(type) {
	case nil:
		return 0, 0
	case *VectorCommitmentLeafNode:
		return 1, 0
	case *VectorCommitmentBranchNode:
		return root.LeafCount, root.LongestBranch
	}
	return 0, 0
}

// Commit returns the root of the tree
func (t *VectorCommitmentTree) Commit() []byte {
func (t *VectorCommitmentTree) Commit(recalculate bool) []byte {
	if t.Root == nil {
		return make([]byte, 64)
	}
	return t.Root.Commit()
	return t.Root.Commit(recalculate)
}

func debugNode(node VectorCommitmentNode, depth int, prefix string) {
func (t *VectorCommitmentTree) GetSize() *big.Int {
	return t.Root.GetSize()
}

func DebugNode(node VectorCommitmentNode, depth int, prefix string) {
	if node == nil {
		return
	}
@ -502,7 +603,7 @@ func debugNode(node VectorCommitmentNode, depth int, prefix string) {
	for i, child := range n.Children {
		if child != nil {
			fmt.Printf("%s [%d]:\n", prefix, i)
			debugNode(child, depth+1, prefix+" ")
			DebugNode(child, depth+1, prefix+" ")
		}
	}
}
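With these changes the tree maintains three aggregates per branch (LeafCount, LongestBranch, and a big.Int Size summed from the leaves), recomputed by recalcMetadata on every Insert and Delete, while Commit(true) forces commitments to be rebuilt rather than served from cache. A short usage sketch against the changed API (illustrative; the import path is assumed from the module layout):

package main

import (
	"fmt"
	"math/big"

	"source.quilibrium.com/quilibrium/monorepo/node/crypto"
)

func main() {
	tree := &crypto.VectorCommitmentTree{}

	// Each leaf carries an optional hash target (nil falls back to hashing
	// the value) and a size that is summed up the branches.
	_ = tree.Insert([]byte("key1"), []byte("value1"), nil, big.NewInt(1))
	_ = tree.Insert([]byte("key2"), []byte("value2"), nil, big.NewInt(2))

	root := tree.Commit(false) // served from cache where possible; Commit(true) forces recompute
	leaves, longest := tree.GetMetadata()

	fmt.Printf("root=%x leaves=%d longest=%d size=%s\n",
		root[:8], leaves, longest, tree.GetSize())
}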
@ -4,6 +4,7 @@ import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"
	"testing"

	"source.quilibrium.com/quilibrium/monorepo/bls48581/generated/bls48581"
@ -17,7 +18,7 @@ func BenchmarkVectorCommitmentTreeInsert(b *testing.B) {
		d := make([]byte, 32)
		rand.Read(d)
		addresses = append(addresses, d)
		err := tree.Insert(d, d)
		err := tree.Insert(d, d, nil, big.NewInt(1))
		if err != nil {
			b.Errorf("Failed to insert item %d: %v", i, err)
		}
@ -32,11 +33,11 @@ func BenchmarkVectorCommitmentTreeCommit(b *testing.B) {
		d := make([]byte, 32)
		rand.Read(d)
		addresses = append(addresses, d)
		err := tree.Insert(d, d)
		err := tree.Insert(d, d, nil, big.NewInt(1))
		if err != nil {
			b.Errorf("Failed to insert item %d: %v", i, err)
		}
		tree.Commit()
		tree.Commit(false)
	}
}
@ -48,7 +49,7 @@ func BenchmarkVectorCommitmentTreeProve(b *testing.B) {
		d := make([]byte, 32)
		rand.Read(d)
		addresses = append(addresses, d)
		err := tree.Insert(d, d)
		err := tree.Insert(d, d, nil, big.NewInt(1))
		if err != nil {
			b.Errorf("Failed to insert item %d: %v", i, err)
		}
@ -64,7 +65,7 @@ func BenchmarkVectorCommitmentTreeVerify(b *testing.B) {
		d := make([]byte, 32)
		rand.Read(d)
		addresses = append(addresses, d)
		err := tree.Insert(d, d)
		err := tree.Insert(d, d, nil, big.NewInt(1))
		if err != nil {
			b.Errorf("Failed to insert item %d: %v", i, err)
		}
@ -80,13 +81,13 @@ func TestVectorCommitmentTrees(t *testing.T) {
	tree := &VectorCommitmentTree{}

	// Test single insert
	err := tree.Insert([]byte("key1"), []byte("value1"))
	err := tree.Insert([]byte("key1"), []byte("value1"), nil, big.NewInt(1))
	if err != nil {
		t.Errorf("Failed to insert: %v", err)
	}

	// Test duplicate key
	err = tree.Insert([]byte("key1"), []byte("value2"))
	err = tree.Insert([]byte("key1"), []byte("value2"), nil, big.NewInt(1))
	if err != nil {
		t.Errorf("Failed to update existing key: %v", err)
	}
@ -100,7 +101,7 @@ func TestVectorCommitmentTrees(t *testing.T) {
	}

	// Test empty key
	err = tree.Insert([]byte{}, []byte("value"))
	err = tree.Insert([]byte{}, []byte("value"), nil, big.NewInt(1))
	if err == nil {
		t.Error("Expected error for empty key, got none")
	}
@ -114,7 +115,7 @@ func TestVectorCommitmentTrees(t *testing.T) {
	}

	// Insert and get
	tree.Insert([]byte("key1"), []byte("value1"))
	tree.Insert([]byte("key1"), []byte("value1"), nil, big.NewInt(1))
	value, err = tree.Get([]byte("key1"))
	if err != nil {
		t.Errorf("Failed to get value: %v", err)
@ -138,7 +139,7 @@ func TestVectorCommitmentTrees(t *testing.T) {
	}

	// Insert and delete
	tree.Insert([]byte("key1"), []byte("value1"))
	tree.Insert([]byte("key1"), []byte("value1"), nil, big.NewInt(1))
	err = tree.Delete([]byte("key1"))
	if err != nil {
		t.Errorf("Failed to delete: %v", err)
@ -167,7 +168,7 @@ func TestVectorCommitmentTrees(t *testing.T) {
	}

	for i, key := range keys {
		err := tree.Insert([]byte(key), []byte("value"+string(rune('1'+i))))
		err := tree.Insert([]byte(key), []byte("value"+string(rune('1'+i))), nil, big.NewInt(1))
		if err != nil {
			t.Errorf("Failed to insert key %s: %v", key, err)
		}
@ -220,16 +221,16 @@ func TestVectorCommitmentTrees(t *testing.T) {
	}

	// Root should change after insert
	tree.Insert([]byte("key1"), []byte("value1"))
	firstRoot := tree.Root.Commit()
	tree.Insert([]byte("key1"), []byte("value1"), nil, big.NewInt(1))
	firstRoot := tree.Root.Commit(false)

	if bytes.Equal(firstRoot, bytes.Repeat([]byte{0x00}, 64)) {
		t.Error("Root hash should change after insert")
	}

	// Root should change after update
	tree.Insert([]byte("key1"), []byte("value2"))
	secondRoot := tree.Root.Commit()
	tree.Insert([]byte("key1"), []byte("value2"), nil, big.NewInt(1))
	secondRoot := tree.Root.Commit(false)

	if bytes.Equal(secondRoot, firstRoot) {
		t.Error("Root hash should change after update")
@ -248,34 +249,51 @@ func TestVectorCommitmentTrees(t *testing.T) {

	addresses := [][]byte{}

	for i := 0; i < 1000; i++ {
	for i := 0; i < 10000; i++ {
		d := make([]byte, 32)
		rand.Read(d)
		addresses = append(addresses, d)
	}

	// Insert 1000 items
	for i := 0; i < 1000; i++ {
	kept := [][]byte{}
	for i := 0; i < 5000; i++ {
		kept = append(kept, addresses[i])
	}

	newAdditions := [][]byte{}
	for i := 0; i < 5000; i++ {
		d := make([]byte, 32)
		rand.Read(d)
		newAdditions = append(newAdditions, d)
		kept = append(kept, d)
	}

	// Insert 10000 items
	for i := 0; i < 10000; i++ {
		key := addresses[i]
		value := addresses[i]
		err := tree.Insert(key, value)
		err := tree.Insert(key, value, nil, big.NewInt(1))
		if err != nil {
			t.Errorf("Failed to insert item %d: %v", i, err)
		}
	}

	// Insert 1000 items in reverse
	for i := 999; i >= 0; i-- {
	if tree.GetSize().Cmp(big.NewInt(10000)) != 0 {
		t.Errorf("invalid tree size: %s", tree.GetSize().String())
	}

	// Insert 10000 items in reverse
	for i := 9999; i >= 0; i-- {
		key := addresses[i]
		value := addresses[i]
		err := cmptree.Insert(key, value)
		err := cmptree.Insert(key, value, nil, big.NewInt(1))
		if err != nil {
			t.Errorf("Failed to insert item %d: %v", i, err)
		}
	}

	// Verify all items
	for i := 0; i < 1000; i++ {
	for i := 0; i < 10000; i++ {
		key := addresses[i]
		expected := addresses[i]
		value, err := tree.Get(key)
@ -294,8 +312,52 @@ func TestVectorCommitmentTrees(t *testing.T) {
		}
	}

	tcommit := tree.Root.Commit()
	cmptcommit := cmptree.Root.Commit()
	// delete keys
	for i := 5000; i < 10000; i++ {
		key := addresses[i]
		fmt.Printf("delete %x\n", key)
		tree.Delete(key)
	}

	if tree.GetSize().Cmp(big.NewInt(5000)) != 0 {
		t.Errorf("invalid tree size: %s", tree.GetSize().String())
	}

	// add new
	for i := 0; i < 5000; i++ {
		tree.Insert(newAdditions[i], newAdditions[i], nil, big.NewInt(1))
	}

	if tree.GetSize().Cmp(big.NewInt(10000)) != 0 {
		t.Errorf("invalid tree size: %s", tree.GetSize().String())
	}

	cmptree = &VectorCommitmentTree{}

	for i := 0; i < 10000; i++ {
		cmptree.Insert(kept[i], kept[i], nil, big.NewInt(1))
	}
	// Verify all items
	for i := 0; i < 10000; i++ {
		key := kept[i]
		expected := kept[i]
		value, err := tree.Get(key)
		if err != nil {
			t.Errorf("Failed to get item %d: %v", i, err)
		}
		cmpvalue, err := cmptree.Get(key)
		if err != nil {
			t.Errorf("Failed to get item %d: %v", i, err)
		}
		if !bytes.Equal(value, expected) {
			t.Errorf("Item %d: expected %x, got %x", i, string(expected), string(value))
		}
		if !bytes.Equal(expected, cmpvalue) {
			t.Errorf("Item %d: expected %x, got %x", i, string(value), string(cmpvalue))
		}
	}
	tcommit := tree.Root.Commit(false)
	cmptcommit := cmptree.Root.Commit(false)

	if !bytes.Equal(tcommit, cmptcommit) {
		t.Errorf("tree mismatch, %x, %x", tcommit, cmptcommit)
@ -306,7 +368,16 @@ func TestVectorCommitmentTrees(t *testing.T) {
		t.Errorf("proof failed")
	}

	for _, p := range proofs {
		fmt.Printf("%x\n", p)
	leaves, longestBranch := tree.GetMetadata()

	if leaves != 10000 {
		t.Errorf("incorrect leaf count, %d, %d,", 10000, leaves)
	}

	// Statistical assumption, can be flaky
	if longestBranch != 4 {
		t.Errorf("incorrect longest branch count, %d, %d,", 4, longestBranch)
	}

	DebugNode(tree.Root, 0, "")
}
258
node/crypto/tree_compare.go
Normal file
@ -0,0 +1,258 @@
package crypto

import (
	"bytes"
	"fmt"
)

// CompareTreesAtHeight compares two vector commitment trees at each level
func CompareTreesAtHeight(tree1, tree2 *VectorCommitmentTree) [][]ComparisonResult {
	if tree1 == nil || tree2 == nil {
		return nil
	}

	var results [][]ComparisonResult
	maxHeight := getMaxHeight(tree1.Root, tree2.Root)

	// Compare level by level
	for height := 0; height <= maxHeight; height++ {
		levelResults := compareLevelCommits(tree1.Root, tree2.Root, height, 0)
		results = append(results, levelResults)
	}

	return results
}

type ComparisonResult struct {
	Path    []int  // Path taken to reach this node (nibble values)
	Height  int    // Current height in the tree
	Commit1 []byte // Commitment from first tree
	Commit2 []byte // Commitment from second tree
	Matches bool   // Whether the commitments match
}

func getMaxHeight(node1, node2 VectorCommitmentNode) int {
	height1 := getHeight(node1)
	height2 := getHeight(node2)
	if height1 > height2 {
		return height1
	}
	return height2
}

func getHeight(node VectorCommitmentNode) int {
	if node == nil {
		return 0
	}

	switch n := node.(type) {
	case *VectorCommitmentLeafNode:
		return 0
	case *VectorCommitmentBranchNode:
		maxChildHeight := 0
		for _, child := range n.Children {
			childHeight := getHeight(child)
			if childHeight > maxChildHeight {
				maxChildHeight = childHeight
			}
		}
		return maxChildHeight + 1 + len(n.Prefix)
	}
	return 0
}

func compareLevelCommits(node1, node2 VectorCommitmentNode, targetHeight, currentHeight int) []ComparisonResult {
	if node1 == nil && node2 == nil {
		return nil
	}

	// If we've reached the target height, compare the commits
	if currentHeight == targetHeight {
		var commit1, commit2 []byte
		if node1 != nil {
			commit1 = node1.Commit(false)
		}
		if node2 != nil {
			commit2 = node2.Commit(false)
		}

		return []ComparisonResult{{
			Height:  targetHeight,
			Commit1: commit1,
			Commit2: commit2,
			Matches: bytes.Equal(commit1, commit2),
		}}
	}

	// If we haven't reached the target height, traverse deeper
	var results []ComparisonResult

	// Handle branch nodes
	switch n1 := node1.(type) {
	case *VectorCommitmentBranchNode:
		n2, ok := node2.(*VectorCommitmentBranchNode)
		if !ok {
			// Trees have different structure at this point
			return results
		}

		// Account for prefix lengths
		nextHeight := currentHeight
		if len(n1.Prefix) > 0 {
			nextHeight += len(n1.Prefix)
		}

		// If we're still below target height after prefix, traverse children
		if nextHeight < targetHeight {
			for i := 0; i < BranchNodes; i++ {
				childResults := compareLevelCommits(n1.Children[i], n2.Children[i], targetHeight, nextHeight+1)
				results = append(results, childResults...)
			}
		}
	}

	return results
}

// TraverseAndCompare provides a channel-based iterator for comparing trees
func TraverseAndCompare(tree1, tree2 *VectorCommitmentTree) chan ComparisonResult {
	resultChan := make(chan ComparisonResult)

	go func() {
		defer close(resultChan)

		if tree1 == nil || tree2 == nil {
			return
		}

		maxHeight := getMaxHeight(tree1.Root, tree2.Root)

		// Traverse each height
		for height := 0; height <= maxHeight; height++ {
			results := compareLevelCommits(tree1.Root, tree2.Root, height, 0)
			for _, result := range results {
				resultChan <- result
			}
		}
	}()

	return resultChan
}

// LeafDifference contains information about leaves that differ between trees
type LeafDifference struct {
	Key         []byte // The key of the leaf
	OnlyInTree1 bool   // True if the leaf only exists in tree1
	OnlyInTree2 bool   // True if the leaf only exists in tree2
	Value1      []byte // Value from tree1 (if present)
	Value2      []byte // Value from tree2 (if present)
}

// CompareLeaves returns all leaves that differ between the two trees
func CompareLeaves(tree1, tree2 *VectorCommitmentTree) []LeafDifference {
	// Get all leaves from both trees
	leaves1 := GetAllLeaves(tree1.Root)
	leaves2 := GetAllLeaves(tree2.Root)

	differences := make([]LeafDifference, 0)

	// Use maps for efficient lookup
	leafMap1 := make(map[string]*VectorCommitmentLeafNode)
	leafMap2 := make(map[string]*VectorCommitmentLeafNode)

	// Build maps
	for _, leaf := range leaves1 {
		leafMap1[string(leaf.Key)] = leaf
	}
	for _, leaf := range leaves2 {
		leafMap2[string(leaf.Key)] = leaf
	}

	// Find leaves only in tree1 or with different values
	for _, leaf1 := range leaves1 {
		key := string(leaf1.Key)
		if leaf2, exists := leafMap2[key]; exists {
			// Leaf exists in both trees, check if values match
			if !bytes.Equal(leaf1.Value, leaf2.Value) {
				differences = append(differences, LeafDifference{
					Key:    leaf1.Key,
					Value1: leaf1.Value,
					Value2: leaf2.Value,
				})
			}
		} else {
			// Leaf only exists in tree1
			differences = append(differences, LeafDifference{
				Key:         leaf1.Key,
				OnlyInTree1: true,
				Value1:      leaf1.Value,
			})
		}
	}

	// Find leaves only in tree2
	for _, leaf2 := range leaves2 {
		key := string(leaf2.Key)
		if _, exists := leafMap1[key]; !exists {
			differences = append(differences, LeafDifference{
				Key:         leaf2.Key,
				OnlyInTree2: true,
				Value2:      leaf2.Value,
			})
		}
	}

	return differences
}

// GetAllLeaves returns all leaf nodes in the tree
func GetAllLeaves(node VectorCommitmentNode) []*VectorCommitmentLeafNode {
	if node == nil {
		return nil
	}

	var leaves []*VectorCommitmentLeafNode

	switch n := node.(type) {
	case *VectorCommitmentLeafNode:
		leaves = append(leaves, n)
	case *VectorCommitmentBranchNode:
		for _, child := range n.Children {
			if child != nil {
				childLeaves := GetAllLeaves(child)
				leaves = append(leaves, childLeaves...)
			}
		}
	}

	return leaves
}

// Example usage:
func ExampleComparison() {
	// Create and populate two trees
	tree1 := &VectorCommitmentTree{}
	tree2 := &VectorCommitmentTree{}

	// Compare trees using channel-based iterator
	for result := range TraverseAndCompare(tree1, tree2) {
		if !result.Matches {
			fmt.Printf("Mismatch at height %d\n", result.Height)
			fmt.Printf("Tree1 commit: %x\n", result.Commit1)
			fmt.Printf("Tree2 commit: %x\n", result.Commit2)
		}
	}

	// Compare leaves between trees
	differences := CompareLeaves(tree1, tree2)
	for _, diff := range differences {
		if diff.OnlyInTree1 {
			fmt.Printf("Key %x only exists in tree1 with value %x\n", diff.Key, diff.Value1)
		} else if diff.OnlyInTree2 {
			fmt.Printf("Key %x only exists in tree2 with value %x\n", diff.Key, diff.Value2)
		} else {
			fmt.Printf("Key %x has different values: tree1=%x, tree2=%x\n",
				diff.Key, diff.Value1, diff.Value2)
		}
	}
}
283
node/crypto/verifiable_encryption.go
Normal file
@ -0,0 +1,283 @@
package crypto

import (
	"encoding/binary"
	"sync"

	lru "github.com/hashicorp/golang-lru/v2"
	"source.quilibrium.com/quilibrium/monorepo/verenc"
	generated "source.quilibrium.com/quilibrium/monorepo/verenc/generated/verenc"
)

type VerEnc interface {
	ToBytes() []byte
	GetStatement() []byte
	Verify(proof []byte) bool
}

type VerEncProof interface {
	ToBytes() []byte
	Compress() VerEnc
	Verify() bool
}

type VerifiableEncryptor interface {
	Encrypt(
		data []byte,
		publicKey []byte,
	) []VerEncProof
	Decrypt(
		encrypted []VerEnc,
		decryptionKey []byte,
	) []byte
}

var _ VerifiableEncryptor = (*MPCitHVerifiableEncryptor)(nil)

type MPCitHVerEncProof struct {
	generated.VerencProof
}

type MPCitHVerEnc struct {
	generated.CompressedCiphertext
	BlindingPubkey []uint8
	Statement      []uint8
}

func MPCitHVerEncProofFromBytes(data []byte) MPCitHVerEncProof {
	if len(data) != 9012 {
		return MPCitHVerEncProof{}
	}

	polycom := [][]byte{}
	for i := 0; i < 23; i++ {
		polycom = append(polycom, data[235+(i*57):292+(i*57)])
	}

	ctexts := []generated.VerencCiphertext{}
	srs := []generated.VerencShare{}

	for i := 0; i < 42; i++ {
		ctexts = append(ctexts, generated.VerencCiphertext{
			C1: data[1546+(i*(57+56+4)) : 1603+(i*(57+56+4))],
			C2: data[1603+(i*(57+56+4)) : 1659+(i*(57+56+4))],
			// The index occupies 4 bytes in this layout.
			I: uint64(binary.BigEndian.Uint32(data[1659+(i*(57+56+4)) : 1663+(i*(57+56+4))])),
		})
	}

	for i := 0; i < 22; i++ {
		srs = append(srs, generated.VerencShare{
			S1: data[6460+(i*(56+56+4)) : 6516+(i*(56+56+4))],
			S2: data[6516+(i*(56+56+4)) : 6572+(i*(56+56+4))],
			I:  uint64(binary.BigEndian.Uint32(data[6572+(i*(56+56+4)) : 6576+(i*(56+56+4))])),
		})
	}

	return MPCitHVerEncProof{
		generated.VerencProof{
			BlindingPubkey: data[:57],
			EncryptionKey:  data[57:114],
			Statement:      data[114:171],
			Challenge:      data[171:235],
			Polycom:        polycom,
			Ctexts:         ctexts,
			SharesRands:    srs,
		},
	}
}

func (p MPCitHVerEncProof) ToBytes() []byte {
	output := []byte{}
	output = append(output, p.BlindingPubkey...)
	output = append(output, p.EncryptionKey...)
	output = append(output, p.Statement...)
	output = append(output, p.Challenge...)

	for _, pol := range p.Polycom {
		output = append(output, pol...)
	}

	for _, ct := range p.Ctexts {
		output = append(output, ct.C1...)
		output = append(output, ct.C2...)
		output = binary.BigEndian.AppendUint32(output, uint32(ct.I))
	}

	for _, sr := range p.SharesRands {
		output = append(output, sr.S1...)
		output = append(output, sr.S2...)
		output = binary.BigEndian.AppendUint32(output, uint32(sr.I))
	}

	return output
}

func (p MPCitHVerEncProof) Compress() VerEnc {
	compressed := verenc.VerencCompress(p.VerencProof)
	return MPCitHVerEnc{
		CompressedCiphertext: compressed,
		BlindingPubkey:       p.BlindingPubkey,
		Statement:            p.Statement,
	}
}

func (p MPCitHVerEncProof) Verify() bool {
	return verenc.VerencVerify(p.VerencProof)
}

type InlineEnc struct {
	iv         []byte
	ciphertext []byte
}

func MPCitHVerEncFromBytes(data []byte) MPCitHVerEnc {
	ciphertext := generated.CompressedCiphertext{}
	for i := 0; i < 3; i++ {
		ciphertext.Ctexts = append(ciphertext.Ctexts, generated.VerencCiphertext{
			C1: data[0+(i*(57+56)) : 57+(i*(57+56))],
			C2: data[57+(i*(57+56)) : 113+(i*(57+56))],
		})
		ciphertext.Aux = append(ciphertext.Aux, data[507+(i*56):563+(i*56)])
	}
	return MPCitHVerEnc{
		CompressedCiphertext: ciphertext,
		BlindingPubkey:       data[731:788],
		Statement:            data[788:845],
	}
}

func (e MPCitHVerEnc) ToBytes() []byte {
	output := []byte{}
	for _, ct := range e.Ctexts {
		output = append(output, ct.C1...)
		output = append(output, ct.C2...)
	}
	for _, a := range e.Aux {
		output = append(output, a...)
	}
	output = append(output, e.BlindingPubkey...)
	output = append(output, e.Statement...)
	return output
}

func (e MPCitHVerEnc) GetStatement() []byte {
	return e.Statement
}

func (e MPCitHVerEnc) Verify(proof []byte) bool {
	proofData := MPCitHVerEncProofFromBytes(proof)
	return proofData.Verify()
}

type MPCitHVerifiableEncryptor struct {
	parallelism int
	lruCache    *lru.Cache[string, VerEnc]
}

func NewMPCitHVerifiableEncryptor(parallelism int) *MPCitHVerifiableEncryptor {
	cache, err := lru.New[string, VerEnc](10000)
	if err != nil {
		panic(err)
	}

	return &MPCitHVerifiableEncryptor{
		parallelism: parallelism,
		lruCache:    cache,
	}
}

func (v *MPCitHVerifiableEncryptor) Encrypt(
	data []byte,
	publicKey []byte,
) []VerEncProof {
	chunks := verenc.ChunkDataForVerenc(data)
	results := make([]VerEncProof, len(chunks))
	var wg sync.WaitGroup
	throttle := make(chan struct{}, v.parallelism)
	for i, chunk := range chunks {
		throttle <- struct{}{}
		wg.Add(1)
		go func(chunk []byte, i int) {
			defer func() { <-throttle }()
			defer wg.Done()
			proof := verenc.NewVerencProofEncryptOnly(chunk, publicKey)
			results[i] = MPCitHVerEncProof{
				generated.VerencProof{
					BlindingPubkey: proof.BlindingPubkey,
					EncryptionKey:  proof.EncryptionKey,
					Statement:      proof.Statement,
					Challenge:      proof.Challenge,
					Polycom:        proof.Polycom,
					Ctexts:         proof.Ctexts,
					SharesRands:    proof.SharesRands,
				},
			}
		}(chunk, i)
	}
	wg.Wait()
	return results
}

func (v *MPCitHVerifiableEncryptor) EncryptAndCompress(
	data []byte,
	publicKey []byte,
) []VerEnc {
	chunks := verenc.ChunkDataForVerenc(data)
	results := make([]VerEnc, len(chunks))
	var wg sync.WaitGroup
	throttle := make(chan struct{}, v.parallelism)
	for i, chunk := range chunks {
		throttle <- struct{}{}
		wg.Add(1)
		go func(chunk []byte, i int) {
			defer func() { <-throttle }()
			defer wg.Done()
			existing, ok := v.lruCache.Get(string(publicKey) + string(chunk))
			if ok {
				results[i] = existing
			} else {
				proof := verenc.NewVerencProofEncryptOnly(chunk, publicKey)
				result := MPCitHVerEncProof{
					generated.VerencProof{
						BlindingPubkey: proof.BlindingPubkey,
						EncryptionKey:  proof.EncryptionKey,
						Statement:      proof.Statement,
						Challenge:      proof.Challenge,
						Polycom:        proof.Polycom,
						Ctexts:         proof.Ctexts,
						SharesRands:    proof.SharesRands,
					},
				}
				results[i] = result.Compress()
				v.lruCache.Add(string(publicKey)+string(chunk), results[i])
			}
		}(chunk, i)
	}
	wg.Wait()
	return results
}

func (v *MPCitHVerifiableEncryptor) Decrypt(
	encrypted []VerEnc,
	decryptionKey []byte,
) []byte {
	results := make([][]byte, len(encrypted))
	var wg sync.WaitGroup
	throttle := make(chan struct{}, v.parallelism)
	for i, chunk := range encrypted {
		throttle <- struct{}{}
		wg.Add(1)
		go func(chunk VerEnc, i int) {
			defer func() { <-throttle }()
			defer wg.Done()
			results[i] = verenc.VerencRecover(generated.VerencDecrypt{
				BlindingPubkey: chunk.(MPCitHVerEnc).BlindingPubkey,
				DecryptionKey:  decryptionKey,
				Statement:      chunk.(MPCitHVerEnc).Statement,
				Ciphertexts:    chunk.(MPCitHVerEnc).CompressedCiphertext,
			})
		}(chunk, i)
	}
	wg.Wait()
	return verenc.CombineChunkedData(results)
}
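As a sanity check on the fixed-size proof encoding, the 9012-byte length decomposes exactly into the header fields plus the three repeated sections, and the encryptor round-trips through EncryptAndCompress and Decrypt. A sketch (the key material below is a placeholder; in practice it comes from the verenc bindings, and the import path is assumed from the module layout):

package main

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/node/crypto"
)

func main() {
	// Layout arithmetic for MPCitHVerEncProofFromBytes:
	//   57 (blinding pubkey) + 57 (encryption key) + 57 (statement)
	//   + 64 (challenge)                                  = 235
	//   + 23 polynomial commitments * 57                  = 1546
	//   + 42 ciphertexts * (57 + 56 + 4-byte index)       = 6460
	//   + 22 share/randomness pairs * (56 + 56 + 4)       = 9012
	fmt.Println(57+57+57+64+23*57+42*(57+56+4)+22*(56+56+4) == 9012) // true

	// Hypothetical keys, shown only for the call shapes.
	var publicKey, decryptionKey []byte

	enc := crypto.NewMPCitHVerifiableEncryptor(4) // up to 4 chunks in flight
	ciphertexts := enc.EncryptAndCompress([]byte("payload"), publicKey)
	plaintext := enc.Decrypt(ciphertexts, decryptionKey)
	_ = plaintext
}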
@ -209,7 +209,7 @@ func (a *TokenApplication) ApplyTransitions(
				continue
			} else if len(t.Mint.Proofs) >= 3 && currentFrameNumber > PROOF_FRAME_CUTOFF {
				frameNumber := binary.BigEndian.Uint64(t.Mint.Proofs[2])
				if frameNumber < currentFrameNumber-2 {
				if frameNumber < currentFrameNumber-1 {
					fails[i] = transition
					continue
				}
@ -414,6 +414,9 @@ func (a *TokenApplication) ApplyTransitions(
	wg.Wait()

	for i, transition := range set {
		if transition == nil {
			continue
		}
		if fails[i] != nil {
			continue
		}
@ -19,12 +19,12 @@ import (
)

// for tests, these need to be var
var PROOF_FRAME_CUTOFF = uint64(46500)
var PROOF_FRAME_RING_RESET = uint64(52000)
var PROOF_FRAME_RING_RESET_2 = uint64(53028)
var PROOF_FRAME_COMBINE_CUTOFF = uint64(162000)
var PROOF_FRAME_CUTOFF = uint64(0)
var PROOF_FRAME_RING_RESET = uint64(0)
var PROOF_FRAME_RING_RESET_2 = uint64(0)
var PROOF_FRAME_COMBINE_CUTOFF = uint64(0)

const PROOF_FRAME_SENIORITY_REPAIR = 59029
const PROOF_FRAME_SENIORITY_REPAIR = 0

type processedMint struct {
	isPre2 bool
@ -160,12 +160,12 @@ func (a *TokenApplication) preProcessMint(
		)
	}

	// Current frame - 2 is because the current frame is the newly created frame,
	// Current frame - 1 is because the current frame is the newly created frame,
	// and the provers are submitting proofs on the frame preceding the one they
	// last saw. This enforces liveness and creates a punishment for being
	// late.
	if (previousFrame != nil && newFrameNumber <= previousFrame.FrameNumber) ||
		newFrameNumber < currentFrameNumber-2 {
		newFrameNumber < currentFrameNumber-1 {
		previousFrameNumber := uint64(0)
		if previousFrame != nil {
			previousFrameNumber = previousFrame.FrameNumber
@ -338,12 +338,12 @@ func (a *TokenApplication) preProcessMint(
		)
	}

	// Current frame - 2 is because the current frame is the newly created frame,
	// Current frame - 1 is because the current frame is the newly created frame,
	// and the provers are submitting proofs on the frame preceding the one they
	// last saw. This enforces liveness and creates a punishment for being
	// late.
	if (previousFrame != nil && newFrameNumber <= previousFrame.FrameNumber) ||
		newFrameNumber < currentFrameNumber-2 {
		newFrameNumber < currentFrameNumber-1 {
		previousFrameNumber := uint64(0)
		if previousFrame != nil {
			previousFrameNumber = previousFrame.FrameNumber
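The comment and the bound move together: with the newly created frame at number C, a mint referencing frame F is now rejected when F < C-1, so only proofs against the immediately preceding frame (or newer) survive, where the old bound also tolerated F = C-2. Numerically (illustrative):

package main

import "fmt"

// acceptedMint is the complement of the rejection test in preProcessMint
// after the change (frame-number clause only).
func acceptedMint(newFrameNumber, currentFrameNumber uint64) bool {
	return newFrameNumber >= currentFrameNumber-1
}

func main() {
	fmt.Println(acceptedMint(9, 10)) // true: proof on the preceding frame
	fmt.Println(acceptedMint(8, 10)) // false: two frames behind, now too late
}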
@ -45,10 +45,12 @@ func (a *TokenApplication) handleDataAnnounceProverJoin(
	error,
) {
	if currentFrameNumber < PROOF_FRAME_CUTOFF {
		a.Logger.Debug("join earlier than cutoff", zap.Uint64("current_frame", currentFrameNumber), zap.Uint64("cutoff", PROOF_FRAME_CUTOFF))
		return nil, errors.Wrap(ErrInvalidStateTransition, "handle join")
	}

	if err := t.Validate(); err != nil {
		a.Logger.Debug("invalid join", zap.Error(err))
		return nil, errors.Wrap(ErrInvalidStateTransition, "handle join")
	}

@ -94,6 +96,7 @@ func (a *TokenApplication) handleDataAnnounceProverJoin(
	if t.Announce != nil {
		outputs, err = a.handleAnnounce(currentFrameNumber, lockMap, t.Announce)
		if err != nil {
			a.Logger.Debug("bad announce", zap.Error(err))
			return nil, errors.Wrap(ErrInvalidStateTransition, "handle join")
		}
	}
@ -330,24 +330,23 @@ func TestHandleProverJoin(t *testing.T) {
|
||||
assert.Len(t, success.Requests, 1)
|
||||
assert.Len(t, app.TokenOutputs.Outputs, 1)
|
||||
txn, _ = app.CoinStore.NewTransaction(false)
|
||||
stateTree := &qcrypto.VectorCommitmentTree{}
|
||||
for i, o := range app.TokenOutputs.Outputs {
|
||||
switch e := o.Output.(type) {
|
||||
case *protobufs.TokenOutput_Coin:
|
||||
a, err := token.GetAddressOfCoin(e.Coin, 1, uint64(i))
|
||||
assert.NoError(t, err)
|
||||
err = app.CoinStore.PutCoin(txn, 1, a, e.Coin, stateTree)
|
||||
err = app.CoinStore.PutCoin(txn, 1, a, e.Coin)
|
||||
assert.NoError(t, err)
|
||||
case *protobufs.TokenOutput_DeletedCoin:
|
||||
c, err := app.CoinStore.GetCoinByAddress(nil, e.DeletedCoin.Address)
|
||||
assert.NoError(t, err)
|
||||
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c, stateTree)
|
||||
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
|
||||
assert.NoError(t, err)
|
||||
case *protobufs.TokenOutput_Proof:
|
||||
a, err := token.GetAddressOfPreCoinProof(e.Proof)
|
||||
fmt.Printf("add addr %x\n", a)
|
||||
assert.NoError(t, err)
|
||||
err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof, stateTree)
|
||||
err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof)
|
||||
assert.NoError(t, err)
|
||||
case *protobufs.TokenOutput_DeletedProof:
|
||||
a, err := token.GetAddressOfPreCoinProof(e.DeletedProof)
|
||||
@ -355,7 +354,7 @@ func TestHandleProverJoin(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
c, err := app.CoinStore.GetPreCoinProofByAddress(a)
|
||||
assert.NoError(t, err)
|
||||
err = app.CoinStore.DeletePreCoinProof(txn, a, c, stateTree)
|
||||
err = app.CoinStore.DeletePreCoinProof(txn, a, c)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
@ -385,18 +384,18 @@ func TestHandleProverJoin(t *testing.T) {
case *protobufs.TokenOutput_Coin:
a, err := token.GetAddressOfCoin(e.Coin, 4, uint64(i))
assert.NoError(t, err)
err = app.CoinStore.PutCoin(txn, 4, a, e.Coin, stateTree)
err = app.CoinStore.PutCoin(txn, 4, a, e.Coin)
assert.NoError(t, err)
case *protobufs.TokenOutput_DeletedCoin:
c, err := app.CoinStore.GetCoinByAddress(txn, e.DeletedCoin.Address)
assert.NoError(t, err)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c, stateTree)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
assert.NoError(t, err)
case *protobufs.TokenOutput_Proof:
a, err := token.GetAddressOfPreCoinProof(e.Proof)
fmt.Printf("add addr %x\n", a)
assert.NoError(t, err)
err = app.CoinStore.PutPreCoinProof(txn, 4, a, e.Proof, stateTree)
err = app.CoinStore.PutPreCoinProof(txn, 4, a, e.Proof)
assert.NoError(t, err)
case *protobufs.TokenOutput_DeletedProof:
a, err := token.GetAddressOfPreCoinProof(e.DeletedProof)
@ -404,7 +403,7 @@ func TestHandleProverJoin(t *testing.T) {
assert.NoError(t, err)
c, err := app.CoinStore.GetPreCoinProofByAddress(a)
assert.NoError(t, err)
err = app.CoinStore.DeletePreCoinProof(txn, a, c, stateTree)
err = app.CoinStore.DeletePreCoinProof(txn, a, c)
assert.NoError(t, err)
case *protobufs.TokenOutput_Penalty:
// gotPenalty = true
@ -438,19 +437,19 @@ func TestHandleProverJoin(t *testing.T) {
case *protobufs.TokenOutput_Coin:
a, err := token.GetAddressOfCoin(e.Coin, 5, uint64(i))
assert.NoError(t, err)
err = app.CoinStore.PutCoin(txn, 5, a, e.Coin, stateTree)
err = app.CoinStore.PutCoin(txn, 5, a, e.Coin)
assert.NoError(t, err)
coins = append(coins, a)
case *protobufs.TokenOutput_DeletedCoin:
c, err := app.CoinStore.GetCoinByAddress(txn, e.DeletedCoin.Address)
assert.NoError(t, err)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c, stateTree)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
assert.NoError(t, err)
case *protobufs.TokenOutput_Proof:
a, err := token.GetAddressOfPreCoinProof(e.Proof)
fmt.Printf("add addr %x\n", a)
assert.NoError(t, err)
err = app.CoinStore.PutPreCoinProof(txn, 5, a, e.Proof, stateTree)
err = app.CoinStore.PutPreCoinProof(txn, 5, a, e.Proof)
assert.NoError(t, err)
case *protobufs.TokenOutput_DeletedProof:
a, err := token.GetAddressOfPreCoinProof(e.DeletedProof)
@ -458,7 +457,7 @@ func TestHandleProverJoin(t *testing.T) {
assert.NoError(t, err)
c, err := app.CoinStore.GetPreCoinProofByAddress(a)
assert.NoError(t, err)
err = app.CoinStore.DeletePreCoinProof(txn, a, c, stateTree)
err = app.CoinStore.DeletePreCoinProof(txn, a, c)
assert.NoError(t, err)
case *protobufs.TokenOutput_Penalty:
// gotPenalty = true
@ -493,25 +492,25 @@ func TestHandleProverJoin(t *testing.T) {
case *protobufs.TokenOutput_Coin:
a, err := token.GetAddressOfCoin(e.Coin, 5, uint64(i))
assert.NoError(t, err)
err = app.CoinStore.PutCoin(txn, 5, a, e.Coin, stateTree)
err = app.CoinStore.PutCoin(txn, 5, a, e.Coin)
assert.NoError(t, err)
coins = append(coins, a)
case *protobufs.TokenOutput_DeletedCoin:
c, err := app.CoinStore.GetCoinByAddress(txn, e.DeletedCoin.Address)
assert.NoError(t, err)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c, stateTree)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
assert.NoError(t, err)
case *protobufs.TokenOutput_Proof:
a, err := token.GetAddressOfPreCoinProof(e.Proof)
assert.NoError(t, err)
err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof, stateTree)
err = app.CoinStore.PutPreCoinProof(txn, 1, a, e.Proof)
assert.NoError(t, err)
case *protobufs.TokenOutput_DeletedProof:
a, err := token.GetAddressOfPreCoinProof(e.DeletedProof)
assert.NoError(t, err)
c, err := app.CoinStore.GetPreCoinProofByAddress(a)
assert.NoError(t, err)
err = app.CoinStore.DeletePreCoinProof(txn, a, c, stateTree)
err = app.CoinStore.DeletePreCoinProof(txn, a, c)
assert.NoError(t, err)
}
}

@ -5,7 +5,9 @@ import (
"context"
"crypto"
"encoding/hex"
"fmt"
"math/big"
"runtime"
"slices"
"strconv"
"strings"
@ -26,6 +28,7 @@ import (
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
hypergraph "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
"source.quilibrium.com/quilibrium/monorepo/node/internal/frametime"
qruntime "source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
@ -87,6 +90,7 @@ type TokenExecutionEngine struct {
logger *zap.Logger
clock *data.DataClockConsensusEngine
clockStore store.ClockStore
hypergraphStore store.HypergraphStore
coinStore store.CoinStore
keyStore store.KeyStore
keyManager keys.KeyManager
@ -104,7 +108,8 @@ type TokenExecutionEngine struct {
intrinsicFilter []byte
frameProver qcrypto.FrameProver
peerSeniority *PeerSeniority
stateTree *qcrypto.VectorCommitmentTree
hypergraph *hypergraph.Hypergraph
mpcithVerEnc *qcrypto.MPCitHVerifiableEncryptor
}

func NewTokenExecutionEngine(
@ -116,6 +121,7 @@ func NewTokenExecutionEngine(
inclusionProver qcrypto.InclusionProver,
clockStore store.ClockStore,
dataProofStore store.DataProofStore,
hypergraphStore store.HypergraphStore,
coinStore store.CoinStore,
masterTimeReel *time.MasterTimeReel,
peerInfoManager p2p.PeerInfoManager,
@ -138,7 +144,10 @@ func NewTokenExecutionEngine(
var inclusionProof *qcrypto.InclusionAggregateProof
var proverKeys [][]byte
var peerSeniority map[string]uint64
stateTree := &qcrypto.VectorCommitmentTree{}
hypergraph := hypergraph.NewHypergraph()
mpcithVerEnc := qcrypto.NewMPCitHVerifiableEncryptor(
runtime.NumCPU(),
)

if err != nil && errors.Is(err, store.ErrNotFound) {
origin, inclusionProof, proverKeys, peerSeniority = CreateGenesisState(
@ -148,7 +157,9 @@ func NewTokenExecutionEngine(
inclusionProver,
clockStore,
coinStore,
stateTree,
hypergraphStore,
hypergraph,
mpcithVerEnc,
uint(cfg.P2P.Network),
)
if err := coinStore.SetMigrationVersion(
@ -159,25 +170,29 @@ func NewTokenExecutionEngine(
} else if err != nil {
panic(err)
} else {
err := coinStore.Migrate(
intrinsicFilter,
config.GetGenesis().GenesisSeedHex,
)
if err != nil {
panic(err)
}
_, err = clockStore.GetEarliestDataClockFrame(intrinsicFilter)
if err != nil && errors.Is(err, store.ErrNotFound) {
origin, inclusionProof, proverKeys, peerSeniority = CreateGenesisState(
logger,
cfg.Engine,
nil,
inclusionProver,
clockStore,
coinStore,
stateTree,
uint(cfg.P2P.Network),
if pubSub.GetNetwork() == 0 {
err := coinStore.Migrate(
intrinsicFilter,
config.GetGenesis().GenesisSeedHex,
)
if err != nil {
panic(err)
}
_, err = clockStore.GetEarliestDataClockFrame(intrinsicFilter)
if err != nil && errors.Is(err, store.ErrNotFound) {
origin, inclusionProof, proverKeys, peerSeniority = CreateGenesisState(
logger,
cfg.Engine,
nil,
inclusionProver,
clockStore,
coinStore,
hypergraphStore,
hypergraph,
mpcithVerEnc,
uint(cfg.P2P.Network),
)
}
}
}

@ -222,6 +237,7 @@ func NewTokenExecutionEngine(
keyManager: keyManager,
clockStore: clockStore,
coinStore: coinStore,
hypergraphStore: hypergraphStore,
keyStore: keyStore,
pubSub: pubSub,
inclusionProver: inclusionProver,
@ -231,6 +247,7 @@ func NewTokenExecutionEngine(
alreadyPublishedShare: false,
intrinsicFilter: intrinsicFilter,
peerSeniority: NewFromMap(peerSeniority),
mpcithVerEnc: mpcithVerEnc,
}

alwaysSend := false
@ -346,13 +363,21 @@ func NewTokenExecutionEngine(
e.proverPublicKey = publicKeyBytes
e.provingKeyAddress = provingKeyAddress

e.stateTree, err = e.clockStore.GetDataStateTree(e.intrinsicFilter)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
}
_, _, err = e.clockStore.GetLatestDataClockFrame(e.intrinsicFilter)
if err != nil {
e.rebuildHypergraph()
} else {
e.hypergraph, err = e.hypergraphStore.LoadHypergraph()
if err != nil && !errors.Is(err, store.ErrNotFound) {
e.logger.Error(
"error encountered while fetching hypergraph, rebuilding",
zap.Error(err),
)
}

if e.stateTree == nil {
e.rebuildStateTree()
if e.hypergraph == nil || len(e.hypergraph.GetVertexAdds()) == 0 {
e.rebuildHypergraph()
}
}

e.wg.Add(1)
@ -419,34 +444,107 @@ func NewTokenExecutionEngine(

var _ execution.ExecutionEngine = (*TokenExecutionEngine)(nil)

func (e *TokenExecutionEngine) rebuildStateTree() {
e.logger.Info("rebuilding state tree")
e.stateTree = &qcrypto.VectorCommitmentTree{}
func (e *TokenExecutionEngine) addBatchToHypergraph(batchKey [][]byte, batchValue [][]byte) {
var wg sync.WaitGroup
throttle := make(chan struct{}, runtime.NumCPU())
batchCompressed := make([][]hypergraph.Encrypted, len(batchKey))
for i, chunk := range batchValue {
throttle <- struct{}{}
wg.Add(1)
go func(chunk []byte, i int) {
defer func() { <-throttle }()
defer wg.Done()
e.logger.Debug(
"encrypting coin",
zap.String("address", hex.EncodeToString(batchKey[i])),
)
data := e.mpcithVerEnc.EncryptAndCompress(
chunk,
config.GetGenesis().Beacon,
)
compressed := []hypergraph.Encrypted{}
for _, d := range data {
compressed = append(compressed, d)
}
e.logger.Debug(
"encrypted coin",
zap.String("address", hex.EncodeToString(batchKey[i])),
)
batchCompressed[i] = compressed
}(chunk, i)
}
wg.Wait()

for i := range batchKey {
if err := e.hypergraph.AddVertex(
hypergraph.NewVertex(
[32]byte(application.TOKEN_ADDRESS),
[32]byte(batchKey[i]),
batchCompressed[i],
),
); err != nil {
panic(err)
}
}
}
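`addBatchToHypergraph` bounds its fan-out with a buffered channel used as a counting semaphore: sends block once `runtime.NumCPU()` workers are in flight, and each worker frees its slot on exit. The same pattern in isolation, with a trivial stand-in for the encryption work:

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    jobs := []int{1, 2, 3, 4, 5, 6, 7, 8}
    out := make([]int, len(jobs))

    var wg sync.WaitGroup
    // Buffered channel as a counting semaphore: at most NumCPU
    // goroutines run at once.
    throttle := make(chan struct{}, runtime.NumCPU())
    for i, j := range jobs {
        throttle <- struct{}{} // blocks while the semaphore is full
        wg.Add(1)
        go func(j, i int) {
            defer func() { <-throttle }() // release the slot
            defer wg.Done()
            out[i] = j * j // placeholder for EncryptAndCompress
        }(j, i)
    }
    wg.Wait()
    fmt.Println(out)
}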

func (e *TokenExecutionEngine) rebuildHypergraph() {
e.logger.Info("rebuilding hypergraph")
e.hypergraph = hypergraph.NewHypergraph()
iter, err := e.coinStore.RangeCoins()
if err != nil {
panic(err)
}
var batchKey, batchValue [][]byte
for iter.First(); iter.Valid(); iter.Next() {
e.stateTree.Insert(iter.Key()[2:], iter.Value())
key := make([]byte, len(iter.Key()[2:]))
copy(key, iter.Key()[2:])
batchKey = append(batchKey, key)

coin := &protobufs.Coin{}
err := proto.Unmarshal(iter.Value()[8:], coin)
if err != nil {
panic(err)
}

value := []byte{}
value = append(value, iter.Value()[:8]...)
value = append(value, coin.Amount...)
// implicit
value = append(value, 0x00)
value = append(value, coin.Owner.GetImplicitAccount().GetAddress()...)
// domain len
value = append(value, 0x00)
value = append(value, coin.Intersection...)
batchValue = append(batchValue, value)

if len(batchKey) == runtime.NumCPU() {
e.addBatchToHypergraph(batchKey, batchValue)
batchKey = [][]byte{}
batchValue = [][]byte{}
}
}
iter.Close()

iter, err = e.coinStore.RangePreCoinProofs()
if err != nil {
panic(err)
if len(batchKey) != 0 {
e.addBatchToHypergraph(batchKey, batchValue)
}
for iter.First(); iter.Valid(); iter.Next() {
e.stateTree.Insert(iter.Key()[2:], iter.Value())
}
iter.Close()
e.logger.Info("saving rebuilt state tree")

txn, err := e.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}

err = e.clockStore.SetDataStateTree(txn, e.intrinsicFilter, e.stateTree)
e.logger.Info("committing hypergraph")

roots := e.hypergraph.Commit()

e.logger.Info(
"committed hypergraph state",
zap.String("root", fmt.Sprintf("%x", roots[0])),
)

err = e.hypergraphStore.SaveHypergraph(txn, e.hypergraph)
if err != nil {
txn.Abort()
panic(err)
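Both `rebuildHypergraph` above and the `ProcessFrame` paths below assemble the same plaintext layout before encryption: an 8-byte frame-number prefix, the amount, a `0x00` implicit-account tag followed by the owner address, then a `0x00` domain length and the intersection bytes. A sketch of that layout as a helper (the function itself is illustrative; the repository builds it inline):

package main

import (
    "encoding/binary"
    "fmt"
)

// encodeCoinValue mirrors the inline byte layout: frame number,
// amount, implicit-account marker + owner, domain length, intersection.
func encodeCoinValue(frameNumber uint64, amount, owner, intersection []byte) []byte {
    value := binary.BigEndian.AppendUint64([]byte{}, frameNumber)
    value = append(value, amount...)
    value = append(value, 0x00) // implicit account marker
    value = append(value, owner...)
    value = append(value, 0x00) // domain length
    value = append(value, intersection...)
    return value
}

func main() {
    v := encodeCoinValue(0, []byte{0x01}, []byte{0xaa, 0xbb}, nil)
    fmt.Printf("%x\n", v) // 00000000000000000100aabb00
}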
@ -623,10 +721,10 @@ func (e *TokenExecutionEngine) ProcessFrame(
}
wg.Wait()

stateTree, err := e.clockStore.GetDataStateTree(e.intrinsicFilter)
hg, err := e.hypergraphStore.LoadHypergraph()
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
panic(err)
}

for i, output := range app.TokenOutputs.Outputs {
@ -642,12 +740,43 @@ func (e *TokenExecutionEngine) ProcessFrame(
frame.FrameNumber,
address,
o.Coin,
stateTree,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
}

value := []byte{}
value = append(value, make([]byte, 8)...)
value = append(value, o.Coin.Amount...)
// implicit
value = append(value, 0x00)
value = append(
value,
o.Coin.Owner.GetImplicitAccount().GetAddress()...,
)
// domain len
value = append(value, 0x00)
value = append(value, o.Coin.Intersection...)

proofs := e.mpcithVerEnc.EncryptAndCompress(
value,
config.GetGenesis().Beacon,
)
compressed := []hypergraph.Encrypted{}
for _, d := range proofs {
compressed = append(compressed, d)
}
if err := hg.AddVertex(
hypergraph.NewVertex(
[32]byte(application.TOKEN_ADDRESS),
[32]byte(address),
compressed,
),
); err != nil {
txn.Abort()
panic(err)
}
case *protobufs.TokenOutput_DeletedCoin:
coin, err := e.coinStore.GetCoinByAddress(nil, o.DeletedCoin.Address)
if err != nil {
@ -658,12 +787,43 @@ func (e *TokenExecutionEngine) ProcessFrame(
txn,
o.DeletedCoin.Address,
coin,
stateTree,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
}

value := []byte{}
value = append(value, make([]byte, 8)...)
value = append(value, coin.Amount...)
// implicit
value = append(value, 0x00)
value = append(
value,
coin.Owner.GetImplicitAccount().GetAddress()...,
)
// domain len
value = append(value, 0x00)
value = append(value, coin.Intersection...)

proofs := e.mpcithVerEnc.EncryptAndCompress(
value,
config.GetGenesis().Beacon,
)
compressed := []hypergraph.Encrypted{}
for _, d := range proofs {
compressed = append(compressed, d)
}
if err := hg.RemoveVertex(
hypergraph.NewVertex(
[32]byte(application.TOKEN_ADDRESS),
[32]byte(o.DeletedCoin.Address),
compressed,
),
); err != nil {
txn.Abort()
panic(err)
}
case *protobufs.TokenOutput_Proof:
address, err := outputAddresses[i], outputAddressErrors[i]
if err != nil {
@ -675,12 +835,12 @@ func (e *TokenExecutionEngine) ProcessFrame(
frame.FrameNumber,
address,
o.Proof,
stateTree,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
}

if len(o.Proof.Amount) == 32 &&
!bytes.Equal(o.Proof.Amount, make([]byte, 32)) &&
o.Proof.Commitment != nil {
@ -713,7 +873,6 @@ func (e *TokenExecutionEngine) ProcessFrame(
txn,
address,
o.DeletedProof,
stateTree,
)
if err != nil {
txn.Abort()
@ -1029,17 +1188,25 @@ func (e *TokenExecutionEngine) ProcessFrame(
}
}

err = e.clockStore.SetDataStateTree(
e.logger.Info("committing hypergraph")

roots := hg.Commit()

e.logger.Info(
"committed hypergraph",
zap.String("root", fmt.Sprintf("%x", roots[0])),
)

err = e.hypergraphStore.SaveHypergraph(
txn,
e.intrinsicFilter,
stateTree,
hg,
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
}

e.stateTree = stateTree
e.hypergraph = hg

return app.Tries, nil
}
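The tail of `ProcessFrame` follows a commit-then-persist-then-swap discipline: commit the working hypergraph to obtain its roots, save it inside the same transaction as the frame's other writes, and only then install it as the engine's live copy. A schematic of that ordering under stand-in types:

package main

import "fmt"

type graph struct{ root string }

func (g *graph) Commit() []string { return []string{g.root} }

type store struct{ saved *graph }

func (s *store) Save(g *graph) error { s.saved = g; return nil }

type engine struct {
    store *store
    live  *graph
}

// applyFrame commits, persists, and swaps the in-memory pointer only
// after the save succeeds, so a failed save leaves the live graph
// untouched.
func (e *engine) applyFrame(working *graph) error {
    roots := working.Commit()
    fmt.Println("committed hypergraph, root:", roots[0])
    if err := e.store.Save(working); err != nil {
        return err
    }
    e.live = working
    return nil
}

func main() {
    e := &engine{store: &store{}}
    _ = e.applyFrame(&graph{root: "abc123"})
    fmt.Println("live root:", e.live.root)
}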

@ -2,6 +2,7 @@ package token

import (
_ "embed"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
@ -17,9 +18,9 @@ import (
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/vdf"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
hypergraph "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -504,7 +505,9 @@ func CreateGenesisState(
inclusionProver qcrypto.InclusionProver,
clockStore store.ClockStore,
coinStore store.CoinStore,
stateTree *crypto.VectorCommitmentTree,
hypergraphStore store.HypergraphStore,
hg *hypergraph.Hypergraph,
mpcithVerEnc *qcrypto.MPCitHVerifiableEncryptor,
network uint,
) (
[]byte,
@ -865,11 +868,41 @@ func CreateGenesisState(
0,
address,
output.GetCoin(),
stateTree,
)
if err != nil {
panic(err)
}

value := []byte{}
value = append(value, make([]byte, 8)...)
value = append(value, output.GetCoin().Amount...)
// implicit
value = append(value, 0x00)
value = append(
value,
output.GetCoin().Owner.GetImplicitAccount().GetAddress()...,
)
// domain len
value = append(value, 0x00)
value = append(value, output.GetCoin().Intersection...)

proofs := mpcithVerEnc.EncryptAndCompress(
value,
config.GetGenesis().Beacon,
)
compressed := []hypergraph.Encrypted{}
for _, d := range proofs {
compressed = append(compressed, d)
}
if err := hg.AddVertex(
hypergraph.NewVertex(
[32]byte(application.TOKEN_ADDRESS),
[32]byte(address),
compressed,
),
); err != nil {
panic(err)
}
}
if err := txn.Commit(); err != nil {
panic(err)
@ -890,7 +923,7 @@ func CreateGenesisState(
}

intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
err = clockStore.SetDataStateTree(txn, intrinsicFilter, stateTree)
err = hypergraphStore.SaveHypergraph(txn, hg)
if err != nil {
txn.Abort()
panic(err)
@ -1012,14 +1045,38 @@ func CreateGenesisState(
0,
address,
output.GetCoin(),
stateTree,
)
if err != nil {
panic(err)
}
coinBytes, err := proto.Marshal(output.GetCoin())
if err != nil {
panic(err)
}

data := []byte{}
data = binary.BigEndian.AppendUint64(data, 0)
data = append(data, coinBytes...)
proofs := mpcithVerEnc.EncryptAndCompress(
data,
config.GetGenesis().Beacon,
)
compressed := []hypergraph.Encrypted{}
for _, d := range proofs {
compressed = append(compressed, d)
}
if err := hg.AddVertex(
hypergraph.NewVertex(
[32]byte(application.TOKEN_ADDRESS),
[32]byte(address),
compressed,
),
); err != nil {
panic(err)
}
}
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
err = clockStore.SetDataStateTree(txn, intrinsicFilter, stateTree)
err = hypergraphStore.SaveHypergraph(txn, hg)
if err != nil {
txn.Abort()
panic(err)

@ -11,6 +11,8 @@ replace source.quilibrium.com/quilibrium/monorepo/bls48581 => ../bls48581

replace source.quilibrium.com/quilibrium/monorepo/vdf => ../vdf

replace source.quilibrium.com/quilibrium/monorepo/verenc => ../verenc

replace github.com/multiformats/go-multiaddr => ../go-multiaddr

replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
@ -38,11 +40,13 @@ require (
source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/vdf v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/verenc v0.0.0-00010101000000-000000000000
)

require (
filippo.io/edwards25519 v1.0.0-rc.1 // indirect
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 // indirect
github.com/google/subcommands v1.0.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 // indirect

@ -183,6 +183,7 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=

@ -1,12 +1,26 @@
package application

import (
"errors"
"bytes"
"crypto/sha512"
"encoding/gob"
"math/big"

"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
)

type AtomType string
type PhaseType string

const (
VertexAtomType AtomType = "vertex"
HyperedgeAtomType AtomType = "hyperedge"
AddsPhaseType PhaseType = "adds"
RemovesPhaseType PhaseType = "removes"
)

type Location [64]byte // 32 bytes for AppAddress + 32 bytes for DataAddress

var ErrInvalidAtomType = errors.New("invalid atom type for set")
@ -14,86 +28,275 @@ var ErrInvalidLocation = errors.New("invalid location")
var ErrMissingExtrinsics = errors.New("missing extrinsics")
var ErrIsExtrinsic = errors.New("is extrinsic")

// Extract only the needed methods of the VEnc interface
type Encrypted interface {
RawRepresentation() []byte
Verify() bool
ToBytes() []byte
GetStatement() []byte
Verify(proof []byte) bool
}

type Vertex struct {
AppAddress [32]byte
DataAddress [32]byte
Data Encrypted
type Vertex interface {
GetID() [64]byte
GetAtomType() AtomType
GetLocation() Location
GetAppAddress() [32]byte
GetDataAddress() [32]byte
ToBytes() []byte
GetData() []Encrypted
GetSize() *big.Int
Commit() []byte
}

type Hyperedge struct {
AppAddress [32]byte
DataAddress [32]byte
Extrinsics map[[64]byte]Atom
type Hyperedge interface {
GetID() [64]byte
GetAtomType() AtomType
GetLocation() Location
GetAppAddress() [32]byte
GetDataAddress() [32]byte
ToBytes() []byte
AddExtrinsic(a Atom)
RemoveExtrinsic(a Atom)
GetExtrinsics() map[[64]byte]Atom
GetSize() *big.Int
Commit() []byte
}

type vertex struct {
appAddress [32]byte
dataAddress [32]byte
data []Encrypted
dataTree *crypto.VectorCommitmentTree
}

type hyperedge struct {
appAddress [32]byte
dataAddress [32]byte
extrinsics map[[64]byte]Atom
extTree *crypto.VectorCommitmentTree
}

var _ Vertex = (*vertex)(nil)
var _ Hyperedge = (*hyperedge)(nil)

type Atom interface {
GetID() [64]byte
GetAtomType() AtomType
GetLocation() Location
GetAppAddress() [32]byte
GetDataAddress() [32]byte
GetSize() *big.Int
ToBytes() []byte
Commit() []byte
}

func (v *Vertex) GetID() [64]byte {
func atomFromBytes(data []byte) Atom {
tree := &crypto.VectorCommitmentTree{}
var b bytes.Buffer
b.Write(data[65:])
dec := gob.NewDecoder(&b)
if err := dec.Decode(tree); err != nil {
return nil
}

if data[0] == 0x00 {
encData := []Encrypted{}
for _, d := range crypto.GetAllLeaves(tree) {
verencData := crypto.MPCitHVerEncFromBytes(d.Value)
encData = append(encData, verencData)
}
return &vertex{
appAddress: [32]byte(data[1:33]),
dataAddress: [32]byte(data[33:65]),
data: encData,
dataTree: tree,
}
} else {
extrinsics := make(map[[64]byte]Atom)
for _, a := range crypto.GetAllLeaves(tree) {
atom := atomFromBytes(a.Value)
extrinsics[[64]byte(a.Key)] = atom
}
return &hyperedge{
appAddress: [32]byte(data[1:33]),
dataAddress: [32]byte(data[33:65]),
extrinsics: extrinsics,
extTree: tree,
}
}
}
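`ToBytes` and `atomFromBytes` agree on a one-byte tag scheme: `0x00` marks a vertex payload and `0x01` a hyperedge, followed by the two 32-byte addresses and the gob-encoded tree. A standalone decoder for just that header (hypothetical helper, shown for the framing only):

package main

import "fmt"

// decodeHeader splits the tag byte, the two 32-byte addresses, and the
// trailing tree bytes; it assumes len(data) >= 65, as the encoders
// above guarantee.
func decodeHeader(data []byte) (kind string, app, addr [32]byte, rest []byte) {
    kind = "vertex"
    if data[0] == 0x01 {
        kind = "hyperedge"
    }
    copy(app[:], data[1:33])
    copy(addr[:], data[33:65])
    return kind, app, addr, data[65:]
}

func main() {
    payload := append([]byte{0x01}, make([]byte, 64)...)
    payload = append(payload, []byte("tree-bytes")...)
    kind, _, _, rest := decodeHeader(payload)
    fmt.Println(kind, string(rest)) // hyperedge tree-bytes
}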

func NewVertex(
appAddress [32]byte,
dataAddress [32]byte,
data []Encrypted,
) Vertex {
dataTree := &crypto.VectorCommitmentTree{}
for _, d := range data {
dataBytes := d.ToBytes()
id := sha512.Sum512(dataBytes)
dataTree.Insert(id[:], dataBytes, d.GetStatement(), big.NewInt(int64(len(data)*54)))
}
return &vertex{
appAddress,
dataAddress,
data,
dataTree,
}
}

func NewHyperedge(
appAddress [32]byte,
dataAddress [32]byte,
) Hyperedge {
return &hyperedge{
appAddress: appAddress,
dataAddress: dataAddress,
extrinsics: make(map[[64]byte]Atom),
extTree: &crypto.VectorCommitmentTree{},
}
}

func (v *vertex) GetID() [64]byte {
id := [64]byte{}
copy(id[:32], v.AppAddress[:])
copy(id[32:64], v.DataAddress[:])
copy(id[:32], v.appAddress[:])
copy(id[32:64], v.dataAddress[:])
return id
}

func (v *Vertex) GetAtomType() AtomType {
return "vertex"
func (v *vertex) GetSize() *big.Int {
return big.NewInt(int64(len(v.data) * 54))
}

func (v *Vertex) GetLocation() Location {
func (v *vertex) GetAtomType() AtomType {
return VertexAtomType
}

func (v *vertex) GetLocation() Location {
var loc Location
copy(loc[:32], v.AppAddress[:])
copy(loc[32:], v.DataAddress[:])
copy(loc[:32], v.appAddress[:])
copy(loc[32:], v.dataAddress[:])
return loc
}

func (v *Vertex) GetAppAddress() [32]byte {
return v.AppAddress
func (v *vertex) GetAppAddress() [32]byte {
return v.appAddress
}

func (v *Vertex) GetDataAddress() [32]byte {
return v.DataAddress
func (v *vertex) GetDataAddress() [32]byte {
return v.dataAddress
}

func (h *Hyperedge) GetID() [64]byte {
func (v *vertex) GetData() []Encrypted {
return v.data
}

func (v *vertex) ToBytes() []byte {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(v.dataTree); err != nil {
return nil
}
return append(
append(
append(
[]byte{0x00},
v.appAddress[:]...,
),
v.dataAddress[:]...,
),
buf.Bytes()...,
)
}

func (v *vertex) Commit() []byte {
return v.dataTree.Commit(false)
}

func (h *hyperedge) GetID() [64]byte {
id := [64]byte{}
copy(id[:32], h.AppAddress[:])
copy(id[32:], h.DataAddress[:])
copy(id[:32], h.appAddress[:])
copy(id[32:], h.dataAddress[:])
return id
}

func (h *Hyperedge) GetAtomType() AtomType {
return "hyperedge"
func (h *hyperedge) GetSize() *big.Int {
return big.NewInt(int64(len(h.extrinsics)))
}

func (h *Hyperedge) GetLocation() Location {
func (h *hyperedge) GetAtomType() AtomType {
return HyperedgeAtomType
}

func (h *hyperedge) GetLocation() Location {
var loc Location
copy(loc[:32], h.AppAddress[:])
copy(loc[32:], h.DataAddress[:])
copy(loc[:32], h.appAddress[:])
copy(loc[32:], h.dataAddress[:])
return loc
}

func (h *Hyperedge) GetAppAddress() [32]byte {
return h.AppAddress
func (h *hyperedge) GetAppAddress() [32]byte {
return h.appAddress
}

func (h *Hyperedge) GetDataAddress() [32]byte {
return h.DataAddress
func (h *hyperedge) GetDataAddress() [32]byte {
return h.dataAddress
}

func (h *hyperedge) ToBytes() []byte {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(h.extrinsics); err != nil {
return nil
}
return append(
append(
append(
[]byte{0x01},
h.appAddress[:]...,
),
h.dataAddress[:]...,
),
buf.Bytes()...,
)
}

func (h *hyperedge) AddExtrinsic(a Atom) {
id := a.GetID()
atomType := []byte{0x00}
if a.GetAtomType() == HyperedgeAtomType {
atomType = []byte{0x01}
}
h.extTree.Insert(id[:], append(atomType, id[:]...), nil, a.GetSize())
h.extrinsics[id] = a
}

func (h *hyperedge) RemoveExtrinsic(a Atom) {
id := a.GetID()
h.extTree.Delete(id[:])
delete(h.extrinsics, id)
}

func (h *hyperedge) GetExtrinsics() map[[64]byte]Atom {
ext := make(map[[64]byte]Atom)
for id := range h.extrinsics {
ext[id] = h.extrinsics[id]
}
return ext
}

func (h *hyperedge) Commit() []byte {
return h.extTree.Commit(false)
}

type ShardAddress struct {
L1 [3]byte
L2 [64]byte
L2 [32]byte
L3 [32]byte
}

type ShardKey struct {
L1 [3]byte
L2 [32]byte
}

func GetShardAddress(a Atom) ShardAddress {
@ -102,59 +305,198 @@ func GetShardAddress(a Atom) ShardAddress {

return ShardAddress{
L1: [3]byte(p2p.GetBloomFilterIndices(appAddress[:], 256, 3)),
L2: [64]byte(append(append([]byte{}, appAddress[:]...), dataAddress[:]...)),
L2: [32]byte(append([]byte{}, appAddress[:]...)),
L3: [32]byte(append([]byte{}, dataAddress[:]...)),
}
}

func GetShardKey(a Atom) ShardKey {
s := GetShardAddress(a)
return ShardKey{L1: s.L1, L2: s.L2}
}
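The narrowing from `ShardAddress` to `ShardKey` drops the data address (L3), so every atom of one application shards together: L1 is three bloom-filter indices over the app address and L2 is the app address itself. A sketch of that derivation with a stand-in for `p2p.GetBloomFilterIndices` (the real function's output differs; only the shape matters here):

package main

import (
    "crypto/sha256"
    "fmt"
)

// bloomIndices3 stands in for p2p.GetBloomFilterIndices(addr, 256, 3).
func bloomIndices3(addr []byte) [3]byte {
    h := sha256.Sum256(addr)
    return [3]byte{h[0], h[1], h[2]}
}

type ShardKey struct {
    L1 [3]byte  // coarse routing prefix over the app address
    L2 [32]byte // the app address itself
}

// shardKeyFor ignores the data address, so all atoms of an application
// map to the same shard key.
func shardKeyFor(appAddress, dataAddress [32]byte) ShardKey {
    return ShardKey{L1: bloomIndices3(appAddress[:]), L2: appAddress}
}

func main() {
    app := [32]byte{1}
    k1 := shardKeyFor(app, [32]byte{9})
    k2 := shardKeyFor(app, [32]byte{7})
    fmt.Println(k1 == k2) // true: same app, same shard
}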

type IdSet struct {
dirty bool
atomType AtomType
atoms map[[64]byte]Atom
tree *crypto.VectorCommitmentTree
}

func NewIdSet(atomType AtomType) *IdSet {
return &IdSet{atomType: atomType, atoms: make(map[[64]byte]Atom)}
return &IdSet{
dirty: false,
atomType: atomType,
atoms: make(map[[64]byte]Atom),
tree: &crypto.VectorCommitmentTree{},
}
}

func (set *IdSet) FromBytes(treeData []byte) error {
set.tree = &crypto.VectorCommitmentTree{}
var b bytes.Buffer
b.Write(treeData)
dec := gob.NewDecoder(&b)
if err := dec.Decode(set.tree); err != nil {
return errors.Wrap(err, "load set")
}

for _, leaf := range crypto.GetAllLeaves(set.tree.Root) {
set.atoms[[64]byte(leaf.Key)] = atomFromBytes(leaf.Value)
}

return nil
}

func (set *IdSet) IsDirty() bool {
return set.dirty
}

func (set *IdSet) ToBytes() []byte {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(set.tree); err != nil {
return nil
}

return buf.Bytes()
}

func (set *IdSet) Add(atom Atom) error {
if atom.GetAtomType() != set.atomType {
return ErrInvalidAtomType
}
if _, exists := set.atoms[atom.GetID()]; !exists {
set.atoms[atom.GetID()] = atom

id := atom.GetID()
set.atoms[id] = atom
set.dirty = true
return set.tree.Insert(id[:], atom.ToBytes(), atom.Commit(), atom.GetSize())
}

func (set *IdSet) GetSize() *big.Int {
size := set.tree.GetSize()
if size == nil {
size = big.NewInt(0)
}
return nil
return size
}

func (set *IdSet) Delete(atom Atom) bool {
if _, exists := set.atoms[atom.GetID()]; exists {
delete(set.atoms, atom.GetID())
return true
if atom.GetAtomType() != set.atomType {
return false
}
return false

id := atom.GetID()
if err := set.tree.Delete(id[:]); err != nil {
return false
}

set.dirty = true
delete(set.atoms, id)

return true
}

func (set *IdSet) Has(atom Atom) bool {
_, exists := set.atoms[atom.GetID()]
return exists
func (set *IdSet) Has(key [64]byte) bool {
_, ok := set.atoms[key]
return ok
}

type Hypergraph struct {
vertexAdds map[ShardAddress]*IdSet
vertexRemoves map[ShardAddress]*IdSet
hyperedgeAdds map[ShardAddress]*IdSet
hyperedgeRemoves map[ShardAddress]*IdSet
size *big.Int
vertexAdds map[ShardKey]*IdSet
vertexRemoves map[ShardKey]*IdSet
hyperedgeAdds map[ShardKey]*IdSet
hyperedgeRemoves map[ShardKey]*IdSet
}

func NewHypergraph() *Hypergraph {
return &Hypergraph{
vertexAdds: make(map[ShardAddress]*IdSet),
vertexRemoves: make(map[ShardAddress]*IdSet),
hyperedgeAdds: make(map[ShardAddress]*IdSet),
hyperedgeRemoves: make(map[ShardAddress]*IdSet),
size: big.NewInt(0),
vertexAdds: make(map[ShardKey]*IdSet),
vertexRemoves: make(map[ShardKey]*IdSet),
hyperedgeAdds: make(map[ShardKey]*IdSet),
hyperedgeRemoves: make(map[ShardKey]*IdSet),
}
}

func (hg *Hypergraph) getOrCreateIdSet(shardAddr ShardAddress, addMap map[ShardAddress]*IdSet, removeMap map[ShardAddress]*IdSet, atomType AtomType) (*IdSet, *IdSet) {
func (hg *Hypergraph) GetVertexAdds() map[ShardKey]*IdSet {
return hg.vertexAdds
}

func (hg *Hypergraph) GetVertexRemoves() map[ShardKey]*IdSet {
return hg.vertexRemoves
}

func (hg *Hypergraph) GetHyperedgeAdds() map[ShardKey]*IdSet {
return hg.hyperedgeAdds
}

func (hg *Hypergraph) GetHyperedgeRemoves() map[ShardKey]*IdSet {
return hg.hyperedgeRemoves
}

func (hg *Hypergraph) Commit() [][]byte {
commits := [][]byte{}
for _, vertexAdds := range hg.vertexAdds {
commits = append(commits, vertexAdds.tree.Commit(false))
}
for _, vertexRemoves := range hg.vertexRemoves {
commits = append(commits, vertexRemoves.tree.Commit(false))
}
for _, hyperedgeAdds := range hg.hyperedgeAdds {
commits = append(commits, hyperedgeAdds.tree.Commit(false))
}
for _, hyperedgeRemoves := range hg.hyperedgeRemoves {
commits = append(commits, hyperedgeRemoves.tree.Commit(false))
}
return commits
}

func (hg *Hypergraph) ImportFromBytes(
atomType AtomType,
phaseType PhaseType,
shardKey ShardKey,
data []byte,
) error {
set := NewIdSet(atomType)
if err := set.FromBytes(data); err != nil {
return errors.Wrap(err, "import from bytes")
}

switch atomType {
case VertexAtomType:
switch phaseType {
case AddsPhaseType:
hg.size.Add(hg.size, set.GetSize())
hg.vertexAdds[shardKey] = set
case RemovesPhaseType:
hg.size.Sub(hg.size, set.GetSize())
hg.vertexRemoves[shardKey] = set
}
case HyperedgeAtomType:
switch phaseType {
case AddsPhaseType:
hg.size.Add(hg.size, set.GetSize())
hg.hyperedgeAdds[shardKey] = set
case RemovesPhaseType:
hg.size.Sub(hg.size, set.GetSize())
hg.hyperedgeRemoves[shardKey] = set
}
}

return nil
}

func (hg *Hypergraph) GetSize() *big.Int {
return hg.size
}

func (hg *Hypergraph) getOrCreateIdSet(
shardAddr ShardKey,
addMap map[ShardKey]*IdSet,
removeMap map[ShardKey]*IdSet,
atomType AtomType,
) (*IdSet, *IdSet) {
if _, ok := addMap[shardAddr]; !ok {
addMap[shardAddr] = NewIdSet(atomType)
}
@ -164,88 +506,149 @@ func (hg *Hypergraph) getOrCreateIdSet(shardAddr ShardAddress, addMap map[ShardA
return addMap[shardAddr], removeMap[shardAddr]
}

func (hg *Hypergraph) AddVertex(v *Vertex) error {
shardAddr := GetShardAddress(v)
addSet, _ := hg.getOrCreateIdSet(shardAddr, hg.vertexAdds, hg.vertexRemoves, "vertex")
return addSet.Add(v)
func (hg *Hypergraph) AddVertex(v Vertex) error {
shardAddr := GetShardKey(v)
addSet, _ := hg.getOrCreateIdSet(
shardAddr,
hg.vertexAdds,
hg.vertexRemoves,
VertexAtomType,
)
hg.size.Add(hg.size, v.GetSize())
return errors.Wrap(addSet.Add(v), "add vertex")
}

func (hg *Hypergraph) AddHyperedge(h *Hyperedge) error {
if !hg.LookupAtomSet(h.Extrinsics) {
func (hg *Hypergraph) AddHyperedge(h Hyperedge) error {
if !hg.LookupAtomSet(&h.(*hyperedge).extrinsics) {
return ErrMissingExtrinsics
}
shardAddr := GetShardAddress(h)
addSet, _ := hg.getOrCreateIdSet(shardAddr, hg.hyperedgeAdds, hg.hyperedgeRemoves, "hyperedge")
return addSet.Add(h)
shardAddr := GetShardKey(h)
addSet, removeSet := hg.getOrCreateIdSet(
shardAddr,
hg.hyperedgeAdds,
hg.hyperedgeRemoves,
HyperedgeAtomType,
)
id := h.GetID()
if !removeSet.Has(id) {
hg.size.Add(hg.size, h.GetSize())
return errors.Wrap(addSet.Add(h), "add hyperedge")
}
return nil
}

func (hg *Hypergraph) RemoveVertex(v *Vertex) error {
shardAddr := GetShardAddress(v)

if !hg.LookupVertex(v) {
_, removeSet := hg.getOrCreateIdSet(shardAddr, hg.vertexAdds, hg.vertexRemoves, "vertex")
return removeSet.Add(v)
func (hg *Hypergraph) RemoveVertex(v Vertex) error {
shardKey := GetShardKey(v)
if !hg.LookupVertex(v.(*vertex)) {
addSet, removeSet := hg.getOrCreateIdSet(
shardKey,
hg.vertexAdds,
hg.vertexRemoves,
VertexAtomType,
)
if err := addSet.Add(v); err != nil {
return errors.Wrap(err, "remove vertex")
}
return errors.Wrap(removeSet.Add(v), "remove vertex")
}

id := v.GetID()

for _, hyperedgeAdds := range hg.hyperedgeAdds {
for _, atom := range hyperedgeAdds.atoms {
if he, ok := atom.(*Hyperedge); ok {
if _, ok := he.Extrinsics[v.GetID()]; ok {
if he, ok := atom.(*hyperedge); ok {
if _, ok := he.extrinsics[id]; ok {
return ErrIsExtrinsic
}
}
}
}
_, removeSet := hg.getOrCreateIdSet(shardAddr, hg.vertexAdds, hg.vertexRemoves, "vertex")
return removeSet.Add(v)
_, removeSet := hg.getOrCreateIdSet(
shardKey,
hg.vertexAdds,
hg.vertexRemoves,
VertexAtomType,
)
hg.size.Sub(hg.size, v.GetSize())
err := removeSet.Add(v)
return err
}

func (hg *Hypergraph) RemoveHyperedge(h *Hyperedge) error {
shardAddr := GetShardAddress(h)
func (hg *Hypergraph) RemoveHyperedge(h Hyperedge) error {
shardKey := GetShardKey(h)
wasPresent := hg.LookupHyperedge(h.(*hyperedge))
if !wasPresent {
addSet, removeSet := hg.getOrCreateIdSet(
shardKey,
hg.hyperedgeAdds,
hg.hyperedgeRemoves,
HyperedgeAtomType,
)
if err := addSet.Add(h); err != nil {
return errors.Wrap(err, "remove hyperedge")
}

if !hg.LookupHyperedge(h) {
_, removeSet := hg.getOrCreateIdSet(shardAddr, hg.hyperedgeAdds, hg.hyperedgeRemoves, "hyperedge")
return removeSet.Add(h)
return errors.Wrap(removeSet.Add(h), "remove hyperedge")
}

id := h.GetID()
for _, hyperedgeAdds := range hg.hyperedgeAdds {
for _, atom := range hyperedgeAdds.atoms {
if he, ok := atom.(*Hyperedge); ok {
if _, ok := he.Extrinsics[h.GetID()]; ok {
if he, ok := atom.(*hyperedge); ok {
if _, ok := he.extrinsics[id]; ok {
return ErrIsExtrinsic
}
}
}
}
_, removeSet := hg.getOrCreateIdSet(shardAddr, hg.hyperedgeAdds, hg.hyperedgeRemoves, "hyperedge")
return removeSet.Add(h)
_, removeSet := hg.getOrCreateIdSet(
shardKey,
hg.hyperedgeAdds,
hg.hyperedgeRemoves,
HyperedgeAtomType,
)
hg.size.Sub(hg.size, h.GetSize())
err := removeSet.Add(h)
return err
}

func (hg *Hypergraph) LookupVertex(v *Vertex) bool {
shardAddr := GetShardAddress(v)
addSet, removeSet := hg.getOrCreateIdSet(shardAddr, hg.vertexAdds, hg.vertexRemoves, "vertex")
return addSet.Has(v) && !removeSet.Has(v)
func (hg *Hypergraph) LookupVertex(v Vertex) bool {
shardAddr := GetShardKey(v)
addSet, removeSet := hg.getOrCreateIdSet(
shardAddr,
hg.vertexAdds,
hg.vertexRemoves,
VertexAtomType,
)
id := v.GetID()
return addSet.Has(id) && !removeSet.Has(id)
}

func (hg *Hypergraph) LookupHyperedge(h *Hyperedge) bool {
shardAddr := GetShardAddress(h)
addSet, removeSet := hg.getOrCreateIdSet(shardAddr, hg.hyperedgeAdds, hg.hyperedgeRemoves, "hyperedge")
return hg.LookupAtomSet(h.Extrinsics) && addSet.Has(h) && !removeSet.Has(h)
func (hg *Hypergraph) LookupHyperedge(h Hyperedge) bool {
shardAddr := GetShardKey(h)
addSet, removeSet := hg.getOrCreateIdSet(
shardAddr,
hg.hyperedgeAdds,
hg.hyperedgeRemoves,
HyperedgeAtomType,
)
id := h.GetID()
return hg.LookupAtomSet(&h.(*hyperedge).extrinsics) && addSet.Has(id) && !removeSet.Has(id)
}

func (hg *Hypergraph) LookupAtom(a Atom) bool {
switch v := a.(type) {
case *Vertex:
case *vertex:
return hg.LookupVertex(v)
case *Hyperedge:
case *hyperedge:
return hg.LookupHyperedge(v)
default:
return false
}
}

func (hg *Hypergraph) LookupAtomSet(atomSet map[[64]byte]Atom) bool {
for _, atom := range atomSet {
func (hg *Hypergraph) LookupAtomSet(atomSet *map[[64]byte]Atom) bool {
for _, atom := range *atomSet {
if !hg.LookupAtom(atom) {
return false
}
@ -254,12 +657,13 @@ func (hg *Hypergraph) LookupAtomSet(atomSet map[[64]byte]Atom) bool {
}

func (hg *Hypergraph) Within(a, h Atom) bool {
if he, ok := h.(*Hyperedge); ok {
if _, ok := he.Extrinsics[a.GetID()]; ok || a.GetID() == h.GetID() {
if he, ok := h.(*hyperedge); ok {
addr := a.GetID()
if _, ok := he.extrinsics[addr]; ok || a.GetID() == h.GetID() {
return true
}
for _, extrinsic := range he.Extrinsics {
if nestedHe, ok := extrinsic.(*Hyperedge); ok {
for _, extrinsic := range he.extrinsics {
if nestedHe, ok := extrinsic.(*hyperedge); ok {
if hg.LookupHyperedge(nestedHe) && hg.Within(a, nestedHe) {
return true
}
@ -269,15 +673,15 @@ func (hg *Hypergraph) Within(a, h Atom) bool {
return false
}

// GetReconciledVertexSetForShard computes the set of vertices that have been added but
// not removed for a specific shard.
func (hg *Hypergraph) GetReconciledVertexSetForShard(shardAddr ShardAddress) *IdSet {
vertices := NewIdSet("vertex")
func (hg *Hypergraph) GetReconciledVertexSetForShard(
shardKey ShardKey,
) *IdSet {
vertices := NewIdSet(VertexAtomType)

if addSet, ok := hg.vertexAdds[shardAddr]; ok {
removeSet := hg.vertexRemoves[shardAddr]
for _, v := range addSet.atoms {
if !removeSet.Has(v) {
if addSet, ok := hg.vertexAdds[shardKey]; ok {
removeSet := hg.vertexRemoves[shardKey]
for id, v := range addSet.atoms {
if !removeSet.Has(id) {
vertices.Add(v)
}
}
@ -286,15 +690,15 @@ func (hg *Hypergraph) GetReconciledVertexSetForShard(shardAddr ShardAddress) *Id
return vertices
}

// GetReconciledHyperedgeSetForShard computes the set of hyperedges that have been added
// but not removed for a specific shard.
func (hg *Hypergraph) GetReconciledHyperedgeSetForShard(shardAddr ShardAddress) *IdSet {
hyperedges := NewIdSet("hyperedge")
func (hg *Hypergraph) GetReconciledHyperedgeSetForShard(
shardKey ShardKey,
) *IdSet {
hyperedges := NewIdSet(HyperedgeAtomType)

if addSet, ok := hg.hyperedgeAdds[shardAddr]; ok {
removeSet := hg.hyperedgeRemoves[shardAddr]
if addSet, ok := hg.hyperedgeAdds[shardKey]; ok {
removeSet := hg.hyperedgeRemoves[shardKey]
for _, h := range addSet.atoms {
if !removeSet.Has(h) {
if !removeSet.Has(h.GetID()) {
hyperedges.Add(h)
}
}
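Both reconciliation helpers implement the standard two-phase-set read: an atom is visible iff its ID appears in the add set and not in the remove set, and removal is permanent. A minimal model of that rule:

package main

import "fmt"

// twoPhaseSet models the per-shard add/remove pair kept for each atom type.
type twoPhaseSet struct {
    adds    map[string]bool
    removes map[string]bool
}

// reconciled returns the IDs added and never removed; permanent removal
// is what makes merges order-insensitive.
func (s twoPhaseSet) reconciled() []string {
    out := []string{}
    for id := range s.adds {
        if !s.removes[id] {
            out = append(out, id)
        }
    }
    return out
}

func main() {
    s := twoPhaseSet{
        adds:    map[string]bool{"a": true, "b": true},
        removes: map[string]bool{"b": true},
    }
    fmt.Println(s.reconciled()) // [a]
}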

@ -1,56 +1,59 @@
package application_test

import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"time"

"github.com/cloudflare/circl/sign/ed448"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
)

type Operation struct {
Type string // "AddVertex", "RemoveVertex", "AddHyperedge", "RemoveHyperedge"
Vertex *application.Vertex
Hyperedge *application.Hyperedge
Vertex application.Vertex
Hyperedge application.Hyperedge
}

func TestConvergence(t *testing.T) {
numParties := 3
numOperations := 100

// Generate a set of vertices and hyperedges
vertices := make([]*application.Vertex, numOperations)
numParties := 4
numOperations := 100000
enc := crypto.NewMPCitHVerifiableEncryptor(1)
pub, _, _ := ed448.GenerateKey(crand.Reader)
enc.Encrypt(make([]byte, 20), pub)
vertices := make([]application.Vertex, numOperations)
for i := 0; i < numOperations; i++ {
vertices[i] = &application.Vertex{
AppAddress: [32]byte{byte(i % 256)},
DataAddress: [32]byte{byte(i / 256)},
}
vertices[i] = application.NewVertex(
[32]byte{byte((i >> 8) % 256), byte((i % 256))},
[32]byte{byte((i >> 8) / 256), byte(i / 256)},
[]application.Encrypted{},
)
}

hyperedges := make([]*application.Hyperedge, numOperations/10)
hyperedges := make([]application.Hyperedge, numOperations/10)
for i := 0; i < numOperations/10; i++ {
hyperedges[i] = &application.Hyperedge{
AppAddress: [32]byte{byte(i % 256)},
DataAddress: [32]byte{byte(i / 256)},
Extrinsics: make(map[[64]byte]application.Atom),
}
// Add some random vertices as extrinsics
hyperedges[i] = application.NewHyperedge(
[32]byte{0, 0, byte((i >> 8) % 256), byte(i % 256)},
[32]byte{0, 0, byte((i >> 8) / 256), byte(i / 256)},
)
for j := 0; j < 3; j++ {
v := vertices[rand.Intn(len(vertices))]
hyperedges[i].Extrinsics[v.GetID()] = v
hyperedges[i].AddExtrinsic(v)
}
}

// Generate a sequence of operations
operations1 := make([]Operation, numOperations)
operations2 := make([]Operation, numOperations)
for i := 0; i < numOperations; i++ {
op := rand.Intn(2)
switch op {
case 0:
operations1[i] = Operation{Type: "AddVertex", Vertex: vertices[rand.Intn(len(vertices))]}
operations1[i] = Operation{Type: "AddVertex", Vertex: vertices[i]}
case 1:
operations1[i] = Operation{Type: "RemoveVertex", Vertex: vertices[rand.Intn(len(vertices))]}
operations1[i] = Operation{Type: "AddVertex", Vertex: vertices[i]}
}
}
for i := 0; i < numOperations; i++ {
@ -63,13 +66,11 @@ func TestConvergence(t *testing.T) {
}
}

// Create CRDTs for each party
crdts := make([]*application.Hypergraph, numParties)
for i := 0; i < numParties; i++ {
crdts[i] = application.NewHypergraph()
}

// Apply operations in different orders for each party
for i := 0; i < numParties; i++ {
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(operations1), func(i, j int) { operations1[i], operations1[j] = operations1[j], operations1[i] })
@ -94,15 +95,17 @@ func TestConvergence(t *testing.T) {
case "RemoveVertex":
crdts[i].RemoveVertex(op.Vertex)
case "AddHyperedge":
fmt.Println("add", i, op)
crdts[i].AddHyperedge(op.Hyperedge)
case "RemoveHyperedge":
fmt.Println("remove", i, op)
crdts[i].RemoveHyperedge(op.Hyperedge)
}
}
}

// Verify that all CRDTs have converged to the same state
// Additional verification: check specific vertices and hyperedges
crdts[0].GetSize()

for _, v := range vertices {
state := crdts[0].LookupVertex(v)
for i := 1; i < numParties; i++ {
@ -111,12 +114,11 @@ func TestConvergence(t *testing.T) {
}
}
}

for _, h := range hyperedges {
state := crdts[0].LookupHyperedge(h)
for i := 1; i < numParties; i++ {
if crdts[i].LookupHyperedge(h) != state {
t.Errorf("Hyperedge %v has different state in CRDT %d", h, i)
t.Errorf("Hyperedge %v has different state in CRDT %d, %v", h, i, state)
}
}
}
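The convergence test's core claim is order-insensitivity: each party applies the same multiset of operations in an independently shuffled order and must end in the same state. A reduced model of that harness using a grow-only set:

package main

import (
    "fmt"
    "math/rand"
)

// build applies add operations in the given order to a grow-only set.
func build(order []string) map[string]bool {
    set := map[string]bool{}
    for _, op := range order {
        set[op] = true
    }
    return set
}

func main() {
    ops := []string{"a", "b", "c", "d"}
    shuffled := append([]string{}, ops...)
    rand.Shuffle(len(shuffled), func(i, j int) {
        shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
    })

    s1, s2 := build(ops), build(shuffled)
    converged := len(s1) == len(s2)
    for k := range s1 {
        converged = converged && s2[k]
    }
    fmt.Println("converged:", converged) // true for any shuffle
}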
|
||||
|
||||
@ -2,6 +2,7 @@ package application_test

import (
    "bytes"
    "fmt"
    "testing"

    "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
@ -12,8 +13,8 @@ func TestHypergraph(t *testing.T) {

    // Test vertex operations
    t.Run("Vertex Operations", func(t *testing.T) {
        v1 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{1}}
        v2 := &application.Vertex{AppAddress: [32]byte{1}, DataAddress: [32]byte{2}}
        v1 := application.NewVertex([32]byte{1}, [32]byte{1}, []application.Encrypted{})
        v2 := application.NewVertex([32]byte{1}, [32]byte{2}, []application.Encrypted{})

        // Add vertices
        err := hg.AddVertex(v1)
@ -48,16 +49,14 @@ func TestHypergraph(t *testing.T) {

    // Test hyperedge operations
    t.Run("Hyperedge Operations", func(t *testing.T) {
        v3 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{1}}
        v4 := &application.Vertex{AppAddress: [32]byte{2}, DataAddress: [32]byte{2}}
        v3 := application.NewVertex([32]byte{2}, [32]byte{1}, []application.Encrypted{})
        v4 := application.NewVertex([32]byte{2}, [32]byte{2}, []application.Encrypted{})
        hg.AddVertex(v3)
        hg.AddVertex(v4)

        h1 := &application.Hyperedge{
            AppAddress:  [32]byte{3},
            DataAddress: [32]byte{1},
            Extrinsics:  map[[64]byte]application.Atom{v3.GetID(): v3, v4.GetID(): v4},
        }
        h1 := application.NewHyperedge([32]byte{3}, [32]byte{1})
        h1.AddExtrinsic(v3)
        h1.AddExtrinsic(v4)

        // Add hyperedge
        err := hg.AddHyperedge(h1)
@ -82,16 +81,14 @@ func TestHypergraph(t *testing.T) {

    // Test "within" relationship
    t.Run("Within Relationship", func(t *testing.T) {
        v5 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{1}}
        v6 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{2}}
        v5 := application.NewVertex([32]byte{4}, [32]byte{1}, []application.Encrypted{})
        v6 := application.NewVertex([32]byte{4}, [32]byte{2}, []application.Encrypted{})
        hg.AddVertex(v5)
        hg.AddVertex(v6)

        h2 := &application.Hyperedge{
            AppAddress:  [32]byte{5},
            DataAddress: [32]byte{1},
            Extrinsics:  map[[64]byte]application.Atom{v5.GetID(): v5, v6.GetID(): v6},
        }
        h2 := application.NewHyperedge([32]byte{5}, [32]byte{1})
        h2.AddExtrinsic(v5)
        h2.AddExtrinsic(v6)
        hg.AddHyperedge(h2)

        if !hg.Within(v5, h2) {
@ -101,7 +98,7 @@ func TestHypergraph(t *testing.T) {
            t.Error("v6 should be within h2")
        }

        v7 := &application.Vertex{AppAddress: [32]byte{4}, DataAddress: [32]byte{3}}
        v7 := application.NewVertex([32]byte{4}, [32]byte{3}, []application.Encrypted{})
        hg.AddVertex(v7)
        if hg.Within(v7, h2) {
            t.Error("v7 should not be within h2")
@ -110,21 +107,16 @@ func TestHypergraph(t *testing.T) {

    // Test nested hyperedges
    t.Run("Nested Hyperedges", func(t *testing.T) {
        v8 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{1}}
        v9 := &application.Vertex{AppAddress: [32]byte{6}, DataAddress: [32]byte{2}}
        v8 := application.NewVertex([32]byte{6}, [32]byte{1}, []application.Encrypted{})
        v9 := application.NewVertex([32]byte{6}, [32]byte{2}, []application.Encrypted{})
        hg.AddVertex(v8)
        hg.AddVertex(v9)

        h3 := &application.Hyperedge{
            AppAddress:  [32]byte{7},
            DataAddress: [32]byte{1},
            Extrinsics:  map[[64]byte]application.Atom{v8.GetID(): v8},
        }
        h4 := &application.Hyperedge{
            AppAddress:  [32]byte{7},
            DataAddress: [32]byte{2},
            Extrinsics:  map[[64]byte]application.Atom{h3.GetID(): h3, v9.GetID(): v9},
        }
        h3 := application.NewHyperedge([32]byte{7}, [32]byte{1})
        h3.AddExtrinsic(v8)
        h4 := application.NewHyperedge([32]byte{7}, [32]byte{2})
        h4.AddExtrinsic(h3)
        h4.AddExtrinsic(v9)
        hg.AddHyperedge(h3)
        hg.AddHyperedge(h4)

@ -138,12 +130,10 @@ func TestHypergraph(t *testing.T) {

    // Test error cases
    t.Run("Error Cases", func(t *testing.T) {
        v10 := &application.Vertex{AppAddress: [32]byte{8}, DataAddress: [32]byte{1}}
        h5 := &application.Hyperedge{
            AppAddress:  [32]byte{8},
            DataAddress: [32]byte{2},
            Extrinsics:  map[[64]byte]application.Atom{v10.GetID(): v10},
        }
        v10 := application.NewVertex([32]byte{8}, [32]byte{1}, []application.Encrypted{})

        h5 := application.NewHyperedge([32]byte{8}, [32]byte{2})
        h5.AddExtrinsic(v10)

        // Try to add hyperedge with non-existent vertex
        err := hg.AddHyperedge(h5)
@ -153,8 +143,8 @@ func TestHypergraph(t *testing.T) {

        // Add vertex and hyperedge
        hg.AddVertex(v10)
        fmt.Println("add hyperedge")
        hg.AddHyperedge(h5)

        // Try to remove vertex that is an extrinsic
        err = hg.RemoveVertex(v10)
        if err != application.ErrIsExtrinsic {
@ -164,8 +154,8 @@ func TestHypergraph(t *testing.T) {

    // Test sharding
    t.Run("Sharding", func(t *testing.T) {
        v11 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{1}}
        v12 := &application.Vertex{AppAddress: [32]byte{9}, DataAddress: [32]byte{2}}
        v11 := application.NewVertex([32]byte{9}, [32]byte{1}, []application.Encrypted{})
        v12 := application.NewVertex([32]byte{9}, [32]byte{2}, []application.Encrypted{})
        hg.AddVertex(v11)
        hg.AddVertex(v12)

@ -173,8 +163,9 @@ func TestHypergraph(t *testing.T) {
        shard12 := application.GetShardAddress(v12)

        if !bytes.Equal(shard11.L1[:], shard12.L1[:]) ||
            bytes.Equal(shard11.L2[:], shard12.L2[:]) {
            t.Error("v11 and v12 should be in the same L1 shard and not the same L2 shard")
            !bytes.Equal(shard11.L2[:], shard12.L2[:]) ||
            bytes.Equal(shard11.L3[:], shard12.L3[:]) {
            t.Error("v11 and v12 should be in the same L1 shard and the same L2 shard but not the same L3 shard")
        }
    })
}

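The updated Sharding assertion tightens the expected locality: vertices that share an AppAddress now land in the same L1 and L2 shards and split only at L3. A hedged restatement of that property as a standalone test, reusing only the API from the diff above:

```go
package application_test

import (
    "bytes"
    "testing"

    "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
)

// Sketch of the locality property asserted above: one AppAddress implies a
// shared L1 and L2 shard, while distinct DataAddresses diverge at L3.
func TestShardLocalitySketch(t *testing.T) {
    a := application.GetShardAddress(
        application.NewVertex([32]byte{9}, [32]byte{1}, []application.Encrypted{}))
    b := application.GetShardAddress(
        application.NewVertex([32]byte{9}, [32]byte{2}, []application.Encrypted{}))
    if !bytes.Equal(a.L1[:], b.L1[:]) || !bytes.Equal(a.L2[:], b.L2[:]) {
        t.Error("expected shared L1 and L2 shards for one AppAddress")
    }
    if bytes.Equal(a.L3[:], b.L3[:]) {
        t.Error("expected distinct L3 shards for distinct DataAddresses")
    }
}
```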
554
node/main.go
@ -7,7 +7,6 @@ import (
    _ "embed"
    "encoding/binary"
    "encoding/hex"
    "encoding/json"
    "flag"
    "fmt"
    "io/fs"
@ -22,14 +21,12 @@ import (
    "runtime"
    rdebug "runtime/debug"
    "runtime/pprof"
    "slices"
    "strconv"
    "strings"
    "syscall"
    "time"

    "github.com/cloudflare/circl/sign/ed448"
    "github.com/iden3/go-iden3-crypto/poseidon"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pbnjay/memory"
@ -42,14 +39,10 @@ import (
    "source.quilibrium.com/quilibrium/monorepo/node/config"
    qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
    "source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
    "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
    "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
    qruntime "source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
    "source.quilibrium.com/quilibrium/monorepo/node/p2p"
    "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
    "source.quilibrium.com/quilibrium/monorepo/node/rpc"
    "source.quilibrium.com/quilibrium/monorepo/node/store"
    "source.quilibrium.com/quilibrium/monorepo/node/tries"
    "source.quilibrium.com/quilibrium/monorepo/node/utils"
)

@ -497,8 +490,6 @@ func main() {
        }
    }

    RunForkRepairIfNeeded(nodeConfig)

    done := make(chan os.Signal, 1)
    signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
    var node *app.Node
@ -605,551 +596,6 @@ func stopDataWorkers() {
    }
}

//go:embed overrideFrames.json
var overrideFramesData []byte

func RunForkRepairIfNeeded(
    nodeConfig *config.Config,
) {
    logger, _ := zap.NewDevelopment()
    db := store.NewPebbleDB(&config.DBConfig{Path: nodeConfig.DB.Path})
    defer db.Close()
    clockStore := store.NewPebbleClockStore(db, logger)
    coinStore := store.NewPebbleCoinStore(db, logger)
    filter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
    frame, _, err := clockStore.GetDataClockFrame(filter, uint64(48995), false)
    if err != nil {
        fmt.Println("No repair needed.")
        return
    }

    compareSel, _ := frame.GetSelector()
    badFrameSelector, _ := hex.DecodeString("16515bf99a55d24c35d1dd0a0c7d778154e5ffa6dfa3ad164f11355f4cb00056")

    if bytes.Equal(badFrameSelector, compareSel.FillBytes(make([]byte, 32))) {
        logger.Info("performing fork repair")
        txn, _ := coinStore.NewTransaction(false)
        _, outs, _ := application.GetOutputsFromClockFrame(frame)
        logger.Info("removing invalid frame at position 48995")
        for i, output := range outs.Outputs {
            switch o := output.Output.(type) {
            case *protobufs.TokenOutput_Coin:
                address, _ := token.GetAddressOfCoin(o.Coin, frame.FrameNumber, uint64(i))
                coin, err := coinStore.GetCoinByAddress(nil, address)
                if err != nil {
                    fmt.Println(err)
                    return
                }
                stateTree := &qcrypto.VectorCommitmentTree{}
                if err = coinStore.DeleteCoin(txn, address, coin, stateTree); err != nil {
                    txn.Abort()
                    fmt.Println(err)
                    return
                }
            case *protobufs.TokenOutput_Proof:
                address, _ := token.GetAddressOfPreCoinProof(o.Proof)
                proof, err := coinStore.GetPreCoinProofByAddress(address)
                if err != nil {
                    txn.Abort()
                    fmt.Println(err)
                    return
                }
                stateTree := &qcrypto.VectorCommitmentTree{}
                if err = coinStore.DeletePreCoinProof(txn, address, proof, stateTree); err != nil {
                    txn.Abort()
                    fmt.Println(err)
                    return
                }
            }
        }

        if err = txn.Commit(); err != nil {
            txn.Abort()

            logger.Error("could not commit data", zap.Error(err))
            return
        }

        logger.Info("inserting valid frame starting at position 48995")
        type OverrideFrames struct {
            FrameData []byte `json:"frameData"`
        }
        overrideFramesJson := []*OverrideFrames{}
        if err = json.Unmarshal(overrideFramesData, &overrideFramesJson); err != nil {
            txn.Abort()
            logger.Error("could not unmarshal overriding frame data", zap.Error(err))
            return
        }

        for _, overrideFrame := range overrideFramesJson {
            override := &protobufs.ClockFrame{}
            if err := proto.Unmarshal(overrideFrame.FrameData, override); err != nil {
                logger.Error("could not unmarshal frame data", zap.Error(err))
                return
            }

            txn, _ := clockStore.NewTransaction(false)
            if err := overrideHead(
                txn,
                clockStore,
                coinStore,
                override,
                logger,
            ); err != nil {
                txn.Abort()
                logger.Error("could not override frame data", zap.Error(err))
                return
            }

            if err = txn.Commit(); err != nil {
                txn.Abort()

                logger.Error("could not commit data", zap.Error(err))
                return
            }
        }
    } else {
        fmt.Println("No repair needed.")
        return
    }
}

func overrideHead(
    txn store.Transaction,
    clockStore store.ClockStore,
    coinStore store.CoinStore,
    frame *protobufs.ClockFrame,
    logger *zap.Logger,
) error {
    selector, err := frame.GetSelector()
    if err != nil {
        panic(err)
    }
    filter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)

    _, ts, err := clockStore.GetDataClockFrame(
        filter,
        frame.FrameNumber-1,
        false,
    )
    if err != nil {
        logger.Error("could not get frame", zap.Error(err), zap.Uint64("frame", frame.FrameNumber-1))
        return errors.Wrap(err, "set head")
    }

    if err := clockStore.StageDataClockFrame(
        selector.FillBytes(make([]byte, 32)),
        frame,
        txn,
    ); err != nil {
        panic(err)
    }

    if ts, err = processFrame(txn, frame, ts, coinStore, clockStore, logger); err != nil {
        logger.Error("invalid frame execution, unwinding", zap.Error(err))
        txn.Abort()
        return errors.Wrap(err, "set head")
    }

    if err := clockStore.CommitDataClockFrame(
        filter,
        frame.FrameNumber,
        selector.FillBytes(make([]byte, 32)),
        ts,
        txn,
        false,
    ); err != nil {
        panic(err)
    }

    return nil
}

func processFrame(
    txn store.Transaction,
    frame *protobufs.ClockFrame,
    triesAtFrame []*tries.RollingFrecencyCritbitTrie,
    coinStore store.CoinStore,
    clockStore store.ClockStore,
    logger *zap.Logger,
) ([]*tries.RollingFrecencyCritbitTrie, error) {
    f, err := coinStore.GetLatestFrameProcessed()
    if err != nil || f == frame.FrameNumber {
        return nil, errors.Wrap(err, "process frame")
    }

    logger.Info(
        "evaluating next frame",
        zap.Uint64(
            "frame_number",
            frame.FrameNumber,
        ),
    )
    m, err := clockStore.GetPeerSeniorityMap(frame.Filter)
    if err != nil {
        logger.Error(
            "error while materializing seniority map",
            zap.Error(err),
        )
        return nil, errors.Wrap(err, "process frame")
    }
    peerSeniority := token.NewFromMap(m)

    app, err := application.MaterializeApplicationFromFrame(
        nil,
        frame,
        triesAtFrame,
        coinStore,
        clockStore,
        nil,
        logger,
        nil,
    )
    if err != nil {
        logger.Error(
            "error while materializing application from frame",
            zap.Error(err),
        )
        return nil, errors.Wrap(err, "process frame")
    }

    proverTrieJoinRequests := [][]byte{}
    proverTrieLeaveRequests := [][]byte{}

    for i, output := range app.TokenOutputs.Outputs {
        i := i
        if frame.FrameNumber == 0 {
            i = 0
        }
        switch o := output.Output.(type) {
        case *protobufs.TokenOutput_Coin:
            address, err := token.GetAddressOfCoin(o.Coin, frame.FrameNumber, uint64(i))
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            stateTree := &qcrypto.VectorCommitmentTree{}
            err = coinStore.PutCoin(
                txn,
                frame.FrameNumber,
                address,
                o.Coin,
                stateTree,
            )
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
        case *protobufs.TokenOutput_DeletedCoin:
            coin, err := coinStore.GetCoinByAddress(txn, o.DeletedCoin.Address)
            if err != nil {
                if frame.FrameNumber == 48997 {
                    // special case, the fork happened at 48995, state replayed here
                    continue
                }
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            stateTree := &qcrypto.VectorCommitmentTree{}
            err = coinStore.DeleteCoin(
                txn,
                o.DeletedCoin.Address,
                coin,
                stateTree,
            )
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
        case *protobufs.TokenOutput_Proof:
            address, err := token.GetAddressOfPreCoinProof(o.Proof)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            stateTree := &qcrypto.VectorCommitmentTree{}
            err = coinStore.PutPreCoinProof(
                txn,
                frame.FrameNumber,
                address,
                o.Proof,
                stateTree,
            )
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            if len(o.Proof.Amount) == 32 &&
                !bytes.Equal(o.Proof.Amount, make([]byte, 32)) &&
                o.Proof.Commitment != nil {
                addr := string(o.Proof.Owner.GetImplicitAccount().Address)
                for _, t := range app.Tries {
                    if t.Contains([]byte(addr)) {
                        t.Add([]byte(addr), frame.FrameNumber)
                        break
                    }
                }
                if _, ok := (*peerSeniority)[addr]; !ok {
                    (*peerSeniority)[addr] = token.NewPeerSeniorityItem(10, addr)
                } else {
                    (*peerSeniority)[addr] = token.NewPeerSeniorityItem(
                        (*peerSeniority)[addr].GetSeniority()+10,
                        addr,
                    )
                }
            }
        case *protobufs.TokenOutput_DeletedProof:
            address, err := token.GetAddressOfPreCoinProof(o.DeletedProof)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            stateTree := &qcrypto.VectorCommitmentTree{}
            err = coinStore.DeletePreCoinProof(
                txn,
                address,
                o.DeletedProof,
                stateTree,
            )
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
        case *protobufs.TokenOutput_Announce:
            peerIds := []string{}
            for _, sig := range o.Announce.PublicKeySignaturesEd448 {
                peerId, err := getPeerIdFromSignature(sig)
                if err != nil {
                    txn.Abort()
                    return nil, errors.Wrap(err, "process frame")
                }

                peerIds = append(peerIds, peerId.String())
            }

            mergeable := true
            for i, peerId := range peerIds {
                addr, err := getAddressFromSignature(
                    o.Announce.PublicKeySignaturesEd448[i],
                )
                if err != nil {
                    txn.Abort()
                    return nil, errors.Wrap(err, "process frame")
                }
                sen, ok := (*peerSeniority)[string(addr)]
                if !ok {
                    continue
                }

                peer := new(big.Int).SetUint64(sen.GetSeniority())
                if peer.Cmp(token.GetAggregatedSeniority([]string{peerId})) != 0 {
                    mergeable = false
                    break
                }
            }

            if mergeable {
                addr, err := getAddressFromSignature(
                    o.Announce.PublicKeySignaturesEd448[0],
                )
                if err != nil {
                    txn.Abort()
                    return nil, errors.Wrap(err, "process frame")
                }

                additional := uint64(0)
                _, prfs, err := coinStore.GetPreCoinProofsForOwner(addr)
                if err != nil && !errors.Is(err, store.ErrNotFound) {
                    txn.Abort()
                    return nil, errors.Wrap(err, "process frame")
                }

                for _, pr := range prfs {
                    if pr.IndexProof == nil && pr.Difficulty == 0 && pr.Commitment == nil {
                        // approximate average per interval:
                        add := new(big.Int).SetBytes(pr.Amount)
                        add.Quo(add, big.NewInt(58800000))
                        if add.Cmp(big.NewInt(4000000)) > 0 {
                            add = big.NewInt(4000000)
                        }
                        additional = add.Uint64()
                    }
                }

                (*peerSeniority)[string(addr)] = token.NewPeerSeniorityItem(
                    token.GetAggregatedSeniority(peerIds).Uint64()+additional,
                    string(addr),
                )

                for _, sig := range o.Announce.PublicKeySignaturesEd448[1:] {
                    addr, err := getAddressFromSignature(
                        sig,
                    )
                    if err != nil {
                        txn.Abort()
                        return nil, errors.Wrap(err, "process frame")
                    }

                    (*peerSeniority)[string(addr)] = token.NewPeerSeniorityItem(0, string(addr))
                }
            }
        case *protobufs.TokenOutput_Join:
            addr, err := getAddressFromSignature(o.Join.PublicKeySignatureEd448)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            if _, ok := (*peerSeniority)[string(addr)]; !ok {
                (*peerSeniority)[string(addr)] = token.NewPeerSeniorityItem(20, string(addr))
            } else {
                (*peerSeniority)[string(addr)] = token.NewPeerSeniorityItem(
                    (*peerSeniority)[string(addr)].GetSeniority()+20,
                    string(addr),
                )
            }
            proverTrieJoinRequests = append(proverTrieJoinRequests, addr)
        case *protobufs.TokenOutput_Leave:
            addr, err := getAddressFromSignature(o.Leave.PublicKeySignatureEd448)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
            proverTrieLeaveRequests = append(proverTrieLeaveRequests, addr)
        case *protobufs.TokenOutput_Pause:
            _, err := getAddressFromSignature(o.Pause.PublicKeySignatureEd448)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
        case *protobufs.TokenOutput_Resume:
            _, err := getAddressFromSignature(o.Resume.PublicKeySignatureEd448)
            if err != nil {
                txn.Abort()
                return nil, errors.Wrap(err, "process frame")
            }
        case *protobufs.TokenOutput_Penalty:
            addr := string(o.Penalty.Account.GetImplicitAccount().Address)
            if _, ok := (*peerSeniority)[addr]; !ok {
                (*peerSeniority)[addr] = token.NewPeerSeniorityItem(0, addr)
                proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
            } else {
                if (*peerSeniority)[addr].GetSeniority() > o.Penalty.Quantity {
                    for _, t := range app.Tries {
                        if t.Contains([]byte(addr)) {
                            v := t.Get([]byte(addr))
                            latest := v.LatestFrame
                            if frame.FrameNumber-latest > 100 {
                                proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
                            }
                            break
                        }
                    }
                    (*peerSeniority)[addr] = token.NewPeerSeniorityItem(
                        (*peerSeniority)[addr].GetSeniority()-o.Penalty.Quantity,
                        addr,
                    )
                } else {
                    (*peerSeniority)[addr] = token.NewPeerSeniorityItem(0, addr)
                    proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
                }
            }
        }
    }

    joinAddrs := tries.NewMinHeap[token.PeerSeniorityItem]()
    leaveAddrs := tries.NewMinHeap[token.PeerSeniorityItem]()
    for _, addr := range proverTrieJoinRequests {
        if _, ok := (*peerSeniority)[string(addr)]; !ok {
            joinAddrs.Push(token.NewPeerSeniorityItem(0, string(addr)))
        } else {
            joinAddrs.Push((*peerSeniority)[string(addr)])
        }
    }
    for _, addr := range proverTrieLeaveRequests {
        if _, ok := (*peerSeniority)[string(addr)]; !ok {
            leaveAddrs.Push(token.NewPeerSeniorityItem(0, string(addr)))
        } else {
            leaveAddrs.Push((*peerSeniority)[string(addr)])
        }
    }

    joinReqs := make([]token.PeerSeniorityItem, len(joinAddrs.All()))
    copy(joinReqs, joinAddrs.All())
    slices.Reverse(joinReqs)
    leaveReqs := make([]token.PeerSeniorityItem, len(leaveAddrs.All()))
    copy(leaveReqs, leaveAddrs.All())
    slices.Reverse(leaveReqs)

    token.ProcessJoinsAndLeaves(joinReqs, leaveReqs, app, peerSeniority, frame)

    err = clockStore.PutPeerSeniorityMap(
        txn,
        frame.Filter,
        token.ToSerializedMap(peerSeniority),
    )
    if err != nil {
        txn.Abort()
        return nil, errors.Wrap(err, "process frame")
    }

    err = coinStore.SetLatestFrameProcessed(txn, frame.FrameNumber)
    if err != nil {
        txn.Abort()
        return nil, errors.Wrap(err, "process frame")
    }

    return app.Tries, nil
}

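The Announce branch of processFrame folds legacy reward proofs into seniority via a capped quotient: the recorded amount is divided by 58800000 to approximate an average per interval, then capped at 4000000 (for example, an amount of 117600000000 yields a bonus of exactly 2000). A hedged extraction of that arithmetic; the standalone helper is illustrative and not part of the commit:

```go
package main

import "math/big"

// approximateSeniorityBonus mirrors the capped quotient in processFrame's
// Announce branch; the helper name and standalone form are illustrative only.
func approximateSeniorityBonus(amount []byte) uint64 {
    add := new(big.Int).SetBytes(amount)
    add.Quo(add, big.NewInt(58800000))
    if add.Cmp(big.NewInt(4000000)) > 0 {
        add = big.NewInt(4000000)
    }
    return add.Uint64()
}
```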
func getPeerIdFromSignature(
    sig *protobufs.Ed448Signature,
) (peer.ID, error) {
    if sig.PublicKey == nil || sig.PublicKey.KeyValue == nil {
        return "", errors.New("invalid data")
    }

    pk, err := crypto.UnmarshalEd448PublicKey(
        sig.PublicKey.KeyValue,
    )
    if err != nil {
        return "", errors.Wrap(err, "get address from signature")
    }

    peerId, err := peer.IDFromPublicKey(pk)
    if err != nil {
        return "", errors.Wrap(err, "get address from signature")
    }

    return peerId, nil
}

func getAddressFromSignature(
    sig *protobufs.Ed448Signature,
) ([]byte, error) {
    if sig.PublicKey == nil || sig.PublicKey.KeyValue == nil {
        return nil, errors.New("invalid data")
    }

    pk, err := crypto.UnmarshalEd448PublicKey(
        sig.PublicKey.KeyValue,
    )
    if err != nil {
        return nil, errors.Wrap(err, "get address from signature")
    }

    peerId, err := peer.IDFromPublicKey(pk)
    if err != nil {
        return nil, errors.Wrap(err, "get address from signature")
    }

    altAddr, err := poseidon.HashBytes([]byte(peerId))
    if err != nil {
        return nil, errors.Wrap(err, "get address from signature")
    }

    return altAddr.FillBytes(make([]byte, 32)), nil
}

func RunSelfTestIfNeeded(
    configDir string,
    nodeConfig *config.Config,
@ -10,7 +10,6 @@ import (
    "github.com/pkg/errors"
    "go.uber.org/zap"
    "google.golang.org/protobuf/proto"
    "source.quilibrium.com/quilibrium/monorepo/node/crypto"
    "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)

@ -31,26 +30,22 @@ type CoinStore interface {
        frameNumber uint64,
        address []byte,
        coin *protobufs.Coin,
        stateTree *crypto.VectorCommitmentTree,
    ) error
    DeleteCoin(
        txn Transaction,
        address []byte,
        coin *protobufs.Coin,
        stateTree *crypto.VectorCommitmentTree,
    ) error
    PutPreCoinProof(
        txn Transaction,
        frameNumber uint64,
        address []byte,
        preCoinProof *protobufs.PreCoinProof,
        stateTree *crypto.VectorCommitmentTree,
    ) error
    DeletePreCoinProof(
        txn Transaction,
        address []byte,
        preCoinProof *protobufs.PreCoinProof,
        stateTree *crypto.VectorCommitmentTree,
    ) error
    GetLatestFrameProcessed() (uint64, error)
    SetLatestFrameProcessed(txn Transaction, frameNumber uint64) error
@ -285,7 +280,6 @@ func (p *PebbleCoinStore) PutCoin(
    frameNumber uint64,
    address []byte,
    coin *protobufs.Coin,
    stateTree *crypto.VectorCommitmentTree,
) error {
    coinBytes, err := proto.Marshal(coin)
    if err != nil {
@ -311,10 +305,6 @@ func (p *PebbleCoinStore) PutCoin(
        return errors.Wrap(err, "put coin")
    }

    if err = stateTree.Insert(address, data); err != nil {
        return errors.Wrap(err, "put coin")
    }

    return nil
}

@ -322,7 +312,6 @@ func (p *PebbleCoinStore) DeleteCoin(
    txn Transaction,
    address []byte,
    coin *protobufs.Coin,
    stateTree *crypto.VectorCommitmentTree,
) error {
    err := txn.Delete(coinKey(address))
    if err != nil {
@ -336,10 +325,6 @@ func (p *PebbleCoinStore) DeleteCoin(
        return errors.Wrap(err, "delete coin")
    }

    if err = stateTree.Delete(address); err != nil {
        return errors.Wrap(err, "delete coin")
    }

    return nil
}

@ -348,7 +333,6 @@ func (p *PebbleCoinStore) PutPreCoinProof(
    frameNumber uint64,
    address []byte,
    preCoinProof *protobufs.PreCoinProof,
    stateTree *crypto.VectorCommitmentTree,
) error {
    proofBytes, err := proto.Marshal(preCoinProof)
    if err != nil {
@ -374,10 +358,6 @@ func (p *PebbleCoinStore) PutPreCoinProof(
        return errors.Wrap(err, "put pre coin proof")
    }

    if err = stateTree.Insert(address, data); err != nil {
        return errors.Wrap(err, "put pre coin proof")
    }

    return nil
}

@ -385,7 +365,6 @@ func (p *PebbleCoinStore) DeletePreCoinProof(
    txn Transaction,
    address []byte,
    preCoinProof *protobufs.PreCoinProof,
    stateTree *crypto.VectorCommitmentTree,
) error {
    err := txn.Delete(proofKey(address))
    if err != nil {
@ -406,10 +385,6 @@ func (p *PebbleCoinStore) DeletePreCoinProof(
        return errors.Wrap(err, "delete pre coin proof")
    }

    if err = stateTree.Delete(address); err != nil {
        return errors.Wrap(err, "delete pre coin proof")
    }

    return nil
}

238
node/store/hypergraph.go
Normal file
@ -0,0 +1,238 @@
package store

import (
    "github.com/pkg/errors"
    "go.uber.org/zap"
    "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
)

type HypergraphStore interface {
    NewTransaction(indexed bool) (Transaction, error)
    LoadHypergraph() (
        *application.Hypergraph,
        error,
    )
    SaveHypergraph(
        txn Transaction,
        hg *application.Hypergraph,
    ) error
}

var _ HypergraphStore = (*PebbleHypergraphStore)(nil)

type PebbleHypergraphStore struct {
    db     KVDB
    logger *zap.Logger
}

func NewPebbleHypergraphStore(
    db KVDB,
    logger *zap.Logger,
) *PebbleHypergraphStore {
    return &PebbleHypergraphStore{
        db,
        logger,
    }
}

const (
    HYPERGRAPH_SHARD  = 0x09
    VERTEX_ADDS       = 0x00
    VERTEX_REMOVES    = 0x10
    HYPEREDGE_ADDS    = 0x01
    HYPEREDGE_REMOVES = 0x11
)

func hypergraphVertexAddsKey(shardKey application.ShardKey) []byte {
    key := []byte{HYPERGRAPH_SHARD, VERTEX_ADDS}
    key = append(key, shardKey.L1[:]...)
    key = append(key, shardKey.L2[:]...)
    return key
}

func hypergraphVertexRemovesKey(shardKey application.ShardKey) []byte {
    key := []byte{HYPERGRAPH_SHARD, VERTEX_REMOVES}
    key = append(key, shardKey.L1[:]...)
    key = append(key, shardKey.L2[:]...)
    return key
}

func hypergraphHyperedgeAddsKey(shardKey application.ShardKey) []byte {
    key := []byte{HYPERGRAPH_SHARD, HYPEREDGE_ADDS}
    key = append(key, shardKey.L1[:]...)
    key = append(key, shardKey.L2[:]...)
    return key
}

func hypergraphHyperedgeRemovesKey(shardKey application.ShardKey) []byte {
    key := []byte{HYPERGRAPH_SHARD, HYPEREDGE_REMOVES}
    key = append(key, shardKey.L1[:]...)
    key = append(key, shardKey.L2[:]...)
    return key
}

func shardKeyFromKey(key []byte) application.ShardKey {
    return application.ShardKey{
        L1: [3]byte(key[2:5]),
        L2: [32]byte(key[5:]),
    }
}

func (p *PebbleHypergraphStore) NewTransaction(indexed bool) (
    Transaction,
    error,
) {
    return p.db.NewBatch(indexed), nil
}

func (p *PebbleHypergraphStore) LoadHypergraph() (
    *application.Hypergraph,
    error,
) {
    hg := application.NewHypergraph()
    vertexAddsIter, err := p.db.NewIter(
        []byte{HYPERGRAPH_SHARD, VERTEX_ADDS},
        []byte{HYPERGRAPH_SHARD, VERTEX_REMOVES},
    )
    if err != nil {
        return nil, errors.Wrap(err, "load hypergraph")
    }
    defer vertexAddsIter.Close()
    for vertexAddsIter.First(); vertexAddsIter.Valid(); vertexAddsIter.Next() {
        shardKey := make([]byte, len(vertexAddsIter.Key()))
        copy(shardKey, vertexAddsIter.Key())

        err := hg.ImportFromBytes(
            application.VertexAtomType,
            application.AddsPhaseType,
            shardKeyFromKey(shardKey),
            vertexAddsIter.Value(),
        )
        if err != nil {
            return nil, errors.Wrap(err, "load hypergraph")
        }
    }

    vertexRemovesIter, err := p.db.NewIter(
        []byte{HYPERGRAPH_SHARD, VERTEX_REMOVES},
        []byte{HYPERGRAPH_SHARD, VERTEX_REMOVES + 1},
    )
    if err != nil {
        return nil, errors.Wrap(err, "load hypergraph")
    }
    defer vertexRemovesIter.Close()
    for vertexRemovesIter.First(); vertexRemovesIter.Valid(); vertexRemovesIter.Next() {
        shardKey := make([]byte, len(vertexRemovesIter.Key()))
        copy(shardKey, vertexRemovesIter.Key())

        err := hg.ImportFromBytes(
            application.VertexAtomType,
            application.RemovesPhaseType,
            shardKeyFromKey(shardKey),
            vertexRemovesIter.Value(),
        )
        if err != nil {
            return nil, errors.Wrap(err, "load hypergraph")
        }
    }

    hyperedgeAddsIter, err := p.db.NewIter(
        []byte{HYPERGRAPH_SHARD, HYPEREDGE_ADDS},
        []byte{HYPERGRAPH_SHARD, HYPEREDGE_REMOVES},
    )
    if err != nil {
        return nil, errors.Wrap(err, "load hypergraph")
    }
    defer hyperedgeAddsIter.Close()
    for hyperedgeAddsIter.First(); hyperedgeAddsIter.Valid(); hyperedgeAddsIter.Next() {
        shardKey := make([]byte, len(hyperedgeAddsIter.Key()))
        copy(shardKey, hyperedgeAddsIter.Key())

        err := hg.ImportFromBytes(
            application.HyperedgeAtomType,
            application.AddsPhaseType,
            shardKeyFromKey(shardKey),
            hyperedgeAddsIter.Value(),
        )
        if err != nil {
            return nil, errors.Wrap(err, "load hypergraph")
        }
    }

    hyperedgeRemovesIter, err := p.db.NewIter(
        []byte{HYPERGRAPH_SHARD, HYPEREDGE_REMOVES},
        []byte{HYPERGRAPH_SHARD, HYPEREDGE_REMOVES + 1},
    )
    if err != nil {
        return nil, errors.Wrap(err, "load hypergraph")
    }
    defer hyperedgeRemovesIter.Close()
    for hyperedgeRemovesIter.First(); hyperedgeRemovesIter.Valid(); hyperedgeRemovesIter.Next() {
        shardKey := make([]byte, len(hyperedgeRemovesIter.Key()))
        copy(shardKey, hyperedgeRemovesIter.Key())

        err := hg.ImportFromBytes(
            application.HyperedgeAtomType,
            application.RemovesPhaseType,
            shardKeyFromKey(shardKey),
            hyperedgeRemovesIter.Value(),
        )
        if err != nil {
            return nil, errors.Wrap(err, "load hypergraph")
        }
    }

    return hg, nil
}

func (p *PebbleHypergraphStore) SaveHypergraph(
    txn Transaction,
    hg *application.Hypergraph,
) error {
    for shardKey, vertexAdds := range hg.GetVertexAdds() {
        if vertexAdds.IsDirty() {
            err := txn.Set(hypergraphVertexAddsKey(shardKey), vertexAdds.ToBytes())
            if err != nil {
                return errors.Wrap(err, "save hypergraph")
            }
        }
    }

    for shardKey, vertexRemoves := range hg.GetVertexRemoves() {
        if vertexRemoves.IsDirty() {
            err := txn.Set(
                hypergraphVertexRemovesKey(shardKey),
                vertexRemoves.ToBytes(),
            )
            if err != nil {
                return errors.Wrap(err, "save hypergraph")
            }
        }
    }

    for shardKey, hyperedgeAdds := range hg.GetHyperedgeAdds() {
        if hyperedgeAdds.IsDirty() {
            err := txn.Set(
                hypergraphHyperedgeAddsKey(shardKey),
                hyperedgeAdds.ToBytes(),
            )
            if err != nil {
                return errors.Wrap(err, "save hypergraph")
            }
        }
    }

    for shardKey, hyperedgeRemoves := range hg.GetHyperedgeRemoves() {
        if hyperedgeRemoves.IsDirty() {
            err := txn.Set(
                hypergraphHyperedgeRemovesKey(shardKey),
                hyperedgeRemoves.ToBytes(),
            )
            if err != nil {
                return errors.Wrap(err, "save hypergraph")
            }
        }
    }

    return nil
}
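The key helpers above give each CRDT phase set a fixed-width key: one shard byte (0x09), one phase tag, three bytes of ShardKey.L1, and thirty-two bytes of ShardKey.L2, which is exactly what `shardKeyFromKey` slices back apart. A hedged round-trip sketch of that layout, using local stand-in types rather than the store's actual declarations:

```go
package main

import "bytes"

// Stand-in for application.ShardKey, for illustration only.
type shardKey struct {
    L1 [3]byte
    L2 [32]byte
}

// makeVertexAddsKey mirrors hypergraphVertexAddsKey: shard byte, phase tag,
// then L1 and L2, for 37 bytes total.
func makeVertexAddsKey(sk shardKey) []byte {
    key := []byte{0x09, 0x00}
    key = append(key, sk.L1[:]...)
    key = append(key, sk.L2[:]...)
    return key
}

// parseShardKey mirrors shardKeyFromKey; note the phase tag is not retained.
func parseShardKey(key []byte) shardKey {
    var sk shardKey
    copy(sk.L1[:], key[2:5])
    copy(sk.L2[:], key[5:])
    return sk
}

func main() {
    sk := shardKey{L1: [3]byte{1, 2, 3}}
    parsed := parseShardKey(makeVertexAddsKey(sk))
    if parsed.L1 != sk.L1 || !bytes.Equal(parsed.L2[:], sk.L2[:]) {
        panic("round trip failed")
    }
}
```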
@ -11,6 +11,6 @@ BINARIES_DIR="$ROOT_DIR/target/release"

# Link the native VDF and execute tests
pushd "$NODE_DIR" > /dev/null
CGO_LDFLAGS="-L$BINARIES_DIR -L/opt/homebrew/Cellar/mpfr/4.2.1/lib -I/opt/homebrew/Cellar/mpfr/4.2.1/include -L/opt/homebrew/Cellar/gmp/6.3.0/lib -I/opt/homebrew/Cellar/gmp/6.3.0/include -L/opt/homebrew/Cellar/flint/3.1.3-p1/lib -I/opt/homebrew/Cellar/flint/3.1.3-p1/include -lbls48581 -lstdc++ -lvdf -ldl -lm -lflint -lgmp -lmpfr" \
CGO_LDFLAGS="-L$BINARIES_DIR -lbls48581 -lverenc -lvdf -ldl -lm -lflint -lgmp -lmpfr" \
CGO_ENABLED=1 \
go test "$@"

1
verenc/.gitignore
vendored
Normal file
@ -0,0 +1 @@
generated
9
verenc/README.md
Normal file
@ -0,0 +1,9 @@
# VerEnc

Wrapper for the Rust implementation of Verifiable Encryption (VerEnc) in [crates/verenc](../crates/verenc).

## Generate Go bindings

```sh
go generate
```
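The Go surface produced by these bindings is exercised in verenc/verenc_test.go later in this commit; a condensed sketch of the prove, verify, compress, recover round trip over a single 56-byte chunk, assuming the generated types shown there:

```go
package main

import (
    "bytes"

    "source.quilibrium.com/quilibrium/monorepo/verenc"
    generated "source.quilibrium.com/quilibrium/monorepo/verenc/generated/verenc"
)

// Condensed round trip over one 56-byte chunk, mirroring verenc_test.go.
func main() {
    data := make([]byte, 56)
    copy(data, []byte("hello"))

    pb := verenc.NewVerencProof(data) // proof plus blinding/decryption keys
    proof := generated.VerencProof{
        BlindingPubkey: pb.BlindingPubkey,
        EncryptionKey:  pb.EncryptionKey,
        Statement:      pb.Statement,
        Challenge:      pb.Challenge,
        Polycom:        pb.Polycom,
        Ctexts:         pb.Ctexts,
        SharesRands:    pb.SharesRands,
    }
    if !verenc.VerencVerify(proof) {
        panic("proof did not verify")
    }

    recovered := verenc.VerencRecover(generated.VerencDecrypt{
        BlindingPubkey: pb.BlindingPubkey,
        Statement:      pb.Statement,
        DecryptionKey:  pb.DecryptionKey,
        Ciphertexts:    verenc.VerencCompress(proof),
    })
    if !bytes.Equal(data, recovered) {
        panic("recovered plaintext mismatch")
    }
}
```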
14
verenc/generate.sh
Executable file
@ -0,0 +1,14 @@
#!/bin/bash
set -euxo pipefail

ROOT_DIR="${ROOT_DIR:-$( cd "$(dirname "$(realpath "$( dirname "${BASH_SOURCE[0]}" )")")" >/dev/null 2>&1 && pwd )}"

RUST_VERENC_PACKAGE="$ROOT_DIR/crates/verenc"
BINDINGS_DIR="$ROOT_DIR/verenc"

# Build the Rust VerEnc package in release mode
cargo build -p verenc --release

# Generate Go bindings
pushd "$RUST_VERENC_PACKAGE" > /dev/null
uniffi-bindgen-go src/lib.udl -o "$BINDINGS_DIR"/generated
8
verenc/go.mod
Normal file
@ -0,0 +1,8 @@
module source.quilibrium.com/quilibrium/monorepo/verenc

go 1.20

require (
    golang.org/x/crypto v0.24.0 // indirect
    golang.org/x/sys v0.21.0 // indirect
)
4
verenc/go.sum
Normal file
@ -0,0 +1,4 @@
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
16
verenc/test.sh
Executable file
@ -0,0 +1,16 @@
#!/bin/bash
set -euxo pipefail

# Run tests for the verenc package. Takes care of linking the native VerEnc library.
# Assumes that the VerEnc library has been built by running the generate.sh script in the same directory.

ROOT_DIR="${ROOT_DIR:-$( cd "$(dirname "$(realpath "$( dirname "${BASH_SOURCE[0]}" )")")" >/dev/null 2>&1 && pwd )}"

NODE_DIR="$ROOT_DIR/verenc"
BINARIES_DIR="$ROOT_DIR/target/release"

# Link the native VerEnc library and execute tests
pushd "$NODE_DIR" > /dev/null
CGO_LDFLAGS="-L$BINARIES_DIR -lverenc -ldl -lm" \
CGO_ENABLED=1 \
go test "$@"
35
verenc/verenc.go
Normal file
@ -0,0 +1,35 @@
package verenc

import (
    generated "source.quilibrium.com/quilibrium/monorepo/verenc/generated/verenc"
)

//go:generate ./generate.sh

func NewVerencProof(data []byte) generated.VerencProofAndBlindingKey {
    return generated.NewVerencProof(data)
}

func NewVerencProofEncryptOnly(data []byte, encryptionKey []byte) generated.VerencProofAndBlindingKey {
    return generated.NewVerencProofEncryptOnly(data, encryptionKey)
}

func VerencVerify(proof generated.VerencProof) bool {
    return generated.VerencVerify(proof)
}

func VerencCompress(proof generated.VerencProof) generated.CompressedCiphertext {
    return generated.VerencCompress(proof)
}

func VerencRecover(recovery generated.VerencDecrypt) []byte {
    return generated.VerencRecover(recovery)
}

func ChunkDataForVerenc(data []byte) [][]byte {
    return generated.ChunkDataForVerenc(data)
}

func CombineChunkedData(chunks [][]byte) []byte {
    return generated.CombineChunkedData(chunks)
}
102
verenc/verenc_test.go
Normal file
@ -0,0 +1,102 @@
package verenc_test

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "testing"

    "source.quilibrium.com/quilibrium/monorepo/verenc"
    generated "source.quilibrium.com/quilibrium/monorepo/verenc/generated/verenc"
)

func TestVerenc(t *testing.T) {
    data := make([]byte, 56)
    copy(data[1:6], []byte("hello"))
    proof := verenc.NewVerencProof(data)
    if !verenc.VerencVerify(generated.VerencProof{
        BlindingPubkey: proof.BlindingPubkey,
        EncryptionKey:  proof.EncryptionKey,
        Statement:      proof.Statement,
        Challenge:      proof.Challenge,
        Polycom:        proof.Polycom,
        Ctexts:         proof.Ctexts,
        SharesRands:    proof.SharesRands,
    }) {
        t.FailNow()
    }
    compressed := verenc.VerencCompress(generated.VerencProof{
        BlindingPubkey: proof.BlindingPubkey,
        EncryptionKey:  proof.EncryptionKey,
        Statement:      proof.Statement,
        Challenge:      proof.Challenge,
        Polycom:        proof.Polycom,
        Ctexts:         proof.Ctexts,
        SharesRands:    proof.SharesRands,
    })
    recovered := verenc.VerencRecover(generated.VerencDecrypt{
        BlindingPubkey: proof.BlindingPubkey,
        Statement:      proof.Statement,
        DecryptionKey:  proof.DecryptionKey,
        Ciphertexts:    compressed,
    })
    if !bytes.Equal(data, recovered) {
        t.FailNow()
    }
}

func TestDataChunking(t *testing.T) {
    data := make([]byte, 1300)
    rand.Read(data)
    chunks := verenc.ChunkDataForVerenc(data)
    result := verenc.CombineChunkedData(chunks)
    if !bytes.Equal(data, result[:1300]) {
        t.FailNow()
    }
}

func TestVerencWithChunking(t *testing.T) {
    data := make([]byte, 1300)
    rand.Read(data)
    chunks := verenc.ChunkDataForVerenc(data)
    results := [][]byte{}
    for i, chunk := range chunks {
        proof := verenc.NewVerencProof(chunk)
        if !verenc.VerencVerify(generated.VerencProof{
            BlindingPubkey: proof.BlindingPubkey,
            EncryptionKey:  proof.EncryptionKey,
            Statement:      proof.Statement,
            Challenge:      proof.Challenge,
            Polycom:        proof.Polycom,
            Ctexts:         proof.Ctexts,
            SharesRands:    proof.SharesRands,
        }) {
            t.FailNow()
        }
        compressed := verenc.VerencCompress(generated.VerencProof{
            BlindingPubkey: proof.BlindingPubkey,
            EncryptionKey:  proof.EncryptionKey,
            Statement:      proof.Statement,
            Challenge:      proof.Challenge,
            Polycom:        proof.Polycom,
            Ctexts:         proof.Ctexts,
            SharesRands:    proof.SharesRands,
        })
        recovered := verenc.VerencRecover(generated.VerencDecrypt{
            BlindingPubkey: proof.BlindingPubkey,
            Statement:      proof.Statement,
            DecryptionKey:  proof.DecryptionKey,
            Ciphertexts:    compressed,
        })
        if !bytes.Equal(chunk, recovered) {
            fmt.Printf("recovered did not equal chunk %d: %x, %x\n", i, recovered, chunk)
            t.FailNow()
        }
        results = append(results, recovered)
    }
    result := verenc.CombineChunkedData(results)
    if !bytes.Equal(data, result[:1300]) {
        fmt.Printf("result did not equal original data, %x, %x\n", result[:1300], data)
        t.FailNow()
    }
}
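One detail worth noting in both chunking tests: the comparison is against `result[:1300]`, not `result`. CombineChunkedData appears to return the full concatenation of fixed-size chunks, so callers must track the original length and trim the tail padding themselves, e.g.:

```go
package main

import "source.quilibrium.com/quilibrium/monorepo/verenc"

// recoverOriginal trims the chunk padding left by CombineChunkedData, as the
// tests above do with result[:1300]; the helper itself is illustrative.
func recoverOriginal(chunks [][]byte, originalLen int) []byte {
    combined := verenc.CombineChunkedData(chunks)
    return combined[:originalLen]
}
```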