v2.1.0.18 (#508)

* experiment: reject bad peer info messages

* v2.1.0.18 preview

* add tagged sync

* Add missing hypergraph changes

* small tweaks to sync

* allow local sync, use it for provers with workers

* missing file

* resolve build error

* resolve sync issue, remove raw sync

* resolve deletion promotion bug

* resolve sync abstraction leak from tree deletion changes

* rearrange prover sync

* remove pruning from sync

* restore removed sync flag

* fix: sync, event stream deadlock, heuristic scoring of better shards

* resolve hanging shutdown + pubsub proxy issue

* further bugfixes: sync (restore old leaf sync), pubsub shutdown, merge events

* fix: clean up rust ffi, background coverage events, and sync tweaks

* fix: linking issue for channel, connectivity test aggression, sync regression, join tests

* fix: disjoint sync, improper application of filter

* resolve sync/reel/validation deadlock

* adjust sync to handle no leaf edge cases, multi-path segment traversal

* use simpler sync

* faster, simpler sync with some debug extras

* migration to recalculate

* don't use batch

* square up the roots

* fix nil pointer

* fix: seniority calculation, sync race condition, migration

* make sync dumber

* fix: tree deletion issue

* fix: missing seniority merge request canonical serialization

* address issues from previous commit test

* stale workers should be cleared

* remove missing gap check

* rearrange collect, reduce sync logging noise

* fix: the disjoint leaf/branch sync case

* nuclear option on sync failures

* v2.1.0.18, finalized
Cassandra Heart 2026-02-08 23:51:51 -06:00 committed by GitHub
parent d2b0651e2d
commit 12996487c3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
167 changed files with 42172 additions and 3588 deletions

Cargo.lock (generated)

@ -235,6 +235,22 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb"
[[package]]
name = "bitcoin-internals"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb"
[[package]]
name = "bitcoin_hashes"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b"
dependencies = [
"bitcoin-internals",
"hex-conservative",
]
[[package]]
name = "bitflags"
version = "1.3.2"
@ -447,6 +463,7 @@ dependencies = [
"base64 0.22.1",
"criterion 0.4.0",
"ed448-goldilocks-plus 0.11.2",
"ed448-rust",
"hex 0.4.3",
"hkdf",
"hmac",
@ -881,10 +898,59 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer 0.10.4",
"const-oid",
"crypto-common",
"subtle",
]
[[package]]
name = "dkls23"
version = "0.1.1"
dependencies = [
"bitcoin_hashes",
"elliptic-curve",
"getrandom 0.2.15",
"hex 0.4.3",
"k256",
"p256",
"rand 0.8.5",
"serde",
"serde_bytes",
"sha3 0.10.8",
]
[[package]]
name = "dkls23_ffi"
version = "0.1.0"
dependencies = [
"criterion 0.5.1",
"dkls23",
"hex 0.4.3",
"k256",
"p256",
"rand 0.8.5",
"serde",
"serde_json",
"sha2 0.10.8",
"thiserror 1.0.63",
"uniffi",
]
[[package]]
name = "ecdsa"
version = "0.16.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca"
dependencies = [
"der",
"digest 0.10.7",
"elliptic-curve",
"rfc6979",
"serdect 0.2.0",
"signature",
"spki",
]
[[package]]
name = "ed448-bulletproofs"
version = "1.0.0"
@ -1186,6 +1252,12 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hex-conservative"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20"
[[package]]
name = "hkdf"
version = "0.12.4"
@ -1273,6 +1345,21 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "k256"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b"
dependencies = [
"cfg-if",
"ecdsa",
"elliptic-curve",
"once_cell",
"serdect 0.2.0",
"sha2 0.10.8",
"signature",
]
[[package]]
name = "keccak"
version = "0.1.5"
@ -1475,6 +1562,19 @@ version = "6.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
[[package]]
name = "p256"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b"
dependencies = [
"ecdsa",
"elliptic-curve",
"primeorder",
"serdect 0.2.0",
"sha2 0.10.8",
]
[[package]]
name = "paste"
version = "1.0.15"
@ -1558,6 +1658,16 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "primeorder"
version = "0.13.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6"
dependencies = [
"elliptic-curve",
"serdect 0.2.0",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
@ -1697,6 +1807,16 @@ version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "rfc6979"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
dependencies = [
"hmac",
"subtle",
]
[[package]]
name = "rpm"
version = "0.1.0"
@ -1798,13 +1918,24 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.219"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
dependencies = [
"serde_core",
"serde_derive",
]
[[package]]
name = "serde_bytes"
version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
dependencies = [
"serde",
"serde_core",
]
[[package]]
name = "serde_cbor"
version = "0.11.2"
@ -1816,10 +1947,19 @@ dependencies = [
]
[[package]]
name = "serde_derive"
version = "1.0.219"
name = "serde_core"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",


@ -25,7 +25,8 @@ members = [
"crates/rpm",
"crates/bulletproofs",
"crates/verenc",
"crates/ferret"
"crates/ferret",
"crates/dkls23_ffi"
]
[profile.release]


@ -19,7 +19,7 @@ case "$os_type" in
# Check if the architecture is ARM
if [[ "$(uname -m)" == "arm64" ]]; then
# MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -L/usr/local/lib/ -L/opt/homebrew/Cellar/openssl@3/3.4.1/lib -lstdc++ -lferret -ldl -lm -lcrypto -lssl'" "$@"
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -L/usr/local/lib/ -L/opt/homebrew/Cellar/openssl@3/3.6.1/lib -lbls48581 -lferret -lbulletproofs -ldl -lm -lflint -lgmp -lmpfr -lstdc++ -lcrypto -lssl'" "$@"
else
echo "Unsupported platform"
exit 1


@ -10,6 +10,8 @@ replace source.quilibrium.com/quilibrium/monorepo/consensus => ../consensus
replace github.com/libp2p/go-libp2p => ../go-libp2p
replace github.com/multiformats/go-multiaddr => ../go-multiaddr
require (
github.com/markkurossi/tabulate v0.0.0-20230223130100-d4965869b123
github.com/pkg/errors v0.9.1


@ -38,8 +38,6 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw=
github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=


@ -75,8 +75,8 @@ func (d *DoubleRatchetEncryptedChannel) EstablishTwoPartyChannel(
}
state := NewDoubleRatchet(
sessionKey[:36],
sessionKey[36:64],
sessionKey[:32],
sessionKey[32:64],
sessionKey[64:],
isSender,
sendingSignedPrePrivateKey,
@ -95,7 +95,10 @@ func (d *DoubleRatchetEncryptedChannel) EncryptTwoPartyMessage(
Message: message, // buildutils:allow-slice-alias this assignment is ephemeral
}
result := DoubleRatchetEncrypt(stateAndMessage)
result, err := DoubleRatchetEncrypt(stateAndMessage)
if err != nil {
return "", nil, errors.Wrap(err, "encrypt two party message")
}
envelope = &channel.P2PChannelEnvelope{}
err = json.Unmarshal([]byte(result.Envelope), envelope)
if err != nil {
@ -120,7 +123,10 @@ func (d *DoubleRatchetEncryptedChannel) DecryptTwoPartyMessage(
Envelope: string(envelopeJson),
}
result := DoubleRatchetDecrypt(stateAndEnvelope)
result, err := DoubleRatchetDecrypt(stateAndEnvelope)
if err != nil {
return "", nil, errors.Wrap(err, "decrypt two party message")
}
return result.RatchetState, result.Message, nil
}
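Editorial note: the EstablishTwoPartyChannel change above replaces the old 36/28-byte split of the X3DH output with three equal 32-byte segments of the 96-byte session key. Below is a minimal sketch of the resulting layout, assuming the arguments map onto the new_double_ratchet FFI parameters declared later in this diff (session key, sending header key, next receiving header key); the helper name is illustrative and not part of the commit.

// splitSessionKey is an illustrative helper (not in this commit) showing the
// 32/32/32 partition of the 96-byte X3DH session key that the updated
// EstablishTwoPartyChannel passes to NewDoubleRatchet.
func splitSessionKey(sessionKey []byte) (sessionSecret, sendingHeaderKey, nextReceivingHeaderKey []byte) {
	// Assumes len(sessionKey) == 96, as requested from SenderX3dh/ReceiverX3dh.
	return sessionKey[:32], sessionKey[32:64], sessionKey[64:]
}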
@ -162,45 +168,88 @@ func NewTripleRatchet(
func DoubleRatchetEncrypt(
ratchetStateAndMessage generated.DoubleRatchetStateAndMessage,
) generated.DoubleRatchetStateAndEnvelope {
return generated.DoubleRatchetEncrypt(ratchetStateAndMessage)
) (generated.DoubleRatchetStateAndEnvelope, error) {
result, err := generated.DoubleRatchetEncrypt(ratchetStateAndMessage)
if err != nil {
return generated.DoubleRatchetStateAndEnvelope{}, err
}
return result, nil
}
func DoubleRatchetDecrypt(
ratchetStateAndEnvelope generated.DoubleRatchetStateAndEnvelope,
) generated.DoubleRatchetStateAndMessage {
return generated.DoubleRatchetDecrypt(ratchetStateAndEnvelope)
) (generated.DoubleRatchetStateAndMessage, error) {
result, err := generated.DoubleRatchetDecrypt(ratchetStateAndEnvelope)
if err != nil {
return generated.DoubleRatchetStateAndMessage{}, err
}
return result, nil
}
func TripleRatchetInitRound1(
ratchetStateAndMetadata generated.TripleRatchetStateAndMetadata,
) generated.TripleRatchetStateAndMetadata {
return generated.TripleRatchetInitRound1(ratchetStateAndMetadata)
result, err := generated.TripleRatchetInitRound1(ratchetStateAndMetadata)
if err != nil {
return generated.TripleRatchetStateAndMetadata{
Metadata: map[string]string{"error": err.Error()},
}
}
return result
}
func TripleRatchetInitRound2(
ratchetStateAndMetadata generated.TripleRatchetStateAndMetadata,
) generated.TripleRatchetStateAndMetadata {
return generated.TripleRatchetInitRound2(ratchetStateAndMetadata)
result, err := generated.TripleRatchetInitRound2(ratchetStateAndMetadata)
if err != nil {
return generated.TripleRatchetStateAndMetadata{
Metadata: map[string]string{"error": err.Error()},
}
}
return result
}
func TripleRatchetInitRound3(
ratchetStateAndMetadata generated.TripleRatchetStateAndMetadata,
) generated.TripleRatchetStateAndMetadata {
return generated.TripleRatchetInitRound3(ratchetStateAndMetadata)
result, err := generated.TripleRatchetInitRound3(ratchetStateAndMetadata)
if err != nil {
return generated.TripleRatchetStateAndMetadata{
Metadata: map[string]string{"error": err.Error()},
}
}
return result
}
func TripleRatchetInitRound4(
ratchetStateAndMetadata generated.TripleRatchetStateAndMetadata,
) generated.TripleRatchetStateAndMetadata {
return generated.TripleRatchetInitRound4(ratchetStateAndMetadata)
result, err := generated.TripleRatchetInitRound4(ratchetStateAndMetadata)
if err != nil {
return generated.TripleRatchetStateAndMetadata{
Metadata: map[string]string{"error": err.Error()},
}
}
return result
}
func TripleRatchetEncrypt(
ratchetStateAndMessage generated.TripleRatchetStateAndMessage,
) generated.TripleRatchetStateAndEnvelope {
return generated.TripleRatchetEncrypt(ratchetStateAndMessage)
result, err := generated.TripleRatchetEncrypt(ratchetStateAndMessage)
if err != nil {
return generated.TripleRatchetStateAndEnvelope{}
}
return result
}
func TripleRatchetDecrypt(
ratchetStateAndEnvelope generated.TripleRatchetStateAndEnvelope,
) generated.TripleRatchetStateAndMessage {
return generated.TripleRatchetDecrypt(ratchetStateAndEnvelope)
result, err := generated.TripleRatchetDecrypt(ratchetStateAndEnvelope)
if err != nil {
return generated.TripleRatchetStateAndMessage{}
}
return result
}
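Editorial note: with the wrapper changes above, DoubleRatchetEncrypt and DoubleRatchetDecrypt now return an error instead of a bare struct, the TripleRatchetInitRound1 through TripleRatchetInitRound4 wrappers keep their metadata-only signature and report failures through a Metadata["error"] entry, and TripleRatchetEncrypt/TripleRatchetDecrypt return empty structs on failure. The following caller-side sketch is illustrative only, assuming the channel and generated packages are imported as in this file and that github.com/pkg/errors is available; the function name is invented for the example.

// sendAndInit is an illustrative sketch (not part of this commit) showing how
// a caller adapts to the new wrapper signatures.
func sendAndInit(
	state string,
	plaintext []byte,
	metadata generated.TripleRatchetStateAndMetadata,
) error {
	// DoubleRatchetEncrypt now propagates FFI errors instead of swallowing them.
	out, err := channel.DoubleRatchetEncrypt(generated.DoubleRatchetStateAndMessage{
		RatchetState: state,
		Message:      plaintext,
	})
	if err != nil {
		return errors.Wrap(err, "double ratchet encrypt")
	}
	_ = out.Envelope // JSON-encoded envelope, as unmarshalled in EncryptTwoPartyMessage

	// The triple ratchet init rounds still return only state and metadata, so
	// failures surface as an "error" metadata entry rather than a Go error.
	round1 := channel.TripleRatchetInitRound1(metadata)
	if msg, failed := round1.Metadata["error"]; failed {
		return errors.New("triple ratchet init round 1: " + msg)
	}
	return nil
}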


@ -4,6 +4,7 @@ import (
"bytes"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"sort"
"testing"
@ -60,6 +61,320 @@ func remapOutputs(maps map[string]map[string]string) map[string]map[string]strin
return out
}
// TestX3DHAndDoubleRatchet tests X3DH key agreement and double ratchet session
// establishment between two parties.
func TestX3DHAndDoubleRatchet(t *testing.T) {
// Generate two peers with their identity and pre-keys
// Using ScalarEd448 which produces 56-byte private keys (Scalars)
// and 57-byte public keys (Edwards compressed)
alice := generatePeer()
bob := generatePeer()
// Log key sizes for debugging
t.Logf("Alice identity private key size: %d bytes", len(alice.identityKey.Bytes()))
t.Logf("Alice identity public key size: %d bytes", len(alice.identityPubKey.ToAffineCompressed()))
t.Logf("Alice signed pre-key private size: %d bytes", len(alice.signedPreKey.Bytes()))
t.Logf("Alice signed pre-key public size: %d bytes", len(alice.signedPrePubKey.ToAffineCompressed()))
// Test X3DH key agreement
// Alice is sender, Bob is receiver
// Sender needs: own identity private, own ephemeral private, peer identity public, peer signed pre public
// Receiver needs: own identity private, own signed pre private, peer identity public, peer ephemeral public
// For X3DH, Alice uses her signedPreKey as the ephemeral key
aliceSessionKeyJson := generated.SenderX3dh(
alice.identityKey.Bytes(), // sending identity private key (56 bytes)
alice.signedPreKey.Bytes(), // sending ephemeral private key (56 bytes)
bob.identityPubKey.ToAffineCompressed(), // receiving identity public key (57 bytes)
bob.signedPrePubKey.ToAffineCompressed(), // receiving signed pre-key public (57 bytes)
96, // session key length
)
t.Logf("Alice X3DH result: %s", aliceSessionKeyJson)
// Check if Alice got an error
if len(aliceSessionKeyJson) == 0 || aliceSessionKeyJson[0] != '"' {
t.Fatalf("Alice X3DH failed: %s", aliceSessionKeyJson)
}
// Bob performs receiver side X3DH
bobSessionKeyJson := generated.ReceiverX3dh(
bob.identityKey.Bytes(), // sending identity private key (56 bytes)
bob.signedPreKey.Bytes(), // sending signed pre private key (56 bytes)
alice.identityPubKey.ToAffineCompressed(), // receiving identity public key (57 bytes)
alice.signedPrePubKey.ToAffineCompressed(), // receiving ephemeral public key (57 bytes)
96, // session key length
)
t.Logf("Bob X3DH result: %s", bobSessionKeyJson)
// Check if Bob got an error
if len(bobSessionKeyJson) == 0 || bobSessionKeyJson[0] != '"' {
t.Fatalf("Bob X3DH failed: %s", bobSessionKeyJson)
}
// Decode session keys and verify they match
var aliceSessionKeyB64, bobSessionKeyB64 string
if err := json.Unmarshal([]byte(aliceSessionKeyJson), &aliceSessionKeyB64); err != nil {
t.Fatalf("Failed to parse Alice session key: %v", err)
}
if err := json.Unmarshal([]byte(bobSessionKeyJson), &bobSessionKeyB64); err != nil {
t.Fatalf("Failed to parse Bob session key: %v", err)
}
aliceSessionKey, err := base64.StdEncoding.DecodeString(aliceSessionKeyB64)
if err != nil {
t.Fatalf("Failed to decode Alice session key: %v", err)
}
bobSessionKey, err := base64.StdEncoding.DecodeString(bobSessionKeyB64)
if err != nil {
t.Fatalf("Failed to decode Bob session key: %v", err)
}
assert.Equal(t, 96, len(aliceSessionKey), "Alice session key should be 96 bytes")
assert.Equal(t, 96, len(bobSessionKey), "Bob session key should be 96 bytes")
assert.Equal(t, aliceSessionKey, bobSessionKey, "Session keys should match")
t.Logf("X3DH session key established successfully (%d bytes)", len(aliceSessionKey))
// Now test double ratchet session establishment
// Use the DoubleRatchetEncryptedChannel interface
ch := channel.NewDoubleRatchetEncryptedChannel()
// Alice establishes session as sender
aliceState, err := ch.EstablishTwoPartyChannel(
true, // isSender
alice.identityKey.Bytes(),
alice.signedPreKey.Bytes(),
bob.identityPubKey.ToAffineCompressed(),
bob.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Alice failed to establish channel: %v", err)
}
t.Logf("Alice established double ratchet session")
// Bob establishes session as receiver
bobState, err := ch.EstablishTwoPartyChannel(
false, // isSender (receiver)
bob.identityKey.Bytes(),
bob.signedPreKey.Bytes(),
alice.identityPubKey.ToAffineCompressed(),
alice.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Bob failed to establish channel: %v", err)
}
t.Logf("Bob established double ratchet session")
// Debug: log the ratchet states
t.Logf("Alice initial state length: %d", len(aliceState))
t.Logf("Bob initial state length: %d", len(bobState))
// Test message encryption/decryption
testMessage := []byte("Hello, Bob! This is a secret message from Alice.")
// Alice encrypts
newAliceState, envelope, err := ch.EncryptTwoPartyMessage(aliceState, testMessage)
if err != nil {
t.Fatalf("Alice failed to encrypt: %v", err)
}
t.Logf("Alice encrypted message")
t.Logf("Alice state after encrypt length: %d", len(newAliceState))
t.Logf("Envelope: %+v", envelope)
aliceState = newAliceState
// Bob decrypts
newBobState, decrypted, err := ch.DecryptTwoPartyMessage(bobState, envelope)
if err != nil {
t.Fatalf("Bob failed to decrypt: %v", err)
}
t.Logf("Bob state after decrypt length: %d", len(newBobState))
t.Logf("Decrypted message length: %d", len(decrypted))
// Check if decryption actually worked
if len(newBobState) == 0 {
t.Logf("WARNING: Bob's new ratchet state is empty - decryption likely failed silently")
}
assert.Equal(t, testMessage, decrypted, "Decrypted message should match original")
t.Logf("Bob decrypted message successfully: %s", string(decrypted))
bobState = newBobState
// Test reverse direction: Bob sends to Alice
replyMessage := []byte("Hi Alice! Got your message.")
bobState, envelope2, err := ch.EncryptTwoPartyMessage(bobState, replyMessage)
if err != nil {
t.Fatalf("Bob failed to encrypt reply: %v", err)
}
aliceState, decrypted2, err := ch.DecryptTwoPartyMessage(aliceState, envelope2)
if err != nil {
t.Fatalf("Alice failed to decrypt reply: %v", err)
}
assert.Equal(t, replyMessage, decrypted2, "Decrypted reply should match original")
t.Logf("Alice decrypted reply successfully: %s", string(decrypted2))
// Suppress unused variable warnings
_ = aliceState
_ = bobState
}
// TestReceiverSendsFirst tests that the X3DH "receiver" CANNOT send first.
// This confirms that the Signal protocol requires the sender to send first.
// The test is expected to fail - documenting the protocol limitation.
func TestReceiverSendsFirst(t *testing.T) {
t.Skip("Expected to fail - Signal protocol requires sender to send first")
alice := generatePeer()
bob := generatePeer()
ch := channel.NewDoubleRatchetEncryptedChannel()
// Alice establishes as sender
aliceState, err := ch.EstablishTwoPartyChannel(
true,
alice.identityKey.Bytes(),
alice.signedPreKey.Bytes(),
bob.identityPubKey.ToAffineCompressed(),
bob.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Alice failed to establish: %v", err)
}
// Bob establishes as receiver
bobState, err := ch.EstablishTwoPartyChannel(
false,
bob.identityKey.Bytes(),
bob.signedPreKey.Bytes(),
alice.identityPubKey.ToAffineCompressed(),
alice.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Bob failed to establish: %v", err)
}
// BOB SENDS FIRST (he's the X3DH receiver but sends first) - THIS WILL FAIL
bobMessage := []byte("Hello Alice! I'm the receiver but I'm sending first.")
bobState, envelope, err := ch.EncryptTwoPartyMessage(bobState, bobMessage)
if err != nil {
t.Fatalf("Bob (receiver) failed to encrypt first message: %v", err)
}
t.Logf("Bob (X3DH receiver) encrypted first message successfully")
// Alice decrypts - THIS FAILS because receiver can't send first
aliceState, decrypted, err := ch.DecryptTwoPartyMessage(aliceState, envelope)
if err != nil {
t.Fatalf("Alice failed to decrypt Bob's first message: %v", err)
}
assert.Equal(t, bobMessage, decrypted)
t.Logf("Alice decrypted Bob's first message: %s", string(decrypted))
_ = aliceState
_ = bobState
}
// TestHandshakePattern tests the correct handshake pattern:
// Sender (Alice) sends hello first, then receiver (Bob) can send.
func TestHandshakePattern(t *testing.T) {
alice := generatePeer()
bob := generatePeer()
ch := channel.NewDoubleRatchetEncryptedChannel()
// Alice establishes as sender
aliceState, err := ch.EstablishTwoPartyChannel(
true,
alice.identityKey.Bytes(),
alice.signedPreKey.Bytes(),
bob.identityPubKey.ToAffineCompressed(),
bob.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Alice failed to establish: %v", err)
}
// Bob establishes as receiver
bobState, err := ch.EstablishTwoPartyChannel(
false,
bob.identityKey.Bytes(),
bob.signedPreKey.Bytes(),
alice.identityPubKey.ToAffineCompressed(),
alice.signedPrePubKey.ToAffineCompressed(),
)
if err != nil {
t.Fatalf("Bob failed to establish: %v", err)
}
// Step 1: Alice (sender) sends hello first
helloMsg := []byte("hello")
aliceState, helloEnvelope, err := ch.EncryptTwoPartyMessage(aliceState, helloMsg)
if err != nil {
t.Fatalf("Alice failed to encrypt hello: %v", err)
}
t.Logf("Alice sent hello")
// Step 2: Bob receives hello
bobState, decryptedHello, err := ch.DecryptTwoPartyMessage(bobState, helloEnvelope)
if err != nil {
t.Fatalf("Bob failed to decrypt hello: %v", err)
}
assert.Equal(t, helloMsg, decryptedHello)
t.Logf("Bob received hello: %s", string(decryptedHello))
// Step 3: Bob sends ack (now Bob can send after receiving)
ackMsg := []byte("ack")
bobState, ackEnvelope, err := ch.EncryptTwoPartyMessage(bobState, ackMsg)
if err != nil {
t.Fatalf("Bob failed to encrypt ack: %v", err)
}
t.Logf("Bob sent ack")
// Step 4: Alice receives ack
aliceState, decryptedAck, err := ch.DecryptTwoPartyMessage(aliceState, ackEnvelope)
if err != nil {
t.Fatalf("Alice failed to decrypt ack: %v", err)
}
assert.Equal(t, ackMsg, decryptedAck)
t.Logf("Alice received ack: %s", string(decryptedAck))
// Now both parties can send freely
// Bob sends a real message
bobMessage := []byte("Now I can send real messages!")
bobState, bobEnvelope, err := ch.EncryptTwoPartyMessage(bobState, bobMessage)
if err != nil {
t.Fatalf("Bob failed to encrypt message: %v", err)
}
aliceState, decryptedBob, err := ch.DecryptTwoPartyMessage(aliceState, bobEnvelope)
if err != nil {
t.Fatalf("Alice failed to decrypt Bob's message: %v", err)
}
assert.Equal(t, bobMessage, decryptedBob)
t.Logf("Alice received Bob's message: %s", string(decryptedBob))
// Alice sends a real message
aliceMessage := []byte("And I can keep sending too!")
aliceState, aliceEnvelope, err := ch.EncryptTwoPartyMessage(aliceState, aliceMessage)
if err != nil {
t.Fatalf("Alice failed to encrypt message: %v", err)
}
bobState, decryptedAlice, err := ch.DecryptTwoPartyMessage(bobState, aliceEnvelope)
if err != nil {
t.Fatalf("Bob failed to decrypt Alice's message: %v", err)
}
assert.Equal(t, aliceMessage, decryptedAlice)
t.Logf("Bob received Alice's message: %s", string(decryptedAlice))
_ = aliceState
_ = bobState
}
func TestChannel(t *testing.T) {
peers := []*peer{}
for i := 0; i < 4; i++ {


@ -344,11 +344,20 @@ func uniffiCheckChecksums() {
// If this happens try cleaning and rebuilding your project
panic("channel: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_decrypt_inbox_message()
})
if checksum != 59344 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_decrypt_inbox_message: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_decrypt()
})
if checksum != 13335 {
if checksum != 59687 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_double_ratchet_decrypt: UniFFI API checksum mismatch")
}
@ -357,11 +366,56 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_double_ratchet_encrypt()
})
if checksum != 59209 {
if checksum != 57909 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_double_ratchet_encrypt: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_encrypt_inbox_message()
})
if checksum != 48273 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_encrypt_inbox_message: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_generate_ed448()
})
if checksum != 62612 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_generate_ed448: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_generate_x448()
})
if checksum != 40212 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_generate_x448: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_get_pubkey_ed448()
})
if checksum != 46020 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_get_pubkey_ed448: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_get_pubkey_x448()
})
if checksum != 37789 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_get_pubkey_x448: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_new_double_ratchet()
@ -398,11 +452,20 @@ func uniffiCheckChecksums() {
panic("channel: uniffi_channel_checksum_func_sender_x3dh: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_sign_ed448()
})
if checksum != 28573 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_sign_ed448: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_decrypt()
})
if checksum != 42324 {
if checksum != 15842 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_decrypt: UniFFI API checksum mismatch")
}
@ -411,7 +474,7 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_encrypt()
})
if checksum != 61617 {
if checksum != 23451 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_encrypt: UniFFI API checksum mismatch")
}
@ -420,7 +483,7 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_1()
})
if checksum != 42612 {
if checksum != 63112 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_1: UniFFI API checksum mismatch")
}
@ -429,7 +492,7 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_2()
})
if checksum != 11875 {
if checksum != 34197 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_2: UniFFI API checksum mismatch")
}
@ -438,7 +501,7 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_3()
})
if checksum != 50331 {
if checksum != 39476 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_3: UniFFI API checksum mismatch")
}
@ -447,11 +510,29 @@ func uniffiCheckChecksums() {
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_init_round_4()
})
if checksum != 14779 {
if checksum != 19263 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_init_round_4: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_triple_ratchet_resize()
})
if checksum != 57124 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_triple_ratchet_resize: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_channel_checksum_func_verify_ed448()
})
if checksum != 57200 {
// If this happens try cleaning and rebuilding your project
panic("channel: uniffi_channel_checksum_func_verify_ed448: UniFFI API checksum mismatch")
}
}
}
type FfiConverterUint8 struct{}
@ -783,6 +864,228 @@ func (_ FfiDestroyerTripleRatchetStateAndMetadata) Destroy(value TripleRatchetSt
value.Destroy()
}
type CryptoError struct {
err error
}
// Convenience method to turn *CryptoError into error
// Avoids treating a nil pointer as a non-nil error interface
func (err *CryptoError) AsError() error {
if err == nil {
return nil
} else {
return err
}
}
func (err CryptoError) Error() string {
return fmt.Sprintf("CryptoError: %s", err.err.Error())
}
func (err CryptoError) Unwrap() error {
return err.err
}
// Err* are used for checking error type with `errors.Is`
var ErrCryptoErrorInvalidState = fmt.Errorf("CryptoErrorInvalidState")
var ErrCryptoErrorInvalidEnvelope = fmt.Errorf("CryptoErrorInvalidEnvelope")
var ErrCryptoErrorDecryptionFailed = fmt.Errorf("CryptoErrorDecryptionFailed")
var ErrCryptoErrorEncryptionFailed = fmt.Errorf("CryptoErrorEncryptionFailed")
var ErrCryptoErrorSerializationFailed = fmt.Errorf("CryptoErrorSerializationFailed")
var ErrCryptoErrorInvalidInput = fmt.Errorf("CryptoErrorInvalidInput")
// Variant structs
type CryptoErrorInvalidState struct {
message string
}
func NewCryptoErrorInvalidState() *CryptoError {
return &CryptoError{err: &CryptoErrorInvalidState{}}
}
func (e CryptoErrorInvalidState) destroy() {
}
func (err CryptoErrorInvalidState) Error() string {
return fmt.Sprintf("InvalidState: %s", err.message)
}
func (self CryptoErrorInvalidState) Is(target error) bool {
return target == ErrCryptoErrorInvalidState
}
type CryptoErrorInvalidEnvelope struct {
message string
}
func NewCryptoErrorInvalidEnvelope() *CryptoError {
return &CryptoError{err: &CryptoErrorInvalidEnvelope{}}
}
func (e CryptoErrorInvalidEnvelope) destroy() {
}
func (err CryptoErrorInvalidEnvelope) Error() string {
return fmt.Sprintf("InvalidEnvelope: %s", err.message)
}
func (self CryptoErrorInvalidEnvelope) Is(target error) bool {
return target == ErrCryptoErrorInvalidEnvelope
}
type CryptoErrorDecryptionFailed struct {
message string
}
func NewCryptoErrorDecryptionFailed() *CryptoError {
return &CryptoError{err: &CryptoErrorDecryptionFailed{}}
}
func (e CryptoErrorDecryptionFailed) destroy() {
}
func (err CryptoErrorDecryptionFailed) Error() string {
return fmt.Sprintf("DecryptionFailed: %s", err.message)
}
func (self CryptoErrorDecryptionFailed) Is(target error) bool {
return target == ErrCryptoErrorDecryptionFailed
}
type CryptoErrorEncryptionFailed struct {
message string
}
func NewCryptoErrorEncryptionFailed() *CryptoError {
return &CryptoError{err: &CryptoErrorEncryptionFailed{}}
}
func (e CryptoErrorEncryptionFailed) destroy() {
}
func (err CryptoErrorEncryptionFailed) Error() string {
return fmt.Sprintf("EncryptionFailed: %s", err.message)
}
func (self CryptoErrorEncryptionFailed) Is(target error) bool {
return target == ErrCryptoErrorEncryptionFailed
}
type CryptoErrorSerializationFailed struct {
message string
}
func NewCryptoErrorSerializationFailed() *CryptoError {
return &CryptoError{err: &CryptoErrorSerializationFailed{}}
}
func (e CryptoErrorSerializationFailed) destroy() {
}
func (err CryptoErrorSerializationFailed) Error() string {
return fmt.Sprintf("SerializationFailed: %s", err.message)
}
func (self CryptoErrorSerializationFailed) Is(target error) bool {
return target == ErrCryptoErrorSerializationFailed
}
type CryptoErrorInvalidInput struct {
message string
}
func NewCryptoErrorInvalidInput() *CryptoError {
return &CryptoError{err: &CryptoErrorInvalidInput{}}
}
func (e CryptoErrorInvalidInput) destroy() {
}
func (err CryptoErrorInvalidInput) Error() string {
return fmt.Sprintf("InvalidInput: %s", err.message)
}
func (self CryptoErrorInvalidInput) Is(target error) bool {
return target == ErrCryptoErrorInvalidInput
}
type FfiConverterCryptoError struct{}
var FfiConverterCryptoErrorINSTANCE = FfiConverterCryptoError{}
func (c FfiConverterCryptoError) Lift(eb RustBufferI) *CryptoError {
return LiftFromRustBuffer[*CryptoError](c, eb)
}
func (c FfiConverterCryptoError) Lower(value *CryptoError) C.RustBuffer {
return LowerIntoRustBuffer[*CryptoError](c, value)
}
func (c FfiConverterCryptoError) Read(reader io.Reader) *CryptoError {
errorID := readUint32(reader)
message := FfiConverterStringINSTANCE.Read(reader)
switch errorID {
case 1:
return &CryptoError{&CryptoErrorInvalidState{message}}
case 2:
return &CryptoError{&CryptoErrorInvalidEnvelope{message}}
case 3:
return &CryptoError{&CryptoErrorDecryptionFailed{message}}
case 4:
return &CryptoError{&CryptoErrorEncryptionFailed{message}}
case 5:
return &CryptoError{&CryptoErrorSerializationFailed{message}}
case 6:
return &CryptoError{&CryptoErrorInvalidInput{message}}
default:
panic(fmt.Sprintf("Unknown error code %d in FfiConverterCryptoError.Read()", errorID))
}
}
func (c FfiConverterCryptoError) Write(writer io.Writer, value *CryptoError) {
switch variantValue := value.err.(type) {
case *CryptoErrorInvalidState:
writeInt32(writer, 1)
case *CryptoErrorInvalidEnvelope:
writeInt32(writer, 2)
case *CryptoErrorDecryptionFailed:
writeInt32(writer, 3)
case *CryptoErrorEncryptionFailed:
writeInt32(writer, 4)
case *CryptoErrorSerializationFailed:
writeInt32(writer, 5)
case *CryptoErrorInvalidInput:
writeInt32(writer, 6)
default:
_ = variantValue
panic(fmt.Sprintf("invalid error value `%v` in FfiConverterCryptoError.Write", value))
}
}
type FfiDestroyerCryptoError struct{}
func (_ FfiDestroyerCryptoError) Destroy(value *CryptoError) {
switch variantValue := value.err.(type) {
case CryptoErrorInvalidState:
variantValue.destroy()
case CryptoErrorInvalidEnvelope:
variantValue.destroy()
case CryptoErrorDecryptionFailed:
variantValue.destroy()
case CryptoErrorEncryptionFailed:
variantValue.destroy()
case CryptoErrorSerializationFailed:
variantValue.destroy()
case CryptoErrorInvalidInput:
variantValue.destroy()
default:
_ = variantValue
panic(fmt.Sprintf("invalid error value `%v` in FfiDestroyerCryptoError.Destroy", value))
}
}
type FfiConverterSequenceUint8 struct{}
var FfiConverterSequenceUint8INSTANCE = FfiConverterSequenceUint8{}
@ -913,19 +1216,79 @@ func (_ FfiDestroyerMapStringString) Destroy(mapValue map[string]string) {
}
}
func DoubleRatchetDecrypt(ratchetStateAndEnvelope DoubleRatchetStateAndEnvelope) DoubleRatchetStateAndMessage {
return FfiConverterDoubleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func DecryptInboxMessage(input string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_double_ratchet_decrypt(FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus),
inner: C.uniffi_channel_fn_func_decrypt_inbox_message(FfiConverterStringINSTANCE.Lower(input), _uniffiStatus),
}
}))
}
func DoubleRatchetEncrypt(ratchetStateAndMessage DoubleRatchetStateAndMessage) DoubleRatchetStateAndEnvelope {
return FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func DoubleRatchetDecrypt(ratchetStateAndEnvelope DoubleRatchetStateAndEnvelope) (DoubleRatchetStateAndMessage, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_double_ratchet_decrypt(FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus),
}
})
if _uniffiErr != nil {
var _uniffiDefaultValue DoubleRatchetStateAndMessage
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterDoubleRatchetStateAndMessageINSTANCE.Lift(_uniffiRV), nil
}
}
func DoubleRatchetEncrypt(ratchetStateAndMessage DoubleRatchetStateAndMessage) (DoubleRatchetStateAndEnvelope, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_double_ratchet_encrypt(FfiConverterDoubleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus),
}
})
if _uniffiErr != nil {
var _uniffiDefaultValue DoubleRatchetStateAndEnvelope
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterDoubleRatchetStateAndEnvelopeINSTANCE.Lift(_uniffiRV), nil
}
}
func EncryptInboxMessage(input string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_encrypt_inbox_message(FfiConverterStringINSTANCE.Lower(input), _uniffiStatus),
}
}))
}
func GenerateEd448() string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_generate_ed448(_uniffiStatus),
}
}))
}
func GenerateX448() string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_generate_x448(_uniffiStatus),
}
}))
}
func GetPubkeyEd448(key string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_get_pubkey_ed448(FfiConverterStringINSTANCE.Lower(key), _uniffiStatus),
}
}))
}
func GetPubkeyX448(key string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_get_pubkey_x448(FfiConverterStringINSTANCE.Lower(key), _uniffiStatus),
}
}))
}
@ -961,50 +1324,110 @@ func SenderX3dh(sendingIdentityPrivateKey []uint8, sendingEphemeralPrivateKey []
}))
}
func TripleRatchetDecrypt(ratchetStateAndEnvelope TripleRatchetStateAndEnvelope) TripleRatchetStateAndMessage {
return FfiConverterTripleRatchetStateAndMessageINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func SignEd448(key string, message string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_sign_ed448(FfiConverterStringINSTANCE.Lower(key), FfiConverterStringINSTANCE.Lower(message), _uniffiStatus),
}
}))
}
func TripleRatchetDecrypt(ratchetStateAndEnvelope TripleRatchetStateAndEnvelope) (TripleRatchetStateAndMessage, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_decrypt(FfiConverterTripleRatchetStateAndEnvelopeINSTANCE.Lower(ratchetStateAndEnvelope), _uniffiStatus),
}
}))
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndMessage
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndMessageINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetEncrypt(ratchetStateAndMessage TripleRatchetStateAndMessage) TripleRatchetStateAndEnvelope {
return FfiConverterTripleRatchetStateAndEnvelopeINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func TripleRatchetEncrypt(ratchetStateAndMessage TripleRatchetStateAndMessage) (TripleRatchetStateAndEnvelope, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_encrypt(FfiConverterTripleRatchetStateAndMessageINSTANCE.Lower(ratchetStateAndMessage), _uniffiStatus),
}
}))
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndEnvelope
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndEnvelopeINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetInitRound1(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func TripleRatchetInitRound1(ratchetStateAndMetadata TripleRatchetStateAndMetadata) (TripleRatchetStateAndMetadata, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_1(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndMetadata
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetInitRound2(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func TripleRatchetInitRound2(ratchetStateAndMetadata TripleRatchetStateAndMetadata) (TripleRatchetStateAndMetadata, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_2(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndMetadata
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetInitRound3(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func TripleRatchetInitRound3(ratchetStateAndMetadata TripleRatchetStateAndMetadata) (TripleRatchetStateAndMetadata, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_3(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
}))
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndMetadata
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetInitRound4(ratchetStateAndMetadata TripleRatchetStateAndMetadata) TripleRatchetStateAndMetadata {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
func TripleRatchetInitRound4(ratchetStateAndMetadata TripleRatchetStateAndMetadata) (TripleRatchetStateAndMetadata, error) {
_uniffiRV, _uniffiErr := rustCallWithError[CryptoError](FfiConverterCryptoError{}, func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_init_round_4(FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lower(ratchetStateAndMetadata), _uniffiStatus),
}
})
if _uniffiErr != nil {
var _uniffiDefaultValue TripleRatchetStateAndMetadata
return _uniffiDefaultValue, _uniffiErr
} else {
return FfiConverterTripleRatchetStateAndMetadataINSTANCE.Lift(_uniffiRV), nil
}
}
func TripleRatchetResize(ratchetState string, other string, id uint64, total uint64) [][]uint8 {
return FfiConverterSequenceSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_triple_ratchet_resize(FfiConverterStringINSTANCE.Lower(ratchetState), FfiConverterStringINSTANCE.Lower(other), FfiConverterUint64INSTANCE.Lower(id), FfiConverterUint64INSTANCE.Lower(total), _uniffiStatus),
}
}))
}
func VerifyEd448(publicKey string, message string, signature string) string {
return FfiConverterStringINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_channel_fn_func_verify_ed448(FfiConverterStringINSTANCE.Lower(publicKey), FfiConverterStringINSTANCE.Lower(message), FfiConverterStringINSTANCE.Lower(signature), _uniffiStatus),
}
}))
}
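Editorial note: because the bindings now lift the Rust CryptoError variants into Go, callers can distinguish failure modes with the standard library's errors.Is against the Err* sentinels defined above. A minimal sketch, assuming it sits alongside these bindings in the same package and imports the standard "errors" package; the function name is invented for the example.

// classifyDecryptError is an illustrative sketch (not generated code) showing
// how the lifted CryptoError variants can be inspected after a failed
// DoubleRatchetDecrypt call.
func classifyDecryptError(in DoubleRatchetStateAndEnvelope) (DoubleRatchetStateAndMessage, error) {
	out, err := DoubleRatchetDecrypt(in)
	if err == nil {
		return out, nil
	}
	switch {
	case errors.Is(err, ErrCryptoErrorInvalidEnvelope):
		// The envelope could not be parsed.
	case errors.Is(err, ErrCryptoErrorDecryptionFailed):
		// Authentication or decryption of the payload failed.
	case errors.Is(err, ErrCryptoErrorInvalidState):
		// The serialized ratchet state was rejected.
	}
	return out, err
}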


@ -377,6 +377,11 @@ static void call_UniffiForeignFutureCompleteVoid(
}
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DECRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DECRYPT_INBOX_MESSAGE
RustBuffer uniffi_channel_fn_func_decrypt_inbox_message(RustBuffer input, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_DECRYPT
@ -388,6 +393,33 @@ RustBuffer uniffi_channel_fn_func_double_ratchet_decrypt(RustBuffer ratchet_stat
RustBuffer uniffi_channel_fn_func_double_ratchet_encrypt(RustBuffer ratchet_state_and_message, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_ENCRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_ENCRYPT_INBOX_MESSAGE
RustBuffer uniffi_channel_fn_func_encrypt_inbox_message(RustBuffer input, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_ED448
RustBuffer uniffi_channel_fn_func_generate_ed448(RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_X448
RustBuffer uniffi_channel_fn_func_generate_x448(RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_ED448
RustBuffer uniffi_channel_fn_func_get_pubkey_ed448(RustBuffer key, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_X448
RustBuffer uniffi_channel_fn_func_get_pubkey_x448(RustBuffer key, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_DOUBLE_RATCHET
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_DOUBLE_RATCHET
RustBuffer uniffi_channel_fn_func_new_double_ratchet(RustBuffer session_key, RustBuffer sending_header_key, RustBuffer next_receiving_header_key, int8_t is_sender, RustBuffer sending_ephemeral_private_key, RustBuffer receiving_ephemeral_key, RustCallStatus *out_status
@ -408,6 +440,11 @@ RustBuffer uniffi_channel_fn_func_receiver_x3dh(RustBuffer sending_identity_priv
RustBuffer uniffi_channel_fn_func_sender_x3dh(RustBuffer sending_identity_private_key, RustBuffer sending_ephemeral_private_key, RustBuffer receiving_identity_key, RustBuffer receiving_signed_pre_key, uint64_t session_key_length, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SIGN_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SIGN_ED448
RustBuffer uniffi_channel_fn_func_sign_ed448(RustBuffer key, RustBuffer message, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_DECRYPT
RustBuffer uniffi_channel_fn_func_triple_ratchet_decrypt(RustBuffer ratchet_state_and_envelope, RustCallStatus *out_status
@ -438,6 +475,16 @@ RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_3(RustBuffer ratchet
RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_4(RustBuffer ratchet_state_and_metadata, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_RESIZE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_RESIZE
RustBuffer uniffi_channel_fn_func_triple_ratchet_resize(RustBuffer ratchet_state, RustBuffer other, uint64_t id, uint64_t total, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_VERIFY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_VERIFY_ED448
RustBuffer uniffi_channel_fn_func_verify_ed448(RustBuffer public_key, RustBuffer message, RustBuffer signature, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_ALLOC
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_ALLOC
RustBuffer ffi_channel_rustbuffer_alloc(uint64_t size, RustCallStatus *out_status
@ -716,6 +763,12 @@ void ffi_channel_rust_future_free_void(uint64_t handle
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_VOID
void ffi_channel_rust_future_complete_void(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DECRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DECRYPT_INBOX_MESSAGE
uint16_t uniffi_channel_checksum_func_decrypt_inbox_message(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_DECRYPT
@ -728,6 +781,36 @@ uint16_t uniffi_channel_checksum_func_double_ratchet_decrypt(void
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_ENCRYPT
uint16_t uniffi_channel_checksum_func_double_ratchet_encrypt(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_ENCRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_ENCRYPT_INBOX_MESSAGE
uint16_t uniffi_channel_checksum_func_encrypt_inbox_message(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_ED448
uint16_t uniffi_channel_checksum_func_generate_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_X448
uint16_t uniffi_channel_checksum_func_generate_x448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_ED448
uint16_t uniffi_channel_checksum_func_get_pubkey_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_X448
uint16_t uniffi_channel_checksum_func_get_pubkey_x448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_NEW_DOUBLE_RATCHET
@ -752,6 +835,12 @@ uint16_t uniffi_channel_checksum_func_receiver_x3dh(void
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SENDER_X3DH
uint16_t uniffi_channel_checksum_func_sender_x3dh(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SIGN_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SIGN_ED448
uint16_t uniffi_channel_checksum_func_sign_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_DECRYPT
@ -788,6 +877,18 @@ uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_3(void
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_4
uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_4(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_RESIZE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_RESIZE
uint16_t uniffi_channel_checksum_func_triple_ratchet_resize(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_VERIFY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_VERIFY_ED448
uint16_t uniffi_channel_checksum_func_verify_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_UNIFFI_CONTRACT_VERSION


@ -43,7 +43,7 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x11
return 0x12
}
func GetRCNumber() byte {


@ -33,7 +33,7 @@ func main() {
}
logger, _ := zap.NewProduction()
pubsub := p2p.NewBlossomSub(cfg.P2P, cfg.Engine, logger, 0)
pubsub := p2p.NewBlossomSub(cfg.P2P, cfg.Engine, logger, 0, p2p.ConfigDir(*configDirectory))
fmt.Print("Enter bitmask in hex (no 0x prefix): ")
reader := bufio.NewReader(os.Stdin)
bitmaskHex, _ := reader.ReadString('\n')


@ -7,10 +7,15 @@ edition = "2021"
crate-type = ["lib", "staticlib"]
name = "channel"
[[bin]]
name = "uniffi-bindgen"
path = "uniffi-bindgen.rs"
[dependencies]
base64 = "0.22.1"
serde_json = "1.0.117"
ed448-goldilocks-plus = "0.11.2"
ed448-rust = { path = "../ed448-rust", version = "0.1.2" }
hex = "0.4.3"
rand = "0.8.5"
sha2 = "0.10.8"
@ -18,7 +23,7 @@ hkdf = "0.12.4"
aes-gcm = "0.10.3"
thiserror = "1.0.63"
hmac = "0.12.1"
serde = "1.0.208"
serde = { version = "1.0.208", features = ["derive"] }
lazy_static = "1.5.0"
uniffi = { version= "0.28.3", features = ["cli"]}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,773 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V4
#ifndef UNIFFI_SHARED_HEADER_V4
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V4
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V4
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
typedef struct RustBuffer
{
uint64_t capacity;
uint64_t len;
uint8_t *_Nullable data;
} RustBuffer;
typedef struct ForeignBytes
{
int32_t len;
const uint8_t *_Nullable data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
#endif // def UNIFFI_SHARED_H
#ifndef UNIFFI_FFIDEF_RUST_FUTURE_CONTINUATION_CALLBACK
#define UNIFFI_FFIDEF_RUST_FUTURE_CONTINUATION_CALLBACK
typedef void (*UniffiRustFutureContinuationCallback)(uint64_t, int8_t
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_FREE
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_FREE
typedef void (*UniffiForeignFutureFree)(uint64_t
);
#endif
#ifndef UNIFFI_FFIDEF_CALLBACK_INTERFACE_FREE
#define UNIFFI_FFIDEF_CALLBACK_INTERFACE_FREE
typedef void (*UniffiCallbackInterfaceFree)(uint64_t
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE
#define UNIFFI_FFIDEF_FOREIGN_FUTURE
typedef struct UniffiForeignFuture {
uint64_t handle;
UniffiForeignFutureFree _Nonnull free;
} UniffiForeignFuture;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U8
typedef struct UniffiForeignFutureStructU8 {
uint8_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU8;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U8
typedef void (*UniffiForeignFutureCompleteU8)(uint64_t, UniffiForeignFutureStructU8
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I8
typedef struct UniffiForeignFutureStructI8 {
int8_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI8;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I8
typedef void (*UniffiForeignFutureCompleteI8)(uint64_t, UniffiForeignFutureStructI8
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U16
typedef struct UniffiForeignFutureStructU16 {
uint16_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU16;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U16
typedef void (*UniffiForeignFutureCompleteU16)(uint64_t, UniffiForeignFutureStructU16
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I16
typedef struct UniffiForeignFutureStructI16 {
int16_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI16;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I16
typedef void (*UniffiForeignFutureCompleteI16)(uint64_t, UniffiForeignFutureStructI16
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U32
typedef struct UniffiForeignFutureStructU32 {
uint32_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U32
typedef void (*UniffiForeignFutureCompleteU32)(uint64_t, UniffiForeignFutureStructU32
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I32
typedef struct UniffiForeignFutureStructI32 {
int32_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I32
typedef void (*UniffiForeignFutureCompleteI32)(uint64_t, UniffiForeignFutureStructI32
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U64
typedef struct UniffiForeignFutureStructU64 {
uint64_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U64
typedef void (*UniffiForeignFutureCompleteU64)(uint64_t, UniffiForeignFutureStructU64
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I64
typedef struct UniffiForeignFutureStructI64 {
int64_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I64
typedef void (*UniffiForeignFutureCompleteI64)(uint64_t, UniffiForeignFutureStructI64
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F32
typedef struct UniffiForeignFutureStructF32 {
float returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructF32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F32
typedef void (*UniffiForeignFutureCompleteF32)(uint64_t, UniffiForeignFutureStructF32
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F64
typedef struct UniffiForeignFutureStructF64 {
double returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructF64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F64
typedef void (*UniffiForeignFutureCompleteF64)(uint64_t, UniffiForeignFutureStructF64
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_POINTER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_POINTER
typedef struct UniffiForeignFutureStructPointer {
void*_Nonnull returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructPointer;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_POINTER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_POINTER
typedef void (*UniffiForeignFutureCompletePointer)(uint64_t, UniffiForeignFutureStructPointer
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_RUST_BUFFER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_RUST_BUFFER
typedef struct UniffiForeignFutureStructRustBuffer {
RustBuffer returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructRustBuffer;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_RUST_BUFFER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_RUST_BUFFER
typedef void (*UniffiForeignFutureCompleteRustBuffer)(uint64_t, UniffiForeignFutureStructRustBuffer
);
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_VOID
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_VOID
typedef struct UniffiForeignFutureStructVoid {
RustCallStatus callStatus;
} UniffiForeignFutureStructVoid;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_VOID
typedef void (*UniffiForeignFutureCompleteVoid)(uint64_t, UniffiForeignFutureStructVoid
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DECRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DECRYPT_INBOX_MESSAGE
RustBuffer uniffi_channel_fn_func_decrypt_inbox_message(RustBuffer input, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_DECRYPT
RustBuffer uniffi_channel_fn_func_double_ratchet_decrypt(RustBuffer ratchet_state_and_envelope, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_ENCRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_DOUBLE_RATCHET_ENCRYPT
RustBuffer uniffi_channel_fn_func_double_ratchet_encrypt(RustBuffer ratchet_state_and_message, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_ENCRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_ENCRYPT_INBOX_MESSAGE
RustBuffer uniffi_channel_fn_func_encrypt_inbox_message(RustBuffer input, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_ED448
RustBuffer uniffi_channel_fn_func_generate_ed448(RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GENERATE_X448
RustBuffer uniffi_channel_fn_func_generate_x448(RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_ED448
RustBuffer uniffi_channel_fn_func_get_pubkey_ed448(RustBuffer key, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_GET_PUBKEY_X448
RustBuffer uniffi_channel_fn_func_get_pubkey_x448(RustBuffer key, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_DOUBLE_RATCHET
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_DOUBLE_RATCHET
RustBuffer uniffi_channel_fn_func_new_double_ratchet(RustBuffer session_key, RustBuffer sending_header_key, RustBuffer next_receiving_header_key, int8_t is_sender, RustBuffer sending_ephemeral_private_key, RustBuffer receiving_ephemeral_key, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_TRIPLE_RATCHET
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_NEW_TRIPLE_RATCHET
RustBuffer uniffi_channel_fn_func_new_triple_ratchet(RustBuffer peers, RustBuffer peer_key, RustBuffer identity_key, RustBuffer signed_pre_key, uint64_t threshold, int8_t async_dkg_ratchet, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_RECEIVER_X3DH
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_RECEIVER_X3DH
RustBuffer uniffi_channel_fn_func_receiver_x3dh(RustBuffer sending_identity_private_key, RustBuffer sending_signed_private_key, RustBuffer receiving_identity_key, RustBuffer receiving_ephemeral_key, uint64_t session_key_length, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SENDER_X3DH
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SENDER_X3DH
RustBuffer uniffi_channel_fn_func_sender_x3dh(RustBuffer sending_identity_private_key, RustBuffer sending_ephemeral_private_key, RustBuffer receiving_identity_key, RustBuffer receiving_signed_pre_key, uint64_t session_key_length, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SIGN_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_SIGN_ED448
RustBuffer uniffi_channel_fn_func_sign_ed448(RustBuffer key, RustBuffer message, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_DECRYPT
RustBuffer uniffi_channel_fn_func_triple_ratchet_decrypt(RustBuffer ratchet_state_and_envelope, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_ENCRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_ENCRYPT
RustBuffer uniffi_channel_fn_func_triple_ratchet_encrypt(RustBuffer ratchet_state_and_message, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_1
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_1
RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_1(RustBuffer ratchet_state_and_metadata, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_2
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_2
RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_2(RustBuffer ratchet_state_and_metadata, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_3
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_3
RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_3(RustBuffer ratchet_state_and_metadata, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_4
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_INIT_ROUND_4
RustBuffer uniffi_channel_fn_func_triple_ratchet_init_round_4(RustBuffer ratchet_state_and_metadata, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_RESIZE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_TRIPLE_RATCHET_RESIZE
RustBuffer uniffi_channel_fn_func_triple_ratchet_resize(RustBuffer ratchet_state, RustBuffer other, uint64_t id, uint64_t total, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_VERIFY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_FN_FUNC_VERIFY_ED448
RustBuffer uniffi_channel_fn_func_verify_ed448(RustBuffer public_key, RustBuffer message, RustBuffer signature, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_ALLOC
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_ALLOC
RustBuffer ffi_channel_rustbuffer_alloc(uint64_t size, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_FROM_BYTES
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_FROM_BYTES
RustBuffer ffi_channel_rustbuffer_from_bytes(ForeignBytes bytes, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_FREE
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_FREE
void ffi_channel_rustbuffer_free(RustBuffer buf, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_RESERVE
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUSTBUFFER_RESERVE
RustBuffer ffi_channel_rustbuffer_reserve(RustBuffer buf, uint64_t additional, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U8
void ffi_channel_rust_future_poll_u8(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U8
void ffi_channel_rust_future_cancel_u8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U8
void ffi_channel_rust_future_free_u8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U8
uint8_t ffi_channel_rust_future_complete_u8(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I8
void ffi_channel_rust_future_poll_i8(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I8
void ffi_channel_rust_future_cancel_i8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I8
void ffi_channel_rust_future_free_i8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I8
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I8
int8_t ffi_channel_rust_future_complete_i8(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U16
void ffi_channel_rust_future_poll_u16(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U16
void ffi_channel_rust_future_cancel_u16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U16
void ffi_channel_rust_future_free_u16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U16
uint16_t ffi_channel_rust_future_complete_u16(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I16
void ffi_channel_rust_future_poll_i16(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I16
void ffi_channel_rust_future_cancel_i16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I16
void ffi_channel_rust_future_free_i16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I16
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I16
int16_t ffi_channel_rust_future_complete_i16(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U32
void ffi_channel_rust_future_poll_u32(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U32
void ffi_channel_rust_future_cancel_u32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U32
void ffi_channel_rust_future_free_u32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U32
uint32_t ffi_channel_rust_future_complete_u32(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I32
void ffi_channel_rust_future_poll_i32(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I32
void ffi_channel_rust_future_cancel_i32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I32
void ffi_channel_rust_future_free_i32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I32
int32_t ffi_channel_rust_future_complete_i32(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_U64
void ffi_channel_rust_future_poll_u64(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_U64
void ffi_channel_rust_future_cancel_u64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_U64
void ffi_channel_rust_future_free_u64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_U64
uint64_t ffi_channel_rust_future_complete_u64(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_I64
void ffi_channel_rust_future_poll_i64(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_I64
void ffi_channel_rust_future_cancel_i64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_I64
void ffi_channel_rust_future_free_i64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_I64
int64_t ffi_channel_rust_future_complete_i64(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_F32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_F32
void ffi_channel_rust_future_poll_f32(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_F32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_F32
void ffi_channel_rust_future_cancel_f32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_F32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_F32
void ffi_channel_rust_future_free_f32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_F32
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_F32
float ffi_channel_rust_future_complete_f32(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_F64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_F64
void ffi_channel_rust_future_poll_f64(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_F64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_F64
void ffi_channel_rust_future_cancel_f64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_F64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_F64
void ffi_channel_rust_future_free_f64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_F64
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_F64
double ffi_channel_rust_future_complete_f64(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_POINTER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_POINTER
void ffi_channel_rust_future_poll_pointer(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_POINTER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_POINTER
void ffi_channel_rust_future_cancel_pointer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_POINTER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_POINTER
void ffi_channel_rust_future_free_pointer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_POINTER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_POINTER
void*_Nonnull ffi_channel_rust_future_complete_pointer(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_RUST_BUFFER
void ffi_channel_rust_future_poll_rust_buffer(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_RUST_BUFFER
void ffi_channel_rust_future_cancel_rust_buffer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_RUST_BUFFER
void ffi_channel_rust_future_free_rust_buffer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_RUST_BUFFER
RustBuffer ffi_channel_rust_future_complete_rust_buffer(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_VOID
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_POLL_VOID
void ffi_channel_rust_future_poll_void(uint64_t handle, UniffiRustFutureContinuationCallback _Nonnull callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_VOID
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_CANCEL_VOID
void ffi_channel_rust_future_cancel_void(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_VOID
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_FREE_VOID
void ffi_channel_rust_future_free_void(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FFI_CHANNEL_RUST_FUTURE_COMPLETE_VOID
void ffi_channel_rust_future_complete_void(uint64_t handle, RustCallStatus *_Nonnull out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DECRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DECRYPT_INBOX_MESSAGE
uint16_t uniffi_channel_checksum_func_decrypt_inbox_message(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_DECRYPT
uint16_t uniffi_channel_checksum_func_double_ratchet_decrypt(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_ENCRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_DOUBLE_RATCHET_ENCRYPT
uint16_t uniffi_channel_checksum_func_double_ratchet_encrypt(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_ENCRYPT_INBOX_MESSAGE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_ENCRYPT_INBOX_MESSAGE
uint16_t uniffi_channel_checksum_func_encrypt_inbox_message(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_ED448
uint16_t uniffi_channel_checksum_func_generate_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GENERATE_X448
uint16_t uniffi_channel_checksum_func_generate_x448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_ED448
uint16_t uniffi_channel_checksum_func_get_pubkey_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_X448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_GET_PUBKEY_X448
uint16_t uniffi_channel_checksum_func_get_pubkey_x448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_NEW_DOUBLE_RATCHET
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_NEW_DOUBLE_RATCHET
uint16_t uniffi_channel_checksum_func_new_double_ratchet(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_NEW_TRIPLE_RATCHET
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_NEW_TRIPLE_RATCHET
uint16_t uniffi_channel_checksum_func_new_triple_ratchet(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_RECEIVER_X3DH
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_RECEIVER_X3DH
uint16_t uniffi_channel_checksum_func_receiver_x3dh(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SENDER_X3DH
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SENDER_X3DH
uint16_t uniffi_channel_checksum_func_sender_x3dh(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SIGN_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_SIGN_ED448
uint16_t uniffi_channel_checksum_func_sign_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_DECRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_DECRYPT
uint16_t uniffi_channel_checksum_func_triple_ratchet_decrypt(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_ENCRYPT
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_ENCRYPT
uint16_t uniffi_channel_checksum_func_triple_ratchet_encrypt(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_1
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_1
uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_1(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_2
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_2
uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_2(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_3
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_3
uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_3(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_4
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_INIT_ROUND_4
uint16_t uniffi_channel_checksum_func_triple_ratchet_init_round_4(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_RESIZE
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_TRIPLE_RATCHET_RESIZE
uint16_t uniffi_channel_checksum_func_triple_ratchet_resize(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_VERIFY_ED448
#define UNIFFI_FFIDEF_UNIFFI_CHANNEL_CHECKSUM_FUNC_VERIFY_ED448
uint16_t uniffi_channel_checksum_func_verify_ed448(void
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_CHANNEL_UNIFFI_CONTRACT_VERSION
#define UNIFFI_FFIDEF_FFI_CHANNEL_UNIFFI_CONTRACT_VERSION
uint32_t ffi_channel_uniffi_contract_version(void
);
#endif

View File

@ -0,0 +1,4 @@
module channelFFI {
header "channelFFI.h"
export *
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,15 +1,37 @@
use aes_gcm::{Aes256Gcm, Nonce};
use aes_gcm::aead::{Aead, Payload};
use base64::prelude::*;
use ed448_rust::Ed448Error;
use hkdf::Hkdf;
use rand::{rngs::OsRng, RngCore};
use sha2::Sha512;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, error::Error, io::Read};
use std::{collections::HashMap, error::Error};
use hex;
use ed448_goldilocks_plus::{elliptic_curve::group::GroupEncoding, CompressedEdwardsY, EdwardsPoint, Scalar};
use ed448_goldilocks_plus::{elliptic_curve::group::GroupEncoding, elliptic_curve::Group, CompressedEdwardsY, EdwardsPoint, Scalar};
use protocols::{doubleratchet::{DoubleRatchetParticipant, P2PChannelEnvelope}, tripleratchet::{PeerInfo, TripleRatchetParticipant}, x3dh};
pub(crate) mod protocols;
uniffi::include_scaffolding!("lib");
#[derive(Debug, thiserror::Error)]
pub enum CryptoError {
#[error("Invalid state: {0}")]
InvalidState(String),
#[error("Invalid envelope: {0}")]
InvalidEnvelope(String),
#[error("Decryption failed: {0}")]
DecryptionFailed(String),
#[error("Encryption failed: {0}")]
EncryptionFailed(String),
#[error("Serialization failed: {0}")]
SerializationFailed(String),
#[error("Invalid input: {0}")]
InvalidInput(String),
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleRatchetStateAndEnvelope {
pub ratchet_state: String,
@ -40,6 +62,305 @@ pub struct TripleRatchetStateAndMessage {
pub message: Vec<u8>,
}
// ============ Keypair Types ============
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EncryptionKeyPair {
pub public_key: Vec<u8>,
pub private_key: Vec<u8>,
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct MessageCiphertext {
pub ciphertext: String,
pub initialization_vector: String,
pub associated_data: Option<String>,
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct SealedInboxMessageEncryptRequest {
pub inbox_public_key: Vec<u8>,
pub ephemeral_private_key: Vec<u8>,
pub plaintext: Vec<u8>,
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct SealedInboxMessageDecryptRequest {
pub inbox_private_key: Vec<u8>,
pub ephemeral_public_key: Vec<u8>,
pub ciphertext: MessageCiphertext,
}
// ============ Encryption Helpers ============
fn encrypt_aead(plaintext: &[u8], key: &[u8]) -> Result<MessageCiphertext, String> {
use aes_gcm::KeyInit;
let mut iv = [0u8; 12];
OsRng.fill_bytes(&mut iv);
let cipher = Aes256Gcm::new_from_slice(key)
.map_err(|e| format!("Invalid key: {}", e))?;
let nonce = Nonce::from_slice(&iv);
let mut aad = [0u8; 32];
OsRng.fill_bytes(&mut aad);
let ciphertext = cipher.encrypt(nonce, Payload {
msg: plaintext,
aad: &aad,
}).map_err(|e| format!("Encryption failed: {}", e))?;
Ok(MessageCiphertext {
ciphertext: BASE64_STANDARD.encode(ciphertext),
initialization_vector: BASE64_STANDARD.encode(iv.to_vec()),
associated_data: Some(BASE64_STANDARD.encode(aad.to_vec())),
})
}
fn decrypt_aead(ciphertext: &MessageCiphertext, key: &[u8]) -> Result<Vec<u8>, String> {
use aes_gcm::KeyInit;
if key.len() != 32 {
return Err("Invalid key length".to_string());
}
let cipher = Aes256Gcm::new_from_slice(key)
.map_err(|e| format!("Invalid key: {}", e))?;
let iv = BASE64_STANDARD.decode(&ciphertext.initialization_vector)
.map_err(|e| format!("Invalid IV: {}", e))?;
let nonce = Nonce::from_slice(&iv);
let associated_data = match &ciphertext.associated_data {
Some(aad) => BASE64_STANDARD.decode(aad)
.map_err(|e| format!("Invalid AAD: {}", e))?,
None => Vec::new(),
};
let ct = BASE64_STANDARD.decode(&ciphertext.ciphertext)
.map_err(|e| format!("Invalid ciphertext: {}", e))?;
cipher.decrypt(nonce, Payload {
msg: &ct,
aad: &associated_data,
}).map_err(|e| format!("Decryption failed: {}", e))
}
// ============ Key Generation ============
pub fn generate_x448() -> String {
let priv_key = Scalar::random(&mut rand::thread_rng());
let pub_key = EdwardsPoint::generator() * priv_key;
match serde_json::to_string(&EncryptionKeyPair {
public_key: pub_key.compress().to_bytes().to_vec(),
private_key: priv_key.to_bytes().to_vec(),
}) {
Ok(result) => result,
Err(e) => e.to_string(),
}
}
pub fn generate_ed448() -> String {
let priv_key = ed448_rust::PrivateKey::new(&mut rand::thread_rng());
let pub_key = ed448_rust::PublicKey::from(&priv_key);
match serde_json::to_string(&EncryptionKeyPair {
public_key: pub_key.as_byte().to_vec(),
private_key: priv_key.as_bytes().to_vec(),
}) {
Ok(result) => result,
Err(e) => e.to_string(),
}
}
pub fn get_pubkey_x448(key: String) -> String {
let maybe_key = BASE64_STANDARD.decode(&key);
if maybe_key.is_err() {
return maybe_key.unwrap_err().to_string();
}
let key_bytes = maybe_key.unwrap();
if key_bytes.len() != 56 {
return "invalid key length".to_string();
}
let mut priv_key_bytes = [0u8; 56];
priv_key_bytes.copy_from_slice(&key_bytes);
let priv_key = Scalar::from_bytes(&priv_key_bytes);
let pub_key = EdwardsPoint::generator() * priv_key;
format!("\"{}\"", BASE64_STANDARD.encode(pub_key.compress().to_bytes().to_vec()))
}
pub fn get_pubkey_ed448(key: String) -> String {
let maybe_key = BASE64_STANDARD.decode(&key);
if maybe_key.is_err() {
return maybe_key.unwrap_err().to_string();
}
let key_bytes = maybe_key.unwrap();
if key_bytes.len() != 57 {
return "invalid key length".to_string();
}
let key_arr: [u8; 57] = key_bytes.try_into().unwrap();
let priv_key = ed448_rust::PrivateKey::from(key_arr);
let pub_key = ed448_rust::PublicKey::from(&priv_key);
format!("\"{}\"", BASE64_STANDARD.encode(pub_key.as_byte()))
}
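// A minimal usage sketch for the key-generation helpers above (illustrative;
// assumes it lives alongside these functions in this crate). generate_x448
// returns an EncryptionKeyPair as JSON, and get_pubkey_x448 re-derives the
// compressed public point from the base64-encoded 56-byte private scalar.
#[cfg(test)]
mod x448_keygen_sketch {
    use super::*;

    #[test]
    fn pubkey_rederivation_matches_generated_pair() {
        let pair: EncryptionKeyPair =
            serde_json::from_str(&generate_x448()).expect("keypair JSON");
        // get_pubkey_x448 returns the public key as a JSON-quoted base64 string.
        let derived = get_pubkey_x448(BASE64_STANDARD.encode(&pair.private_key));
        assert_eq!(
            derived,
            format!("\"{}\"", BASE64_STANDARD.encode(&pair.public_key))
        );
    }
}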
// ============ Signing ============
pub fn sign_ed448(key: String, message: String) -> String {
let maybe_key = BASE64_STANDARD.decode(&key);
if maybe_key.is_err() {
return maybe_key.unwrap_err().to_string();
}
let maybe_message = BASE64_STANDARD.decode(&message);
if maybe_message.is_err() {
return maybe_message.unwrap_err().to_string();
}
let key_bytes = maybe_key.unwrap();
if key_bytes.len() != 57 {
return "invalid key length".to_string();
}
let key_arr: [u8; 57] = key_bytes.try_into().unwrap();
let priv_key = ed448_rust::PrivateKey::from(key_arr);
let signature = priv_key.sign(&maybe_message.unwrap(), None);
match signature {
Ok(output) => format!("\"{}\"", BASE64_STANDARD.encode(output)),
Err(Ed448Error::WrongKeyLength) => "invalid key length".to_string(),
Err(Ed448Error::WrongPublicKeyLength) => "invalid public key length".to_string(),
Err(Ed448Error::WrongSignatureLength) => "invalid signature length".to_string(),
Err(Ed448Error::InvalidPoint) => "invalid point".to_string(),
Err(Ed448Error::InvalidSignature) => "invalid signature".to_string(),
Err(Ed448Error::ContextTooLong) => "context too long".to_string(),
}
}
pub fn verify_ed448(public_key: String, message: String, signature: String) -> String {
let maybe_key = BASE64_STANDARD.decode(&public_key);
if maybe_key.is_err() {
return maybe_key.unwrap_err().to_string();
}
let maybe_message = BASE64_STANDARD.decode(&message);
if maybe_message.is_err() {
return maybe_message.unwrap_err().to_string();
}
let maybe_signature = BASE64_STANDARD.decode(&signature);
if maybe_signature.is_err() {
return maybe_signature.unwrap_err().to_string();
}
let key_bytes = maybe_key.unwrap();
if key_bytes.len() != 57 {
return "invalid key length".to_string();
}
let pub_arr: [u8; 57] = key_bytes.try_into().unwrap();
let pub_key = ed448_rust::PublicKey::from(pub_arr);
let result = pub_key.verify(&maybe_message.unwrap(), &maybe_signature.unwrap(), None);
match result {
Ok(()) => "true".to_string(),
Err(Ed448Error::WrongKeyLength) => "invalid key length".to_string(),
Err(Ed448Error::WrongPublicKeyLength) => "invalid public key length".to_string(),
Err(Ed448Error::WrongSignatureLength) => "invalid signature length".to_string(),
Err(Ed448Error::InvalidPoint) => "invalid point".to_string(),
Err(Ed448Error::InvalidSignature) => "invalid signature".to_string(),
Err(Ed448Error::ContextTooLong) => "context too long".to_string(),
}
}
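// Sketch of the Ed448 sign/verify flow exposed above (illustrative; key
// material, message, and signature are exchanged as base64 strings, and
// verify_ed448 reports success as the literal string "true").
#[cfg(test)]
mod ed448_sign_verify_sketch {
    use super::*;

    #[test]
    fn sign_then_verify() {
        let pair: EncryptionKeyPair =
            serde_json::from_str(&generate_ed448()).expect("keypair JSON");
        let message = BASE64_STANDARD.encode(b"hello");
        // sign_ed448 returns the signature as a JSON-quoted base64 string.
        let signature = sign_ed448(
            BASE64_STANDARD.encode(&pair.private_key),
            message.clone(),
        )
        .trim_matches('"')
        .to_string();
        let ok = verify_ed448(
            BASE64_STANDARD.encode(&pair.public_key),
            message,
            signature,
        );
        assert_eq!(ok, "true");
    }
}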
// ============ Inbox Message Encryption ============
pub fn encrypt_inbox_message(input: String) -> String {
let json: Result<SealedInboxMessageEncryptRequest, serde_json::Error> = serde_json::from_str(&input);
match json {
Ok(params) => {
let key = params.ephemeral_private_key;
if key.len() != 56 {
return "invalid ephemeral key length".to_string();
}
let inbox_key = params.inbox_public_key;
if inbox_key.len() != 57 {
return "invalid inbox key length".to_string();
}
let key_bytes: [u8; 56] = key.try_into().unwrap();
let inbox_key_bytes: [u8; 57] = inbox_key.try_into().unwrap();
let priv_key = Scalar::from_bytes(&key_bytes);
let maybe_pub_key = CompressedEdwardsY(inbox_key_bytes).decompress();
if maybe_pub_key.is_none().into() {
return "invalid inbox key".to_string();
}
let dh_output = priv_key * maybe_pub_key.unwrap();
let hkdf = Hkdf::<Sha512>::new(None, &dh_output.compress().to_bytes());
let mut derived = [0u8; 32];
if hkdf.expand(b"quilibrium-sealed-sender", &mut derived).is_err() {
return "invalid length".to_string();
}
match encrypt_aead(&params.plaintext, &derived) {
Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| e.to_string()),
Err(e) => e,
}
}
Err(e) => e.to_string(),
}
}
pub fn decrypt_inbox_message(input: String) -> String {
let json: Result<SealedInboxMessageDecryptRequest, serde_json::Error> = serde_json::from_str(&input);
match json {
Ok(params) => {
let ephemeral_key = params.ephemeral_public_key;
if ephemeral_key.len() != 57 {
return "invalid ephemeral key length".to_string();
}
let inbox_key = params.inbox_private_key;
if inbox_key.len() != 56 {
return "invalid inbox key length".to_string();
}
let ephemeral_key_bytes: [u8; 57] = ephemeral_key.try_into().unwrap();
let inbox_key_bytes: [u8; 56] = inbox_key.try_into().unwrap();
let priv_key = Scalar::from_bytes(&inbox_key_bytes);
let maybe_eph_key = CompressedEdwardsY(ephemeral_key_bytes).decompress();
if maybe_eph_key.is_none().into() {
return "invalid ephemeral key".to_string();
}
let dh_output = priv_key * maybe_eph_key.unwrap();
let hkdf = Hkdf::<Sha512>::new(None, &dh_output.compress().to_bytes());
let mut derived = [0u8; 32];
if hkdf.expand(b"quilibrium-sealed-sender", &mut derived).is_err() {
return "invalid length".to_string();
}
match decrypt_aead(&params.ciphertext, &derived) {
Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| e.to_string()),
Err(e) => e,
}
}
Err(e) => e.to_string(),
}
}
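// Illustrative round-trip for the sealed-sender inbox helpers above (a sketch,
// assuming it lives in this crate): both sides derive the same AES-256-GCM key
// from an X448 shared secret via HKDF-SHA512, so decrypting with the inbox
// private key and the sender's ephemeral public key recovers the plaintext.
#[cfg(test)]
mod inbox_sketch {
    use super::*;

    #[test]
    fn sealed_inbox_roundtrip() {
        let inbox: EncryptionKeyPair =
            serde_json::from_str(&generate_x448()).expect("inbox keypair");
        let ephemeral: EncryptionKeyPair =
            serde_json::from_str(&generate_x448()).expect("ephemeral keypair");

        let ciphertext_json = encrypt_inbox_message(
            serde_json::to_string(&SealedInboxMessageEncryptRequest {
                inbox_public_key: inbox.public_key.clone(),
                ephemeral_private_key: ephemeral.private_key.clone(),
                plaintext: b"sealed hello".to_vec(),
            })
            .unwrap(),
        );
        let ciphertext: MessageCiphertext =
            serde_json::from_str(&ciphertext_json).expect("ciphertext JSON");

        let plaintext_json = decrypt_inbox_message(
            serde_json::to_string(&SealedInboxMessageDecryptRequest {
                inbox_private_key: inbox.private_key,
                ephemeral_public_key: ephemeral.public_key,
                ciphertext,
            })
            .unwrap(),
        );
        // decrypt_inbox_message serializes the recovered bytes back to JSON.
        let plaintext: Vec<u8> =
            serde_json::from_str(&plaintext_json).expect("plaintext JSON");
        assert_eq!(plaintext, b"sealed hello".to_vec());
    }
}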
// ============ X3DH Key Agreement ============
pub fn sender_x3dh(sending_identity_private_key: &Vec<u8>, sending_ephemeral_private_key: &Vec<u8>, receiving_identity_key: &Vec<u8>, receiving_signed_pre_key: &Vec<u8>, session_key_length: u64) -> String {
if sending_identity_private_key.len() != 56 {
return "invalid sending identity private key length".to_string();
@ -162,84 +483,45 @@ pub fn new_double_ratchet(session_key: &Vec<u8>, sending_header_key: &Vec<u8>, n
return json.unwrap();
}
pub fn double_ratchet_encrypt(ratchet_state_and_message: DoubleRatchetStateAndMessage) -> DoubleRatchetStateAndEnvelope {
pub fn double_ratchet_encrypt(ratchet_state_and_message: DoubleRatchetStateAndMessage) -> Result<DoubleRatchetStateAndEnvelope, CryptoError> {
let ratchet_state = ratchet_state_and_message.ratchet_state.clone();
let participant = DoubleRatchetParticipant::from_json(ratchet_state.clone());
let participant = DoubleRatchetParticipant::from_json(ratchet_state.clone())
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
if participant.is_err() {
return DoubleRatchetStateAndEnvelope{
ratchet_state: participant.unwrap_err().to_string(),
envelope: "".to_string(),
};
}
let mut dr = participant;
let envelope = dr.ratchet_encrypt(&ratchet_state_and_message.message)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut dr = participant.unwrap();
let envelope = dr.ratchet_encrypt(&ratchet_state_and_message.message);
let participant_json = dr.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
if envelope.is_err() {
return DoubleRatchetStateAndEnvelope{
ratchet_state: ratchet_state,
envelope: envelope.unwrap_err().to_string(),
};
}
let envelope_json = envelope.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
let participant_json = dr.to_json();
if participant_json.is_err() {
return DoubleRatchetStateAndEnvelope{
ratchet_state: participant_json.unwrap_err().to_string(),
envelope: "".to_string(),
};
}
let envelope_json = envelope.unwrap().to_json();
if envelope_json.is_err() {
return DoubleRatchetStateAndEnvelope{
ratchet_state: ratchet_state,
envelope: envelope_json.unwrap_err().to_string(),
};
}
return DoubleRatchetStateAndEnvelope{
ratchet_state: participant_json.unwrap(),
envelope: envelope_json.unwrap(),
};
Ok(DoubleRatchetStateAndEnvelope{
ratchet_state: participant_json,
envelope: envelope_json,
})
}
pub fn double_ratchet_decrypt(ratchet_state_and_envelope: DoubleRatchetStateAndEnvelope) -> DoubleRatchetStateAndMessage {
pub fn double_ratchet_decrypt(ratchet_state_and_envelope: DoubleRatchetStateAndEnvelope) -> Result<DoubleRatchetStateAndMessage, CryptoError> {
let ratchet_state = ratchet_state_and_envelope.ratchet_state.clone();
let participant = DoubleRatchetParticipant::from_json(ratchet_state.clone());
let envelope = P2PChannelEnvelope::from_json(ratchet_state_and_envelope.envelope);
let participant = DoubleRatchetParticipant::from_json(ratchet_state.clone())
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let envelope = P2PChannelEnvelope::from_json(ratchet_state_and_envelope.envelope)
.map_err(|e| CryptoError::InvalidEnvelope(e.to_string()))?;
if participant.is_err() || envelope.is_err() {
return DoubleRatchetStateAndMessage{
ratchet_state: ratchet_state,
message: vec![],
};
}
let mut dr = participant;
let message = dr.ratchet_decrypt(&envelope)
.map_err(|e| CryptoError::DecryptionFailed(e.to_string()))?;
let mut dr = participant.unwrap();
let message = dr.ratchet_decrypt(&envelope.unwrap());
let participant_json = dr.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
if message.is_err() {
return DoubleRatchetStateAndMessage{
ratchet_state: ratchet_state,
message: message.unwrap_err().to_string().as_bytes().to_vec(),
};
}
let participant_json = dr.to_json();
if participant_json.is_err() {
return DoubleRatchetStateAndMessage{
ratchet_state: participant_json.unwrap_err().to_string(),
message: vec![],
};
}
return DoubleRatchetStateAndMessage{
ratchet_state: participant_json.unwrap(),
message: message.unwrap(),
};
Ok(DoubleRatchetStateAndMessage{
ratchet_state: participant_json,
message: message,
})
}
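// Sketch of how a caller consumes the new Result-based double-ratchet API
// (illustrative; `sender_state`, `receiver_state`, and `plaintext` are
// placeholders for values produced by new_double_ratchet and the application).
// Failures now surface as typed CryptoError variants instead of being smuggled
// back through the ratchet_state string, so they compose with `?`.
#[allow(dead_code)]
fn double_ratchet_roundtrip_sketch(
    sender_state: String,
    receiver_state: String,
    plaintext: Vec<u8>,
) -> Result<Vec<u8>, CryptoError> {
    let sent = double_ratchet_encrypt(DoubleRatchetStateAndMessage {
        ratchet_state: sender_state,
        message: plaintext,
    })?;
    let received = double_ratchet_decrypt(DoubleRatchetStateAndEnvelope {
        ratchet_state: receiver_state,
        envelope: sent.envelope,
    })?;
    Ok(received.message)
}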
pub fn new_triple_ratchet(peers: &Vec<Vec<u8>>, peer_key: &Vec<u8>, identity_key: &Vec<u8>, signed_pre_key: &Vec<u8>, threshold: u64, async_dkg_ratchet: bool) -> TripleRatchetStateAndMetadata {
@ -383,287 +665,178 @@ fn json_to_metadata(ratchet_state_and_metadata: TripleRatchetStateAndMetadata, r
Ok(metadata)
}
pub fn triple_ratchet_init_round_1(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> TripleRatchetStateAndMetadata {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: tr.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let metadata = match json_to_metadata(ratchet_state_and_metadata, &ratchet_state) {
Ok(value) => value,
Err(value) => return value,
};
let mut trp = tr.unwrap();
let result = trp.initialize(&metadata);
if result.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: result.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let metadata = result.unwrap();
let metadata_json = match metadata_to_json(&ratchet_state, metadata) {
Ok(value) => value,
Err(value) => return value,
};
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: json.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
return TripleRatchetStateAndMetadata{
ratchet_state: json.unwrap(),
metadata: metadata_json,
};
fn json_to_metadata_result(ratchet_state_and_metadata: TripleRatchetStateAndMetadata, _ratchet_state: &String) -> Result<HashMap<Vec<u8>, P2PChannelEnvelope>, CryptoError> {
let mut metadata = HashMap::<Vec<u8>, P2PChannelEnvelope>::new();
for (k,v) in ratchet_state_and_metadata.metadata {
let env = P2PChannelEnvelope::from_json(v)
.map_err(|e| CryptoError::InvalidEnvelope(e.to_string()))?;
let kb = BASE64_STANDARD.decode(k)
.map_err(|e| CryptoError::InvalidInput(e.to_string()))?;
metadata.insert(kb, env);
}
Ok(metadata)
}
pub fn triple_ratchet_init_round_2(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> TripleRatchetStateAndMetadata {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: tr.err().unwrap().to_string(),
metadata: HashMap::new(),
};
fn metadata_to_json_result(_ratchet_state: &String, metadata: HashMap<Vec<u8>, P2PChannelEnvelope>) -> Result<HashMap<String, String>, CryptoError> {
let mut metadata_json = HashMap::<String, String>::new();
for (k,v) in metadata {
let env = v.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
metadata_json.insert(BASE64_STANDARD.encode(k), env);
}
Ok(metadata_json)
}
let metadata = match json_to_metadata(ratchet_state_and_metadata, &ratchet_state) {
Ok(value) => value,
Err(value) => return value,
};
pub fn triple_ratchet_init_round_1(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> Result<TripleRatchetStateAndMetadata, CryptoError> {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let mut trp = tr.unwrap();
let metadata = json_to_metadata_result(ratchet_state_and_metadata, &ratchet_state)?;
let mut trp = tr;
let result = trp.initialize(&metadata)
.map_err(|e| CryptoError::InvalidInput(e.to_string()))?;
let metadata_json = metadata_to_json_result(&ratchet_state, result)?;
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
Ok(TripleRatchetStateAndMetadata{
ratchet_state: json,
metadata: metadata_json,
})
}
pub fn triple_ratchet_init_round_2(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> Result<TripleRatchetStateAndMetadata, CryptoError> {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let metadata = json_to_metadata_result(ratchet_state_and_metadata, &ratchet_state)?;
let mut trp = tr;
let mut result = HashMap::<Vec<u8>, P2PChannelEnvelope>::new();
for (k, v) in metadata {
let r = trp.receive_poly_frag(&k, &v);
if r.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: r.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let r = trp.receive_poly_frag(&k, &v)
.map_err(|e| CryptoError::InvalidInput(e.to_string()))?;
let opt = r.unwrap();
if opt.is_some() {
result = opt.unwrap();
if let Some(out) = r {
result = out;
}
}
let metadata_json = match metadata_to_json(&ratchet_state, result) {
Ok(value) => value,
Err(value) => return value,
};
let metadata_json = metadata_to_json_result(&ratchet_state, result)?;
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: json.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
return TripleRatchetStateAndMetadata{
ratchet_state: json.unwrap(),
Ok(TripleRatchetStateAndMetadata{
ratchet_state: json,
metadata: metadata_json,
};
})
}
pub fn triple_ratchet_init_round_3(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> TripleRatchetStateAndMetadata {
pub fn triple_ratchet_init_round_3(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> Result<TripleRatchetStateAndMetadata, CryptoError> {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: tr.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let metadata = match json_to_metadata(ratchet_state_and_metadata, &ratchet_state) {
Ok(value) => value,
Err(value) => return value,
};
let metadata = json_to_metadata_result(ratchet_state_and_metadata, &ratchet_state)?;
let mut trp = tr.unwrap();
let mut trp = tr;
let mut result = HashMap::<Vec<u8>, P2PChannelEnvelope>::new();
for (k, v) in metadata {
let r = trp.receive_commitment(&k, &v);
if r.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: r.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let r = trp.receive_commitment(&k, &v)
.map_err(|e| CryptoError::InvalidInput(e.to_string()))?;
let opt = r.unwrap();
if opt.is_some() {
result = opt.unwrap();
if let Some(out) = r {
result = out;
}
}
let metadata_json = match metadata_to_json(&ratchet_state, result) {
Ok(value) => value,
Err(value) => return value,
};
let metadata_json = metadata_to_json_result(&ratchet_state, result)?;
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: json.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
return TripleRatchetStateAndMetadata{
ratchet_state: json.unwrap(),
Ok(TripleRatchetStateAndMetadata{
ratchet_state: json,
metadata: metadata_json,
};
})
}
pub fn triple_ratchet_init_round_4(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> TripleRatchetStateAndMetadata {
pub fn triple_ratchet_init_round_4(ratchet_state_and_metadata: TripleRatchetStateAndMetadata) -> Result<TripleRatchetStateAndMetadata, CryptoError> {
let ratchet_state = ratchet_state_and_metadata.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: tr.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let metadata = match json_to_metadata(ratchet_state_and_metadata, &ratchet_state) {
Ok(value) => value,
Err(value) => return value,
};
let metadata = json_to_metadata_result(ratchet_state_and_metadata, &ratchet_state)?;
let mut trp = tr.unwrap();
let mut result = HashMap::<Vec<u8>, P2PChannelEnvelope>::new();
let mut trp = tr;
let result = HashMap::<Vec<u8>, P2PChannelEnvelope>::new();
for (k, v) in metadata {
let r = trp.recombine(&k, &v);
if r.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: r.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
trp.recombine(&k, &v)
.map_err(|e| CryptoError::InvalidInput(e.to_string()))?;
}
let metadata_json = match metadata_to_json(&ratchet_state, result) {
Ok(value) => value,
Err(value) => return value,
};
let metadata_json = metadata_to_json_result(&ratchet_state, result)?;
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndMetadata{
ratchet_state: json.err().unwrap().to_string(),
metadata: HashMap::new(),
};
}
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
return TripleRatchetStateAndMetadata{
ratchet_state: json.unwrap(),
Ok(TripleRatchetStateAndMetadata{
ratchet_state: json,
metadata: metadata_json,
};
})
}
pub fn triple_ratchet_encrypt(ratchet_state_and_message: TripleRatchetStateAndMessage) -> TripleRatchetStateAndEnvelope {
pub fn triple_ratchet_encrypt(ratchet_state_and_message: TripleRatchetStateAndMessage) -> Result<TripleRatchetStateAndEnvelope, CryptoError> {
let ratchet_state = ratchet_state_and_message.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndEnvelope{
ratchet_state: tr.err().unwrap().to_string(),
envelope: "".to_string(),
};
}
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let mut trp = tr.unwrap();
let result = trp.ratchet_encrypt(&ratchet_state_and_message.message);
let mut trp = tr;
let envelope = trp.ratchet_encrypt(&ratchet_state_and_message.message)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
if result.is_err() {
return TripleRatchetStateAndEnvelope{
ratchet_state: result.err().unwrap().to_string(),
envelope: "".to_string(),
};
}
let envelope_json = envelope.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
let envelope = result.unwrap();
let envelope_json = envelope.to_json();
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
if envelope_json.is_err() {
return TripleRatchetStateAndEnvelope{
ratchet_state: envelope_json.err().unwrap().to_string(),
envelope: "".to_string(),
};
}
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndEnvelope{
ratchet_state: json.err().unwrap().to_string(),
envelope: "".to_string(),
};
}
return TripleRatchetStateAndEnvelope{
ratchet_state: json.unwrap(),
envelope: envelope_json.unwrap(),
};
Ok(TripleRatchetStateAndEnvelope{
ratchet_state: json,
envelope: envelope_json,
})
}
pub fn triple_ratchet_decrypt(ratchet_state_and_envelope: TripleRatchetStateAndEnvelope) -> TripleRatchetStateAndMessage {
pub fn triple_ratchet_decrypt(ratchet_state_and_envelope: TripleRatchetStateAndEnvelope) -> Result<TripleRatchetStateAndMessage, CryptoError> {
let ratchet_state = ratchet_state_and_envelope.ratchet_state.clone();
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return TripleRatchetStateAndMessage{
ratchet_state: tr.err().unwrap().to_string(),
message: vec![],
};
}
let tr = TripleRatchetParticipant::from_json(&ratchet_state)
.map_err(|e| CryptoError::InvalidState(e.to_string()))?;
let mut trp = tr.unwrap();
let env = P2PChannelEnvelope::from_json(ratchet_state_and_envelope.envelope);
if env.is_err() {
return TripleRatchetStateAndMessage{
ratchet_state: env.err().unwrap().to_string(),
message: vec![],
};
}
let mut trp = tr;
let env = P2PChannelEnvelope::from_json(ratchet_state_and_envelope.envelope)
.map_err(|e| CryptoError::InvalidEnvelope(e.to_string()))?;
let result = trp.ratchet_decrypt(&env.unwrap());
let result = trp.ratchet_decrypt(&env)
.map_err(|e| CryptoError::DecryptionFailed(e.to_string()))?;
if result.is_err() {
return TripleRatchetStateAndMessage{
ratchet_state: result.err().unwrap().to_string(),
message: vec![],
};
}
let message = result.0;
let message = result.unwrap().0;
let json = trp.to_json()
.map_err(|e| CryptoError::SerializationFailed(e.to_string()))?;
let json = trp.to_json();
if json.is_err() {
return TripleRatchetStateAndMessage{
ratchet_state: json.err().unwrap().to_string(),
message: vec![],
};
}
return TripleRatchetStateAndMessage{
ratchet_state: json.unwrap(),
Ok(TripleRatchetStateAndMessage{
ratchet_state: json,
message: message,
};
})
}
pub fn triple_ratchet_resize(ratchet_state: String, other: String, id: usize, total: usize) -> Vec<Vec<u8>> {
pub fn triple_ratchet_resize(ratchet_state: String, other: String, id: u64, total: u64) -> Vec<Vec<u8>> {
let tr = TripleRatchetParticipant::from_json(&ratchet_state);
if tr.is_err() {
return vec![vec![1]];
@ -674,7 +847,7 @@ pub fn triple_ratchet_resize(ratchet_state: String, other: String, id: usize, to
return vec![other_bytes.unwrap_err().to_string().as_bytes().to_vec()];
}
let result = tr.unwrap().ratchet_resize(other_bytes.unwrap(), id as usize, total as usize);
if result.is_err() {
return vec![result.unwrap_err().to_string().as_bytes().to_vec()];
}


@ -1,18 +1,56 @@
namespace channel {
// Key Generation
string generate_x448();
string generate_ed448();
string get_pubkey_x448(string key);
string get_pubkey_ed448(string key);
// Signing
string sign_ed448(string key, string message);
string verify_ed448(string public_key, string message, string signature);
// Inbox Message Encryption (Sealed Sender)
string encrypt_inbox_message(string input);
string decrypt_inbox_message(string input);
// X3DH Key Agreement
string sender_x3dh([ByRef] sequence<u8> sending_identity_private_key, [ByRef] sequence<u8> sending_ephemeral_private_key, [ByRef] sequence<u8> receiving_identity_key, [ByRef] sequence<u8> receiving_signed_pre_key, u64 session_key_length);
string receiver_x3dh([ByRef] sequence<u8> sending_identity_private_key, [ByRef] sequence<u8> sending_signed_private_key, [ByRef] sequence<u8> receiving_identity_key, [ByRef] sequence<u8> receiving_ephemeral_key, u64 session_key_length);
// Double Ratchet
string new_double_ratchet([ByRef] sequence<u8> session_key, [ByRef] sequence<u8> sending_header_key, [ByRef] sequence<u8> next_receiving_header_key, boolean is_sender, [ByRef] sequence<u8> sending_ephemeral_private_key, [ByRef] sequence<u8> receiving_ephemeral_key);
[Throws=CryptoError]
DoubleRatchetStateAndEnvelope double_ratchet_encrypt(DoubleRatchetStateAndMessage ratchet_state_and_message);
[Throws=CryptoError]
DoubleRatchetStateAndMessage double_ratchet_decrypt(DoubleRatchetStateAndEnvelope ratchet_state_and_envelope);
// Triple Ratchet
TripleRatchetStateAndMetadata new_triple_ratchet([ByRef] sequence<sequence<u8>> peers, [ByRef] sequence<u8> peer_key, [ByRef] sequence<u8> identity_key, [ByRef] sequence<u8> signed_pre_key, u64 threshold, boolean async_dkg_ratchet);
[Throws=CryptoError]
TripleRatchetStateAndMetadata triple_ratchet_init_round_1(TripleRatchetStateAndMetadata ratchet_state_and_metadata);
[Throws=CryptoError]
TripleRatchetStateAndMetadata triple_ratchet_init_round_2(TripleRatchetStateAndMetadata ratchet_state_and_metadata);
[Throws=CryptoError]
TripleRatchetStateAndMetadata triple_ratchet_init_round_3(TripleRatchetStateAndMetadata ratchet_state_and_metadata);
[Throws=CryptoError]
TripleRatchetStateAndMetadata triple_ratchet_init_round_4(TripleRatchetStateAndMetadata ratchet_state_and_metadata);
[Throws=CryptoError]
TripleRatchetStateAndEnvelope triple_ratchet_encrypt(TripleRatchetStateAndMessage ratchet_state_and_message);
[Throws=CryptoError]
TripleRatchetStateAndMessage triple_ratchet_decrypt(TripleRatchetStateAndEnvelope ratchet_state_and_envelope);
// Triple Ratchet Resize
sequence<sequence<u8>> triple_ratchet_resize(string ratchet_state, string other, u64 id, u64 total);
};
[Error]
enum CryptoError {
"InvalidState",
"InvalidEnvelope",
"DecryptionFailed",
"EncryptionFailed",
"SerializationFailed",
"InvalidInput",
};
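The `[Error] enum CryptoError` above is treated as a "flat" error in UniFFI: the foreign bindings receive the variant plus a rendered message string. As a minimal sketch of what the Rust-side definition could look like, assuming `thiserror` is used for the `Display` implementations (the actual definition lives elsewhere in the crate and is not part of this hunk), matching the `CryptoError::Variant(String)` constructors used in the ratchet functions above:

```rust
use thiserror::Error;

// Sketch only: variant payloads stay on the Rust side; foreign bindings see
// the variant name plus the formatted message.
#[derive(Debug, Error)]
pub enum CryptoError {
    #[error("invalid state: {0}")]
    InvalidState(String),
    #[error("invalid envelope: {0}")]
    InvalidEnvelope(String),
    #[error("decryption failed: {0}")]
    DecryptionFailed(String),
    #[error("encryption failed: {0}")]
    EncryptionFailed(String),
    #[error("serialization failed: {0}")]
    SerializationFailed(String),
    #[error("invalid input: {0}")]
    InvalidInput(String),
}
```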
dictionary DoubleRatchetStateAndEnvelope {


@ -0,0 +1,3 @@
fn main() {
uniffi::uniffi_bindgen_main()
}

Binary image file added (58 KiB); contents not shown.

1
crates/dkls23/.cargo-ok Normal file

@ -0,0 +1 @@
{"v":1}


@ -0,0 +1,6 @@
{
"git": {
"sha1": "be65d367a0b9bf0348b747055c96fcc3bc847ba1"
},
"path_in_vcs": ""
}


@ -0,0 +1,38 @@
{
"name": "DKLs23",
"dockerComposeFile": "docker-compose.yml",
"service": "app",
"workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
"postCreateCommand": "git config --global --add safe.directory /workspaces/${localWorkspaceFolderBasename}",
// Forward to host PSQL, Redis, Node, worker, Rust
//"forwardPorts": [5432, 6379, 3000, 3030, 8000],
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"lldb.executable": "/usr/bin/lldb",
"// Don't let VS Code watch files under ./target
"files.watcherExclude": {
"**/target/**": true
},
"rust-analyzer.checkOnSave.command": "clippy"
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"vadimcn.vscode-lldb",
"mutantdino.resourcemonitor",
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"serayuzgur.crates",
"ms-azuretools.vscode-docker",
"ms-vscode.makefile-tools",
"github.vscode-github-actions",
"jinxdash.prettier-rust",
"streetsidesoftware.code-spell-checker"
]
}
}
}


@ -0,0 +1,7 @@
version: '3.8'
services:
app:
image: ghcr.io/0xcarbon/devcontainer@sha256:f43ac09bb3ba5673621c2273172bac221c8d01067e84e327d913ec7a1788ce5a
volumes:
- ../..:/workspaces:cached


@ -0,0 +1,94 @@
#!/bin/sh
# Function to check if a command exists
check_command() {
if ! command -v "$1" &> /dev/null; then
echo "Error: $1 is not installed. Please install $1 and try again."
exit 1
fi
}
# Check if cargo and pnpm are installed and abort if any of them is missing.
REQUIREMENTS=("cargo" "pnpm")
for REQUIERMENT in ${REQUIREMENTS[@]}; do
check_command $REQUIERMENT;
done
cargo install cargo-audit
cargo install cargo-unmaintained
# install husky and commitlint
pnpm add --save-dev husky @commitlint/{cli,config-conventional}
# init husky
pnpm exec husky init
# Create .commitlintrc
cat <<EOF > commitlint.config.js
module.exports = { extends: ['@commitlint/config-conventional'] };
EOF
# Create pre-commit hook
cat << 'EOF' > .husky/pre-commit
#!/bin/sh
# Run cargo fmt to format all files
cargo fmt
# Get a list of all staged files
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM)
# Re-stage any files that were modified by cargo fmt
for FILE in $STAGED_FILES; do
if [ -f "$FILE" ]; then
git add "$FILE"
fi
done
# Run clippy to ensure code quality
cargo clippy --all-targets
if [ $? -ne 0 ]; then
echo "clippy failed"
exit 1
fi
# Run cargo audit to check for vulnerabilities
cargo audit
if [ $? -ne 0 ]; then
echo "cargo audit found vulnerabilities"
exit 1
fi
# Run cargo unmaintained to check for unmaintained dependencies
cargo unmaintained
if [ $? -ne 0 ]; then
echo "cargo unmaintained found unmaintained dependencies"
exit 1
fi
# Run cargo test
cargo test
EOF
# Create commit-msg hook
cat <<EOF > .husky/commit-msg
#!/bin/sh
pnpm exec commitlint --edit "\$1"
EOF
# add executable permissions
chmod +x .husky/pre-commit
chmod +x .husky/commit-msg
# ignore locally
LOCAL_IGNORE_FILES=(
"package.json"
"pnpm-lock.yaml"
"commitlint.config.js"
"node_modules"
".husky"
)
for FILE in ${LOCAL_IGNORE_FILES[@]}; do
if ! grep -qF -- $FILE .git/info/exclude; then
echo $FILE >> .git/info/exclude
fi
done


@ -0,0 +1,32 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. ...
**Expected behavior**
A clear and concise description of what you expected to happen.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.


@ -0,0 +1,16 @@
---
name: Enhancement request
about: Suggest an improvement of a feature for this project
title: ''
labels: ''
assignees: ''
---
**Describe the enhancement you'd like**
A clear and concise description of what you want to be improved.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the enhancement here.


@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@ -0,0 +1,44 @@
Alore
ERC
DDH
ckd
ote
OTE
len
vec
Vec
mul
sid
prg
DKLs
ecdsa
ElGamal
Schnorr
Fischlin
Pedersen
Chaum
Vanstone
Menezes
Hankerson
Zhou
hasher
Shamir
decommit
Chaincode
dlog
Keccak
counterparties
counterparty
hmac
Hmac
HMAC
secp
Secp
ethereum
bip
bitcoin
AAD
EIP
prehash
seedable
rngs


@ -0,0 +1,25 @@
# See https://github.com/check-spelling/check-spelling/wiki/Configuration-Examples:-excludes
(?:^|/)(?i)COPYRIGHT
(?:^|/)(?i)LICEN[CS]E
(?:^|/).husky/
(?:^|/).vscode/
(?:^|/).github/
(?:^|/).devcontainer/
(?:^|/)Cargo\.toml$
(?:^|/)Cargo\.lock$
\.avi$
\.env$
\.env.json$
\.eslintrc
\.ico$
\.jpe?g$
\.lock$
\.map$
\.min\.
\.mod$
\.mp[34]$
\.png$
\.sol$
\.svg$
\.wav$
ignore$


@ -0,0 +1,14 @@
chmod
commitlint
commitlintrc
libdkls
pnpm
REQUIERMENT
rlib
rustc
Rustfmt
rustup
screenshots
socio
tls
xcarbon


@ -0,0 +1,140 @@
# patch hunk comments
^\@\@ -\d+(?:,\d+|) \+\d+(?:,\d+|) \@\@ .*
# git index header
index [0-9a-z]{7,40}\.\.[0-9a-z]{7,40}
# cid urls
(['"])cid:.*?\g{-1}
# data urls
\(data:.*?\)
(['"])data:.*?\g{-1}
data:[-a-zA-Z=;:/0-9+]*,\S*
# mailto urls
mailto:[-a-zA-Z=;:/?%&0-9+]*
# magnet urls
magnet:[?=:\w]+
# ANSI color codes
\\u001b\[\d+(?:;\d+|)m
# URL escaped characters
\%[0-9A-F]{2}
# IPv6
\b(?:[0-9a-f]{0,4}:){5}[0-9a-f]{0,4}\b
# c99 hex digits (not the full format, just one I've seen)
0x[0-9a-fA-F](?:\.[0-9a-fA-F]*|)[pP]
# Punycode
\bxn--[-0-9a-z]+
# sha256
sha256:[0-9a-f]+
# sha-... -- uses a fancy capture
(['"]|&quot;)[0-9a-f]{40,}\g{-1}
# hex in url queries
=[0-9a-fA-F]+&
# ssh
(?:ssh-\S+|-nistp256) [-a-zA-Z=;:/0-9+]*
# PGP
\b(?:[0-9A-F]{4} ){9}[0-9A-F]{4}\b
# uuid:
[<({"'>][0-9a-fA-F]{8}-(?:[0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[<'"})>]
# hex digits including css/html color classes:
(?:[\\0][xX]|\\u|[uU]\+|#x?|\%23)[0-9a-fA-FgGrR_]{2,}(?:[uUlL]{0,3}|u\d+)\b
# integrity
integrity="sha\d+-[-a-zA-Z=;:/0-9+]{40,}"
# .desktop mime types
^MimeTypes?=.*$
# .desktop localized entries
^[A-Z][a-z]+\[[a-z]+\]=.*$
# IServiceProvider
\bI(?=(?:[A-Z][a-z]{2,})+\b)
# crypt
"\$2[ayb]\$.{56}"
# Input to GitHub JSON
content: "[-a-zA-Z=;:/0-9+]*="
# Python stringprefix / binaryprefix
\b(?:B|BR|Br|F|FR|Fr|R|RB|RF|Rb|Rf|U|UR|Ur|b|bR|br|f|fR|fr|r|rB|rF|rb|rf|u|uR|ur)'
# Regular expressions for (P|p)assword
\([A-Z]\|[a-z]\)[a-z]+
# JavaScript regular expressions
/.*/[gim]*\.test\(
\.replace\(/[^/]*/[gim]*\s*,
# Go regular expressions
regexp\.MustCompile\(`[^`]*`\)
# kubernetes pod status lists
# https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
\w+(?:-\w+)+\s+\d+/\d+\s+(?:Running|Pending|Succeeded|Failed|Unknown)\s+
# kubectl - pods in CrashLoopBackOff
\w+-[0-9a-f]+-\w+\s+\d+/\d+\s+CrashLoopBackOff\s+
# posthog secrets
posthog\.init\((['"])phc_[^"',]+\g{-1},
# Update Lorem based on your content (requires `ge` and `w` from https://github.com/jsoref/spelling; and `review` from https://github.com/check-spelling/check-spelling/wiki/Looking-for-items-locally )
# grep lorem .github/actions/spelling/patterns.txt|perl -pne 's/.*i..\?://;s/\).*//' |tr '|' "\n"|sort -f |xargs -n1 ge|perl -pne 's/^[^:]*://'|sort -u|w|sed -e 's/ .*//'|w|review -
# Warning, while `(?i)` is very neat and fancy, if you have some binary files that aren't proper unicode, you might run into:
## Operation "substitution (s///)" returns its argument for non-Unicode code point 0x1C19AE (the code point will vary).
## You could manually change `(?i)X...` to use `[Xx]...`
## or you could add the files to your `excludes` file (a version after 0.0.19 should identify the file path)
# Lorem
(?:\w|\s|[,.])*\b(?i)(?:amet|consectetur|cursus|dolor|eros|ipsum|lacus|libero|ligula|lorem|magna|neque|nulla|suscipit|tempus)\b(?:\w|\s|[,.])*
# Non-English
[a-zA-Z]*[ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýÿĀāŁłŃńŅņŒœŚśŸŽž][a-zA-ZÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýÿĀāŁłŃńŅņŒœŚśŸŽž]*
# French
# This corpus only had capital letters, but you probably want lowercase ones as well.
\b[LN]'+[a-z]+\b
# the negative lookahead here is to allow catching 'templatesz' as a misspelling
# but to otherwise recognize a Windows path with \templates\foo.template or similar:
\\templates(?![a-z])
# ignore long runs of a single character:
\b([A-Za-z])\g{-1}{3,}\b
# Note that the next example is no longer necessary if you are using
# to match a string starting with a `#`, use a character-class:
[#]backwards
# version suffix <word>v#
[Vv]\d+(?:\b|(?=[a-zA-Z_]))
# Compiler flags
[\t >"'`=(](?:-J|)-[DPWXY]
[\t "'`=(]-[DPWXYLlf]
,-B
# curl arguments
\b(?:\\n|)curl(?:\s+-[a-zA-Z]+)+
# set arguments
\bset\s+-[abefiuox]+\b
# tar arguments
\b(?:\\n|)tar(?:\s+-[a-zA-Z]+|\s[a-z]+)+
# macOS temp folders
/var/folders/\w\w/[+\w]+/(?:T|-Caches-)/
# ignore hex colors
(?:[\\0][xX]|\\u|[uU]\+|#x?|\%23)[0-9a-fA-FgGrR_]{2,}(?:[uUlL]{0,3}|u\d+)\b
# ignore imports
import(?:["'\s]*([\w*{}\n, ]+)from\s*)?["'\s]*([@\w/_-]+)["'\s]*;?
# ignore URL's
https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)\b
# ignore tag's in jsx
<[a-zA-Z0-9]*? \b
# ignore file path
\/[-a-zA-Z0-9@:%._\+~#=]*\.[a-zA-Z0-9()]{1,6}\b
# ignore blockchain account address
^0x[a-fA-F0-9]{40}$\b


@ -0,0 +1,7 @@
^attache$
benefitting
occurence
Sorce
^[Ss]pae
^untill
^wether


@ -0,0 +1,25 @@
# Description
Please include a summary of the change and which issue is fixed. Please delete options that are not relevant.
Issue: [#00](link)
## Features
- [ ] Feat A
- [ ] Feat B
## Fixes
- [ ] Fix A
- [ ] Fix B
## Checklist
- [ ] I have performed a self-review of my own code
- [ ] My changes generate no new warnings
- [ ] I have made corresponding changes to the documentation
- [ ] I have added tests that prove my feat/fix is effective and works
- [ ] New and existing tests pass locally with my changes
## Observations


@ -0,0 +1,28 @@
name: Backend Security Audit
on:
schedule:
- cron: '0 0 * * *'
push:
paths:
- 'Cargo.toml'
- 'Cargo.lock'
jobs:
audit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/audit-check@v1
env:
CARGO_TERM_COLOR: always
with:
token: ${{ secrets.GITHUB_TOKEN }}


@ -0,0 +1,37 @@
name: Rust Test
on:
push:
branches: [main, dev]
paths:
- 'src/**/*.rs'
- 'Cargo.toml'
pull_request:
types: [opened, reopened, synchronize]
paths:
- 'src/**/*.rs'
- 'Cargo.toml'
jobs:
test:
name: Run cargo test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
components: rustc, cargo, llvm-tools-preview
- name: Update packages
run: sudo apt-get update
- name: Install C compiler
run: sudo apt-get install lld lldb clang -y
- name: Configure to use LLVM linker
run: echo "[build]" >> ~/.cargo/config && echo "rustflags = [\"-C\", \"link-arg=-fuse-ld=lld\"]" >> ~/.cargo/config
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --release


@ -0,0 +1,35 @@
# The configs for this spell check can be found at /.github/actions/spelling
name: Spell checking
on:
push:
branches: [main, dev]
pull_request:
branches: [main, dev]
jobs:
spelling:
name: Spell checking
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: check-spelling
uses: check-spelling/check-spelling@v0.0.21
with:
dictionary_source_prefixes: '
{
"cspell_050923": "https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20230509/dictionaries/"
}'
# Extra dictionaries to verify words
extra_dictionaries:
cspell_050923:public-licenses/src/generated/public-licenses.txt
cspell_050923:cryptocurrencies/cryptocurrencies.txt
cspell_050923:software-terms/src/network-protocols.txt
cspell_050923:software-terms/src/software-terms.txt
cspell_050923:bash/src/bash-words.txt
cspell_050923:filetypes/filetypes.txt
cspell_050923:fonts/fonts.txt
cspell_050923:fullstack/src/fullstack.txt
cspell_050923:rust/src/rust.txt
cspell_050923:typescript/src/typescript.txt
experimental_apply_changes_via_bot: 1
check_file_names: 1


@ -0,0 +1,22 @@
name: Cargo clippy
on:
push:
branches: [main, dev]
paths:
- 'src/**/*.rs'
pull_request:
branches: [main, dev]
types: [opened, reopened, synchronize]
paths:
- 'src/**/*.rs'
jobs:
clippy:
name: Run cargo clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- run: cargo clippy --all-targets


@ -0,0 +1,22 @@
name: Cargo fmt
on:
push:
branches: [main, dev]
paths:
- 'src/**/*.rs'
pull_request:
branches: [main, dev]
types: [opened, reopened, synchronize]
paths:
- 'src/**/*.rs'
jobs:
cargo-fmt:
name: Run cargo fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- run: cargo fmt --check -- src/**/*.rs


@ -0,0 +1,27 @@
name: Cargo unmaintained
on:
push:
branches: [main, dev]
paths:
- 'Cargo.toml'
pull_request:
branches: [main, dev]
types: [opened, reopened, synchronize]
paths:
- 'Cargo.toml'
jobs:
cargo-unmaintained:
name: Run cargo unmaintained
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: Install cargo unmaintained
run: cargo install cargo-unmaintained
- name: Run cargo unmaintained
run: cargo unmaintained
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

2
crates/dkls23/.gitignore vendored Normal file

@ -0,0 +1,2 @@
/target
/Cargo.lock

42
crates/dkls23/.vscode/settings.json vendored Normal file

@ -0,0 +1,42 @@
{
"cSpell.words": [
"Alore",
"bip",
"bitcoin",
"Chaincode",
"Chaum",
"ckd",
"counterparties",
"counterparty",
"DDH",
"decommit",
"DKLs",
"dlog",
"ecdsa",
"ElGamal",
"ethereum",
"Fischlin",
"Hankerson",
"hasher",
"hmac",
"Hmac",
"HMAC",
"Keccak",
"len",
"Menezes",
"mul",
"ote",
"OTE",
"Pedersen",
"prg",
"Schnorr",
"secp",
"Secp",
"Shamir",
"sid",
"Vanstone",
"vec",
"Vec",
"Zhou"
]
}


@ -0,0 +1,45 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [support@0xcarbon.org]. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html).
For answers to common questions about this code of conduct, see [https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq).


@ -0,0 +1,91 @@
# Contributing to DKLs23
First off, thank you for considering contributing to our project! We appreciate your time and effort.
## Table of Contents
- [How to Contribute](#how-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
- [Submitting Changes](#submitting-changes)
- [Setup Instructions](#setup-instructions)
- [Installing Rust](#installing-rust)
- [Cloning the Repository](#cloning-the-repository)
- [Installing Dependencies](#installing-dependencies)
- [Building the Project](#building-the-project)
- [Code Style](#code-style)
- [Running Tests](#running-tests)
- [Code of Conduct](#code-of-conduct)
- [Acknowledgments](#acknowledgments)
## How to Contribute
### Reporting Bugs
If you find a bug, please report it by opening an issue on our [GitHub Issues](https://github.com/0xCarbon/DKLs23/issues) page. Include the following details:
- A clear and descriptive title.
- A detailed description of the issue.
- Steps to reproduce the issue.
- Any relevant logs or screenshots.
### Suggesting Enhancements
We welcome suggestions for new features or improvements. Please open an issue on our [GitHub Issues](https://github.com/0xCarbon/DKLs23/issues) page and describe your idea in detail. Include:
- A clear and descriptive title.
- A detailed description of the enhancement.
- Any relevant examples or use cases.
### Submitting Changes
1. Fork the repository.
2. Create a new branch following [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) pattern (`git checkout -b <branch-name>`)
3. Make your changes.
4. Commit your changes (`git commit -m 'feat: describe your feature'`).
5. Push to the branch (`git push origin <branch-name>`).
6. Create a new Pull Request.
## Setup Instructions
### Installing Rust
To contribute to this project, you need to have Rust installed on your machine. You can install Rust by following these steps:
1. Open a terminal.
2. Run the following command to install Rust using `rustup`:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
3. Follow the on-screen instructions to complete the installation.
4. After installation, ensure that Rust is installed correctly by running:
```bash
rustc --version
```
### Cloning the Repository
Once Rust is installed, you can clone the repository:
1. Open a terminal.
2. Run the following commands:
```bash
git clone https://github.com/0xCarbon/DKLs23
cd DKLs23
```
### Installing Dependencies
This project uses Cargo, Rust's package manager, to manage dependencies. To install the necessary dependencies, run:
```bash
cargo build
```
This command will fetch all the dependencies and build them along with the project.
### Building the Project
To build the project, run:
```bash
cargo build
```
This will compile DKLs23 and create rust libraries (`libdkls23.d` and `libdkls23.rlib`) in the `target/debug` directory.
## Code Style
Please follow our coding conventions and style guides. We use [Rustfmt](https://github.com/rust-lang/rustfmt) for formatting Rust code. You can run `cargo fmt` to format your code.
## Running Tests
Make sure all tests pass before submitting your changes. You can run tests using `cargo test`.
## Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Acknowledgments
Thank you for contributing!

31
crates/dkls23/Cargo.toml Normal file

@ -0,0 +1,31 @@
[package]
edition = "2021"
name = "dkls23"
version = "0.1.1"
description = "DKLs23 Threshold ECDSA in Three Rounds"
readme = "README.md"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/0xCarbon/DKLs23"
[lib]
name = "dkls23"
path = "src/lib.rs"
[dependencies]
bitcoin_hashes = "0.13"
elliptic-curve = { version = "0.13", features = ["serde", "sec1"] }
getrandom = "0.2"
hex = "0.4"
k256 = { version = "0.13", features = ["serde"] }
p256 = { version = "0.13", features = ["serde"] }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_bytes = "0.11.12"
sha3 = "0.10"
[features]
insecure-rng = []
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2"
features = ["js"]


@ -0,0 +1,25 @@
[package]
name = "dkls23"
version = "0.1.1"
edition = "2021"
license = "Apache-2.0 OR MIT"
description = "DKLs23 Threshold ECDSA in Three Rounds"
repository = "https://github.com/0xCarbon/DKLs23"
readme = "README.md"
[dependencies]
k256 = { version = "0.13", features = ["serde"] }
bitcoin_hashes = "0.13"
sha3 = "0.10"
rand = "0.8"
getrandom = "0.2"
hex = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_bytes = "0.11.12"
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2"
features = ["js"]
[features]
insecure-rng = []


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2024 0xCarbon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
crates/dkls23/LICENSE-MIT Normal file

@ -0,0 +1,25 @@
The MIT License (MIT)
=====================
Copyright © 2024 0xCarbon
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the “Software”), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

64
crates/dkls23/README.md Normal file

@ -0,0 +1,64 @@
<div align="center">
<picture>
<source srcset=".assets/dkls23-banner.png" media="(prefers-color-scheme: dark)">
<img src=".assets/dkls23-banner.png" alt="DKLs logo">
</picture>
<p>
<a href="https://github.com/0xCarbon/DKLs23/actions?query=workflow%3Abackend-ci">
<img src="https://github.com/0xCarbon/DKLs23/actions/workflows/backend-ci.yml/badge.svg?event=push" alt="Test Status">
</a>
<a href="https://crates.io/crates/dkls23">
<img src="https://img.shields.io/crates/v/dkls23.svg" alt="DKLs23 Crate">
</a>
<a href="https://docs.rs/dkls23/latest/dkls23/">
<img src="https://docs.rs/dkls23/badge.svg" alt="DKLs23 Docs">
</a>
</p>
</div>
<br />
## Overview
DKLs23 is an advanced open-source implementation of the Threshold ECDSA method (see https://eprint.iacr.org/2023/765.pdf). The primary goal of DKLs23 is to compute a secret key without centralizing it in a single location. Instead, it leverages multiple parties to compute the secret key, with each party receiving a key share. This approach enhances security by eliminating single points of failure.
## Getting Started
These instructions will get you a copy of the project up and running on your local machine for development and testing purposes.
### Installation
A step-by-step guide to installing the project.
1. **Install Rust using `rustup`**
``` bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
2. **Clone the repository:**
```bash
git clone https://github.com/0xCarbon/DKLs23
cd DKLs23
```
3. **Install dependencies:**
```bash
cargo build
```
## Contributing
We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started.
## Security
For information on how to report security vulnerabilities, please see our [SECURITY.md](SECURITY.md).
## Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## License
This project is licensed under either of
- [Apache License, Version 2.0](LICENSE-APACHE)
- [MIT license](LICENSE-MIT)
at your option.
## Authors
See the list of [contributors](https://github.com/0xCarbon/DKLs23/contributors) who participated in this project.

19
crates/dkls23/SECURITY.md Normal file

@ -0,0 +1,19 @@
# Security Policy
## Introduction
Thank you for helping us keep our project secure. This document outlines our security policy and provides instructions for reporting vulnerabilities.
## Reporting a Vulnerability
If you discover a security vulnerability, please report it to us in a responsible manner. To report a vulnerability, please email us at [security@0xcarbon.org]. Include the following details in your report:
- A description of the vulnerability
- Steps to reproduce the vulnerability
- Any potential impact of the vulnerability
## Expected Response Time
We will acknowledge your report within 48 hours and provide a detailed response within 5 business days, including an evaluation of the vulnerability and an expected resolution date.
## Responsible Disclosure
We ask that you do not disclose the vulnerability publicly until we have had a chance to address it. We believe in responsible disclosure and will work with you to ensure that vulnerabilities are fixed promptly.
## Acknowledgments
Thank you for helping us keep our project secure!

68
crates/dkls23/src/lib.rs Normal file

@ -0,0 +1,68 @@
//! A library for dealing with the `DKLs23` protocol (see <https://eprint.iacr.org/2023/765.pdf>)
//! and related protocols.
//!
//! Written and used by Alore.
#![recursion_limit = "512"]
#![forbid(unsafe_code)]
pub mod protocols;
pub mod utilities;
// The following constants should not be changed!
// They are the same as the reference implementation of DKLs19:
// https://gitlab.com/neucrypt/mpecdsa/-/blob/release/src/lib.rs
/// Computational security parameter `lambda_c` from `DKLs23`.
/// We take it to be the same as the parameter `kappa`.
pub const RAW_SECURITY: u16 = 256;
/// `RAW_SECURITY` divided by 8 (used for arrays of bytes)
pub const SECURITY: u16 = 32;
/// Statistical security parameter `lambda_s` from `DKLs23`.
pub const STAT_SECURITY: u16 = 80;
// ---------------------------------------------------------------------------
// Curve-generic support
// ---------------------------------------------------------------------------
use elliptic_curve::group::Group;
use elliptic_curve::{Curve, CurveArithmetic};
/// Trait alias that captures all elliptic-curve bounds required by DKLs23.
///
/// It is implemented for [`k256::Secp256k1`] and [`p256::NistP256`].
///
/// Because Rust does not propagate `where` clauses from a trait definition to
/// its users, every generic function `fn foo<C: DklsCurve>(...)` must repeat
/// the associated-type bounds it actually needs (e.g.
/// `C::Scalar: Reduce<U256>`). The trait itself is intentionally kept narrow
/// so that adding a new curve only requires one `impl` line.
pub trait DklsCurve: CurveArithmetic + Curve + 'static {}
impl DklsCurve for k256::Secp256k1 {}
impl DklsCurve for p256::NistP256 {}
/// Returns the canonical generator of the curve in affine coordinates.
///
/// This abstracts over `ProjectivePoint::generator().to_affine()` which is the
/// idiomatic way to obtain the generator in the RustCrypto ecosystem (the
/// generator lives on `ProjectivePoint` via [`group::Group::generator`] and is
/// converted to affine via [`group::Curve::to_affine`]).
pub fn generator<C: DklsCurve>() -> C::AffinePoint
where
C::ProjectivePoint: Group,
{
use elliptic_curve::group::Curve as _;
C::ProjectivePoint::generator().to_affine()
}
/// Returns the identity (point at infinity) in affine coordinates.
///
/// In the RustCrypto elliptic-curve crates, `AffinePoint::default()` yields
/// the identity element.
pub fn identity<C: DklsCurve>() -> C::AffinePoint
where
C::AffinePoint: Default,
{
C::AffinePoint::default()
}
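As the `DklsCurve` documentation above notes, generic callers must restate the associated-type bounds they rely on. A minimal illustrative sketch, not part of the crate, of a helper that reduces 32 big-endian bytes to a scalar and multiplies the generator; it assumes `dkls23` and `k256` as dependencies and mirrors the `reduce`/`to_affine` pattern used elsewhere in this diff.

```rust
use dkls23::{generator, DklsCurve};
use elliptic_curve::bigint::U256;
use elliptic_curve::group::Curve as _; // brings to_affine() into scope
use elliptic_curve::ops::Reduce;

/// The `Reduce<U256>` bound has to be repeated here, exactly as the
/// `DklsCurve` doc comment describes.
fn scalar_bytes_times_g<C: DklsCurve>(bytes: [u8; 32]) -> C::AffinePoint
where
    C::Scalar: Reduce<U256>,
{
    let k = C::Scalar::reduce(U256::from_be_slice(&bytes));
    (C::ProjectivePoint::from(generator::<C>()) * k).to_affine()
}

fn main() {
    // Works for any curve with a DklsCurve impl, e.g. secp256k1.
    let point = scalar_bytes_times_g::<k256::Secp256k1>([7u8; 32]);
    println!("{point:?}");
}
```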


@ -0,0 +1,91 @@
//! `DKLs23` main protocols and related ones.
//!
//! Some structs appearing in most of the protocols are defined here.
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
use crate::protocols::derivation::DerivData;
use crate::utilities::multiplication::{MulReceiver, MulSender};
use crate::utilities::zero_shares::ZeroShare;
use crate::DklsCurve;
pub mod derivation;
pub mod dkg;
pub mod re_key;
pub mod refresh;
pub mod signing;
/// Contains the values `t` and `n` from `DKLs23`.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Parameters {
pub threshold: u8, //t
pub share_count: u8, //n
}
/// Represents a party after key generation ready to sign a message.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize, C::AffinePoint: Serialize",
deserialize = "C::Scalar: Deserialize<'de>, C::AffinePoint: Deserialize<'de>"
))]
pub struct Party<C: DklsCurve> {
pub parameters: Parameters,
pub party_index: u8,
pub session_id: Vec<u8>,
/// Behaves as the secret key share.
pub poly_point: C::Scalar,
/// Public key.
pub pk: C::AffinePoint,
/// Used for computing shares of zero during signing.
pub zero_share: ZeroShare,
/// Initializations for two-party multiplication.
/// The key in the `BTreeMap` represents the other party.
pub mul_senders: BTreeMap<u8, MulSender<C>>,
pub mul_receivers: BTreeMap<u8, MulReceiver<C>>,
/// Data for BIP-32 derivation.
pub derivation_data: DerivData<C>,
/// Ethereum address calculated from the public key.
pub eth_address: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Abort {
/// Index of the party generating the abort message.
pub index: u8,
pub description: String,
}
impl Abort {
/// Creates an instance of `Abort`.
#[must_use]
pub fn new(index: u8, description: &str) -> Abort {
Abort {
index,
description: String::from(description),
}
}
}
/// Saves the sender and receiver of a message.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct PartiesMessage {
pub sender: u8,
pub receiver: u8,
}
impl PartiesMessage {
/// Swaps the sender with the receiver, returning another instance of `PartiesMessage`.
#[must_use]
pub fn reverse(&self) -> PartiesMessage {
PartiesMessage {
sender: self.receiver,
receiver: self.sender,
}
}
}
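A small usage sketch, purely illustrative, showing how the plain data structs above are constructed (a 2-of-3 configuration) and how `PartiesMessage::reverse` swaps the endpoints:

```rust
use dkls23::protocols::{Parameters, PartiesMessage};

fn main() {
    // t = 2 parties out of n = 3 are needed to sign.
    let params = Parameters { threshold: 2, share_count: 3 };
    assert!(params.threshold <= params.share_count);

    // Reversing a message header swaps sender and receiver.
    let msg = PartiesMessage { sender: 1, receiver: 2 };
    let rev = msg.reverse();
    assert_eq!((rev.sender, rev.receiver), (2, 1));
}
```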


@ -0,0 +1,565 @@
//! Adaptation of BIP-32 to the threshold setting.
//!
//! This file implements a key derivation mechanism for threshold wallets
//! based on BIP-32 (<https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki>).
//! Each party can derive their key share individually so that the secret
//! key reconstructed corresponds to the derivation (via BIP-32) of the
//! original secret key.
//!
//! We follow mainly this repository:
//! <https://github.com/rust-bitcoin/rust-bitcoin/blob/master/bitcoin/src/bip32.rs>.
//!
//! ATTENTION: Since no party has the full secret key, it is not convenient
//! to do hardened derivation. Thus, we only implement normal derivation.
use bitcoin_hashes::{hash160, sha512, Hash, HashEngine, Hmac, HmacEngine};
use elliptic_curve::bigint::U256;
use elliptic_curve::group::{Curve as _, GroupEncoding};
use elliptic_curve::ops::Reduce;
use elliptic_curve::CurveArithmetic;
use elliptic_curve::{Field, PrimeField};
use serde::{Deserialize, Serialize};
use crate::protocols::Party;
use crate::utilities::hashes::point_to_bytes;
use crate::DklsCurve;
use super::dkg::compute_eth_address;
/// Fingerprint of a key as in BIP-32.
///
/// See <https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki>.
pub type Fingerprint = [u8; 4];
/// Chaincode of a key as in BIP-32.
///
/// See <https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki>.
pub type ChainCode = [u8; 32];
/// Represents an error during the derivation protocol.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ErrorDeriv {
pub description: String,
}
impl ErrorDeriv {
/// Creates an instance of `ErrorDeriv`.
#[must_use]
pub fn new(description: &str) -> ErrorDeriv {
ErrorDeriv {
description: String::from(description),
}
}
}
/// Contains all the data needed for derivation.
///
/// The values that are really needed are only `poly_point`,
/// `pk` and `chain_code`, but we also include the other ones
/// if someone wants to retrieve the full extended public key
/// as in BIP-32. The only field missing is the one for the
/// network, but it can be easily inferred from context.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize, C::AffinePoint: Serialize",
deserialize = "C::Scalar: Deserialize<'de>, C::AffinePoint: Deserialize<'de>"
))]
pub struct DerivData<C: CurveArithmetic> {
/// Counts after how many derivations this key is obtained from the master node.
pub depth: u8,
/// Index used to obtain this key from its parent.
pub child_number: u32,
/// Identifier of the parent key.
pub parent_fingerprint: Fingerprint,
/// Behaves as the secret key share.
pub poly_point: C::Scalar,
/// Public key.
pub pk: C::AffinePoint,
/// Extra entropy given by BIP-32.
pub chain_code: ChainCode,
}
/// Maximum depth.
pub const MAX_DEPTH: u8 = 255;
/// Maximum child number.
///
/// This is the limit since we are not implementing hardened derivation.
pub const MAX_CHILD_NUMBER: u32 = 0x7FFF_FFFF;
impl<C: DklsCurve> DerivData<C>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding + Default,
{
/// Computes the "tweak" needed to derive a secret key. In the process,
/// it also produces the chain code and the parent fingerprint.
///
/// This is an adaptation of `ckd_pub_tweak` from the repository:
/// <https://github.com/rust-bitcoin/rust-bitcoin/blob/master/bitcoin/src/bip32.rs>.
///
/// # Errors
///
/// Will return `Err` if the HMAC result is too big (very unlikely).
pub fn child_tweak(
&self,
child_number: u32,
) -> Result<(C::Scalar, ChainCode, Fingerprint), ErrorDeriv> {
let mut hmac_engine: HmacEngine<sha512::Hash> = HmacEngine::new(&self.chain_code[..]);
let pk_as_bytes = point_to_bytes::<C>(&self.pk);
hmac_engine.input(&pk_as_bytes);
hmac_engine.input(&child_number.to_be_bytes());
let hmac_result: Hmac<sha512::Hash> = Hmac::from_engine(hmac_engine);
let number_for_tweak = U256::from_be_slice(&hmac_result[..32]);
let tweak = C::Scalar::reduce(number_for_tweak);
// If reduce produced zero (vanishingly unlikely), return None.
// This is the generic equivalent of the BIP-32 check "if tweak >= n".
if tweak.is_zero().into() {
return Err(ErrorDeriv::new(
"Very improbable: Child index results in value not allowed by BIP-32!",
));
}
let chain_code: ChainCode = hmac_result[32..]
.try_into()
.expect("Half of hmac is guaranteed to be 32 bytes!");
// We also calculate the fingerprint here for convenience.
let mut engine = hash160::Hash::engine();
engine.input(&pk_as_bytes);
let fingerprint: Fingerprint = hash160::Hash::from_engine(engine)[0..4]
.try_into()
.expect("4 is the fingerprint length!");
Ok((tweak, chain_code, fingerprint))
}
/// Derives an instance of `DerivData` given a child number.
///
/// # Errors
///
/// Will return `Err` if the depth is already at the maximum value,
/// if the child number is invalid or if `child_tweak` fails.
/// It will also fail if the new public key is invalid (very unlikely).
pub fn derive_child(&self, child_number: u32) -> Result<DerivData<C>, ErrorDeriv> {
if self.depth == MAX_DEPTH {
return Err(ErrorDeriv::new("We are already at maximum depth!"));
}
if child_number > MAX_CHILD_NUMBER {
return Err(ErrorDeriv::new(
"Child index should be between 0 and 2^31 - 1!",
));
}
let (tweak, new_chain_code, parent_fingerprint) = self.child_tweak(child_number)?;
// If every party shifts their poly_point by the same tweak,
// the resulting secret key also shifts by the same amount.
// Note that the tweak depends only on public data.
let new_poly_point = self.poly_point + tweak;
let new_pk = (C::ProjectivePoint::from(crate::generator::<C>()) * tweak + C::ProjectivePoint::from(self.pk)).to_affine();
if new_pk == crate::identity::<C>() {
return Err(ErrorDeriv::new(
"Very improbable: Child index results in value not allowed by BIP-32!",
));
}
Ok(DerivData {
depth: self.depth + 1,
child_number,
parent_fingerprint,
poly_point: new_poly_point,
pk: new_pk,
chain_code: new_chain_code,
})
}
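A short editorial note on why the per-party shift in `derive_child` is sound, assuming `poly_point` is a Shamir share and the secret key is reconstructed by Lagrange interpolation at zero: the Lagrange coefficients sum to one, so adding the same public tweak to every share adds exactly that tweak to the reconstructed key, and the public key moves by the corresponding multiple of the generator.

```latex
sk = \sum_i \lambda_i\, x_i, \qquad \sum_i \lambda_i = 1
\;\Longrightarrow\;
sk' = \sum_i \lambda_i (x_i + t) = sk + t, \qquad pk' = pk + t \cdot G
```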
/// Derives an instance of `DerivData` following a path
/// on the "key tree".
///
/// See <https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki>
/// for the description of a possible path (and don't forget that
/// hardened derivations are not implemented).
///
/// # Errors
///
/// Will return `Err` if the path is invalid or if `derive_child` fails.
pub fn derive_from_path(&self, path: &str) -> Result<DerivData<C>, ErrorDeriv> {
let path_parsed = parse_path(path)?;
let mut final_data = self.clone();
for child_number in path_parsed {
final_data = final_data.derive_child(child_number)?;
}
Ok(final_data)
}
}
// We implement the derivation functions for Party as well.
/// Implementations related to BIP-32 derivation ([read more](self)).
impl<C: DklsCurve + 'static> Party<C>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding + Default,
{
/// Derives an instance of `Party` given a child number.
///
/// # Errors
///
/// Will return `Err` if the `DerivData::derive_child` fails.
pub fn derive_child(&self, child_number: u32) -> Result<Party<C>, ErrorDeriv> {
let new_derivation_data = self.derivation_data.derive_child(child_number)?;
// We don't change information relating other parties,
// we only update our key share, our public key and the address.
let new_address = compute_eth_address::<C>(&new_derivation_data.pk);
Ok(Party {
parameters: self.parameters.clone(),
party_index: self.party_index,
session_id: self.session_id.clone(),
poly_point: new_derivation_data.poly_point,
pk: new_derivation_data.pk,
zero_share: self.zero_share.clone(),
mul_senders: self.mul_senders.clone(),
mul_receivers: self.mul_receivers.clone(),
derivation_data: new_derivation_data,
eth_address: new_address,
})
}
/// Derives an instance of `Party` following a path
/// on the "key tree".
///
/// See <https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki>
/// for the description of a possible path (and don't forget that
/// hardened derivations are not implemented).
///
/// # Errors
///
/// Will return `Err` if the `DerivData::derive_from_path` fails.
pub fn derive_from_path(&self, path: &str) -> Result<Party<C>, ErrorDeriv> {
let new_derivation_data = self.derivation_data.derive_from_path(path)?;
// We don't change information relating other parties,
// we only update our key share, our public key and the address.
let new_address = compute_eth_address::<C>(&new_derivation_data.pk);
Ok(Party {
parameters: self.parameters.clone(),
party_index: self.party_index,
session_id: self.session_id.clone(),
poly_point: new_derivation_data.poly_point,
pk: new_derivation_data.pk,
zero_share: self.zero_share.clone(),
mul_senders: self.mul_senders.clone(),
mul_receivers: self.mul_receivers.clone(),
derivation_data: new_derivation_data,
eth_address: new_address,
})
}
}
/// Takes a path as in BIP-32 (for normal derivation),
/// and transforms it into a vector of child numbers.
///
/// # Errors
///
/// Will return `Err` if the path is not valid or empty.
pub fn parse_path(path: &str) -> Result<Vec<u32>, ErrorDeriv> {
let mut parts = path.split('/');
if parts.next().unwrap_or_default() != "m" {
return Err(ErrorDeriv::new("Invalid path format!"));
}
let mut path_parsed = Vec::new();
for part in parts {
match part.parse::<u32>() {
Ok(num) if num <= MAX_CHILD_NUMBER => path_parsed.push(num),
_ => {
return Err(ErrorDeriv::new(
"Invalid path format or index out of bounds!",
))
}
}
}
if path_parsed.len() > MAX_DEPTH as usize {
return Err(ErrorDeriv::new("The path is too long!"));
}
Ok(path_parsed)
}
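For illustration, a hypothetical test of `parse_path` reflecting the rules enforced above (paths start with `m`, only normal derivation is supported, and indices must stay below 2^31):

```rust
#[test]
fn parse_path_examples() {
    assert_eq!(parse_path("m/0/1/2/3").unwrap(), vec![0, 1, 2, 3]);
    assert!(parse_path("0/1").is_err());     // missing the leading "m"
    assert!(parse_path("m/0'/1").is_err());  // hardened indices are rejected
}
```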
#[cfg(test)]
mod tests {
use super::*;
use crate::protocols::re_key::re_key;
use crate::protocols::signing::*;
use crate::protocols::Parameters;
use crate::utilities::hashes::*;
use crate::utilities::rng;
use hex;
use k256::elliptic_curve::Field;
use rand::Rng;
use std::collections::BTreeMap;
type C = k256::Secp256k1;
type ProjectivePoint = <C as CurveArithmetic>::ProjectivePoint;
/// Tests if the method `derive_from_path` from [`DerivData`]
/// works properly by checking its output against a known value.
///
/// Since this function calls the other methods in this struct,
/// they are implicitly tested as well.
#[test]
fn test_derivation() {
// The following values were calculated at random with: https://bitaps.com/bip32.
// You should test other values as well.
let sk = <k256::Scalar as Reduce<U256>>::reduce(U256::from_be_hex(
"6728f18f7163f7a0c11cc0ad53140afb4e345d760f966176865a860041549903",
));
let pk = (ProjectivePoint::from(crate::generator::<C>()) * sk).to_affine();
let chain_code: ChainCode =
hex::decode("6f990adb9337033001af2487a8617f68586c4ea17433492bbf1659f6e4cf9564")
.unwrap()
.try_into()
.unwrap();
let data: DerivData<C> = DerivData {
depth: 0,
child_number: 0,
parent_fingerprint: [0u8; 4],
poly_point: sk,
pk,
chain_code,
};
// You should try other paths as well.
let path = "m/0/1/2/3";
let try_derive = data.derive_from_path(path);
match try_derive {
Err(error) => {
panic!("Error: {:?}", error.description);
}
Ok(child) => {
assert_eq!(child.depth, 4);
assert_eq!(child.child_number, 3);
assert_eq!(hex::encode(child.parent_fingerprint), "9502bb8b");
assert_eq!(
hex::encode(scalar_to_bytes::<C>(&child.poly_point)),
"bdebf4ed48fae0b5b3ed6671496f7e1d741996dbb30d79f990933892c8ed316a"
);
assert_eq!(
hex::encode(point_to_bytes::<C>(&child.pk)),
"037c892dca96d4c940aafb3a1e65f470e43fba57b3146efeb312c2a39a208fffaa"
);
assert_eq!(
hex::encode(child.chain_code),
"c6536c2f5c232aa7613652831b7a3b21e97f4baa3114a3837de3764759f5b2aa"
);
}
}
}
/// Tests if the key shares are still capable of executing
/// the signing protocol after being derived.
#[test]
fn test_derivation_and_signing() {
let threshold = rng::get_rng().gen_range(2..=5); // You can change the ranges here.
let offset = rng::get_rng().gen_range(0..=5);
let parameters = Parameters {
threshold,
share_count: threshold + offset,
}; // You can fix the parameters if you prefer.
// We use the re_key function to quickly sample the parties.
let session_id = rng::get_rng().gen::<[u8; 32]>();
let secret_key = <k256::Scalar as Field>::random(rng::get_rng());
let parties = re_key::<C>(&parameters, &session_id, &secret_key, None);
// DERIVATION
let path = "m/0/1/2/3";
let mut derived_parties: Vec<Party<C>> = Vec::with_capacity(parameters.share_count as usize);
for i in 0..parameters.share_count {
let result = parties[i as usize].derive_from_path(path);
match result {
Err(error) => {
panic!("Error for Party {}: {:?}", i, error.description);
}
Ok(party) => {
derived_parties.push(party);
}
}
}
let parties = derived_parties;
// SIGNING (as in test_signing)
let sign_id = rng::get_rng().gen::<[u8; 32]>();
let message_to_sign = hash("Message to sign!".as_bytes(), &[]);
// For simplicity, we are testing only the first parties.
let executing_parties: Vec<u8> = Vec::from_iter(1..=parameters.threshold);
// Each party prepares their data for this signing session.
let mut all_data: BTreeMap<u8, SignData> = BTreeMap::new();
for party_index in executing_parties.clone() {
//Gather the counterparties
let mut counterparties = executing_parties.clone();
counterparties.retain(|index| *index != party_index);
all_data.insert(
party_index,
SignData {
sign_id: sign_id.to_vec(),
counterparties,
message_hash: message_to_sign,
},
);
}
// Phase 1
let mut unique_kept_1to2: BTreeMap<u8, UniqueKeep1to2<C>> = BTreeMap::new();
let mut kept_1to2: BTreeMap<u8, BTreeMap<u8, KeepPhase1to2<C>>> = BTreeMap::new();
let mut transmit_1to2: BTreeMap<u8, Vec<TransmitPhase1to2>> = BTreeMap::new();
for party_index in executing_parties.clone() {
let (unique_keep, keep, transmit) = parties[(party_index - 1) as usize]
.sign_phase1(all_data.get(&party_index).unwrap());
unique_kept_1to2.insert(party_index, unique_keep);
kept_1to2.insert(party_index, keep);
transmit_1to2.insert(party_index, transmit);
}
// Communication round 1
let mut received_1to2: BTreeMap<u8, Vec<TransmitPhase1to2>> = BTreeMap::new();
// Iterate over each party_index in executing_parties
for &party_index in &executing_parties {
let new_row: Vec<TransmitPhase1to2> = transmit_1to2
.iter()
.flat_map(|(_, messages)| {
messages
.iter()
.filter(|message| message.parties.receiver == party_index)
.cloned()
})
.collect();
received_1to2.insert(party_index, new_row);
}
// Phase 2
let mut unique_kept_2to3: BTreeMap<u8, UniqueKeep2to3<C>> = BTreeMap::new();
let mut kept_2to3: BTreeMap<u8, BTreeMap<u8, KeepPhase2to3<C>>> = BTreeMap::new();
let mut transmit_2to3: BTreeMap<u8, Vec<TransmitPhase2to3<C>>> = BTreeMap::new();
for party_index in executing_parties.clone() {
let result = parties[(party_index - 1) as usize].sign_phase2(
all_data.get(&party_index).unwrap(),
unique_kept_1to2.get(&party_index).unwrap(),
kept_1to2.get(&party_index).unwrap(),
received_1to2.get(&party_index).unwrap(),
);
match result {
Err(abort) => {
panic!("Party {} aborted: {:?}", abort.index, abort.description);
}
Ok((unique_keep, keep, transmit)) => {
unique_kept_2to3.insert(party_index, unique_keep);
kept_2to3.insert(party_index, keep);
transmit_2to3.insert(party_index, transmit);
}
}
}
// Communication round 2
let mut received_2to3: BTreeMap<u8, Vec<TransmitPhase2to3<C>>> = BTreeMap::new();
// Use references to avoid cloning executing_parties
for &party_index in &executing_parties {
let filtered_messages: Vec<TransmitPhase2to3<C>> = transmit_2to3
.iter()
.flat_map(|(_, messages)| {
messages
.iter()
.filter(|message| message.parties.receiver == party_index)
})
.cloned()
.collect();
received_2to3.insert(party_index, filtered_messages);
}
// Phase 3
let mut x_coords: Vec<String> = Vec::with_capacity(parameters.threshold as usize);
let mut broadcast_3to4: Vec<Broadcast3to4<C>> =
Vec::with_capacity(parameters.threshold as usize);
for party_index in executing_parties.clone() {
let result = parties[(party_index - 1) as usize].sign_phase3(
all_data.get(&party_index).unwrap(),
unique_kept_2to3.get(&party_index).unwrap(),
kept_2to3.get(&party_index).unwrap(),
received_2to3.get(&party_index).unwrap(),
);
match result {
Err(abort) => {
panic!("Party {} aborted: {:?}", abort.index, abort.description);
}
Ok((x_coord, broadcast)) => {
x_coords.push(x_coord);
broadcast_3to4.push(broadcast);
}
}
}
// We verify all parties got the same x coordinate.
let x_coord = x_coords[0].clone(); // We take the first one as reference.
for i in 1..parameters.threshold {
assert_eq!(x_coord, x_coords[i as usize]);
}
// Communication round 3
// This is a broadcast to all parties. The desired result is already broadcast_3to4.
// Phase 4
let some_index = executing_parties[0];
let result = parties[(some_index - 1) as usize].sign_phase4(
all_data.get(&some_index).unwrap(),
&x_coord,
&broadcast_3to4,
true,
);
if let Err(abort) = result {
panic!("Party {} aborted: {:?}", abort.index, abort.description);
}
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,229 @@
//! Splits a secret key into a threshold signature scheme.
//!
//! This file implements a re-key function: a user who already has
//! an address can split their secret key into a threshold signature
//! scheme. Since they start with the secret key, we treat them as a
//! "trusted dealer" who can compute all the `DKLs23` data for the
//! other parties. Hence, this function is computed locally and doesn't
//! need any communication.
use std::collections::BTreeMap;
use elliptic_curve::group::{Curve as _, GroupEncoding};
use elliptic_curve::Field;
use crate::utilities::rng;
use crate::DklsCurve;
use rand::Rng;
use crate::protocols::derivation::{ChainCode, DerivData};
use crate::protocols::dkg::compute_eth_address;
use crate::protocols::{Parameters, Party};
use crate::utilities::hashes::HashOutput;
use crate::utilities::multiplication::{MulReceiver, MulSender};
use crate::utilities::ot::{
self,
extension::{OTEReceiver, OTESender},
};
use crate::utilities::zero_shares::{self, ZeroShare};
/// Given a secret key, computes the data needed to make
/// `DKLs23` signatures under the corresponding public key.
///
/// The output is a vector of [`Party`]'s which should be
/// distributed to different users.
///
/// We also include an option to supply a chain code if the original
/// wallet followed BIP-32 for key derivation ([read more](super::derivation)).
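///
/// # Example
///
/// A minimal sketch (ignored by doctests) mirroring the tests elsewhere in
/// this crate: a 2-of-3 split over secp256k1 with a freshly sampled chain code.
///
/// ```ignore
/// let parameters = Parameters { threshold: 2, share_count: 3 };
/// let session_id = rng::get_rng().gen::<[u8; 32]>();
/// let secret_key = <k256::Scalar as elliptic_curve::Field>::random(rng::get_rng());
/// let parties = re_key::<k256::Secp256k1>(&parameters, &session_id, &secret_key, None);
/// assert_eq!(parties.len(), 3);
/// ```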
#[must_use]
pub fn re_key<C: DklsCurve>(
parameters: &Parameters,
session_id: &[u8],
secret_key: &C::Scalar,
option_chain_code: Option<ChainCode>,
) -> Vec<Party<C>>
where
C::Scalar: Field,
C::AffinePoint: GroupEncoding,
{
// Public key.
let pk = (C::ProjectivePoint::from(crate::generator::<C>()) * secret_key).to_affine();
// We will compute "poly_point" for each party with this polynomial
// via Shamir's secret sharing.
let mut polynomial: Vec<C::Scalar> = Vec::with_capacity(parameters.threshold as usize);
polynomial.push(*secret_key);
for _ in 1..parameters.threshold {
polynomial.push(C::Scalar::random(rng::get_rng()));
}
// Zero shares.
// We compute the common seed each pair of parties must save.
    // The vector below should be interpreted as follows: its first entry
// is a vector containing the seeds for the pair of parties (1,2),
// (1,3), ..., (1,n). The second entry contains the seeds for the pairs
// (2,3), (2,4), ..., (2,n), and so on. The last entry contains the
// seed for the pair (n-1, n).
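    // For illustration only: with share_count = 3 the layout is
    // common_seeds = [ [seed_(1,2), seed_(1,3)], [seed_(2,3)] ],
    // i.e. common_seeds[i - 1][j - i - 1] holds the seed for the pair (i, j) with i < j.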
let mut common_seeds: Vec<Vec<zero_shares::Seed>> =
Vec::with_capacity((parameters.share_count - 1) as usize);
for lower_index in 1..parameters.share_count {
let mut seeds_with_lower_index: Vec<zero_shares::Seed> =
Vec::with_capacity((parameters.share_count - lower_index) as usize);
for _ in (lower_index + 1)..=parameters.share_count {
let seed = rng::get_rng().gen::<zero_shares::Seed>();
seeds_with_lower_index.push(seed);
}
common_seeds.push(seeds_with_lower_index);
}
// We can now finish the initialization.
let mut zero_shares: Vec<ZeroShare> = Vec::with_capacity(parameters.share_count as usize);
for party in 1..=parameters.share_count {
let mut seeds: Vec<zero_shares::SeedPair> =
Vec::with_capacity((parameters.share_count - 1) as usize);
// We compute the pairs for which we have the highest index.
if party > 1 {
for counterparty in 1..party {
seeds.push(zero_shares::SeedPair {
lowest_index: false,
index_counterparty: counterparty,
seed: common_seeds[(counterparty - 1) as usize]
[(party - counterparty - 1) as usize],
});
}
}
// We compute the pairs for which we have the lowest index.
if party < parameters.share_count {
for counterparty in (party + 1)..=parameters.share_count {
seeds.push(zero_shares::SeedPair {
lowest_index: true,
index_counterparty: counterparty,
seed: common_seeds[(party - 1) as usize][(counterparty - party - 1) as usize],
});
}
}
zero_shares.push(ZeroShare::initialize(seeds));
}
// Two-party multiplication.
// These will store the result of initialization for each party.
let mut all_mul_receivers: Vec<BTreeMap<u8, MulReceiver<C>>> =
vec![BTreeMap::new(); parameters.share_count as usize];
let mut all_mul_senders: Vec<BTreeMap<u8, MulSender<C>>> =
vec![BTreeMap::new(); parameters.share_count as usize];
for receiver in 1..=parameters.share_count {
for sender in 1..=parameters.share_count {
if sender == receiver {
continue;
}
// We first compute the data for the OT extension.
// Receiver: Sample the seeds.
let mut seeds0: Vec<HashOutput> = Vec::with_capacity(ot::extension::KAPPA as usize);
let mut seeds1: Vec<HashOutput> = Vec::with_capacity(ot::extension::KAPPA as usize);
for _ in 0..ot::extension::KAPPA {
seeds0.push(rng::get_rng().gen::<HashOutput>());
seeds1.push(rng::get_rng().gen::<HashOutput>());
}
// Sender: Sample the correlation and choose the correct seed.
// The choice bits are sampled randomly.
let mut correlation: Vec<bool> = Vec::with_capacity(ot::extension::KAPPA as usize);
let mut seeds: Vec<HashOutput> = Vec::with_capacity(ot::extension::KAPPA as usize);
for i in 0..ot::extension::KAPPA {
let current_bit: bool = rng::get_rng().gen();
if current_bit {
seeds.push(seeds1[i as usize]);
} else {
seeds.push(seeds0[i as usize]);
}
correlation.push(current_bit);
}
let ote_receiver = OTEReceiver { seeds0, seeds1 };
let ote_sender = OTESender { correlation, seeds };
// We sample the public gadget vector.
let mut public_gadget: Vec<C::Scalar> =
Vec::with_capacity(ot::extension::BATCH_SIZE as usize);
for _ in 0..ot::extension::BATCH_SIZE {
public_gadget.push(C::Scalar::random(rng::get_rng()));
}
// We finish the initialization.
let mul_receiver = MulReceiver {
public_gadget: public_gadget.clone(),
ote_receiver,
};
let mul_sender = MulSender {
public_gadget,
ote_sender,
};
// We save the results.
all_mul_receivers[(receiver - 1) as usize].insert(sender, mul_receiver);
all_mul_senders[(sender - 1) as usize].insert(receiver, mul_sender);
}
}
// Key derivation - BIP-32.
// We use the chain code given or we sample a new one.
let chain_code = match option_chain_code {
Some(cc) => cc,
None => rng::get_rng().gen::<ChainCode>(),
};
// We create the parties.
let mut parties: Vec<Party<C>> = Vec::with_capacity(parameters.share_count as usize);
for index in 1..=parameters.share_count {
        // poly_point is the polynomial evaluated at `index`.
let mut poly_point = C::Scalar::ZERO;
let mut power_of_index = C::Scalar::ONE;
for i in 0..parameters.threshold {
poly_point += polynomial[i as usize] * power_of_index;
power_of_index *= C::Scalar::from(u64::from(index));
}
// Remark: There is a very tiny probability that poly_point is trivial.
// However, the person that will receive this data should apply the
// refresh protocol to guarantee their key share is really secret.
// This reduces the probability even more, so we are not going to
// introduce an "Abort" case here.
let derivation_data = DerivData {
depth: 0,
child_number: 0, // These three values are initialized as zero for the master node.
parent_fingerprint: [0; 4],
poly_point,
pk,
chain_code,
};
parties.push(Party {
parameters: parameters.clone(),
party_index: index,
session_id: session_id.to_vec(),
poly_point,
pk,
zero_share: zero_shares[(index - 1) as usize].clone(),
mul_senders: all_mul_senders[(index - 1) as usize].clone(),
mul_receivers: all_mul_receivers[(index - 1) as usize].clone(),
derivation_data,
eth_address: compute_eth_address::<C>(&pk),
});
}
parties
}
// For tests, see the file signing.rs. It uses the function above.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,9 @@
//! Extra functionalities needed in `protocols`.
pub mod commits;
pub mod hashes;
pub mod multiplication;
pub mod ot;
pub mod proofs;
pub mod rng;
pub mod zero_shares;


@@ -0,0 +1,114 @@
//! Commit and decommit protocols.
//!
//! This file implements the commitment functionality needed for `DKLs23`.
//! We follow the approach suggested on page 7 of their paper
//! (<https://eprint.iacr.org/2023/765.pdf>).
use crate::utilities::hashes::{hash, point_to_bytes, HashOutput};
use crate::utilities::rng;
use elliptic_curve::group::GroupEncoding;
use elliptic_curve::CurveArithmetic;
use rand::Rng;
// Computational security parameter lambda_c from DKLs23 (divided by 8)
use crate::SECURITY;
/// Commits to a given message.
///
/// Given a message, this function generates a random salt of `2*lambda_c` bits
/// and computes the corresponding commitment.
///
/// The sender should first communicate the commitment. When he wants to decommit,
/// he sends the message together with the salt.
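///
/// # Example
///
/// A minimal sketch of the commit/decommit flow (ignored by doctests):
///
/// ```ignore
/// let msg = b"message to commit";
/// let (commitment, salt) = commit(msg);
/// // The sender publishes `commitment` first and reveals `msg` and `salt` later.
/// assert!(verify_commitment(msg, &commitment, &salt));
/// ```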
#[must_use]
pub fn commit(msg: &[u8]) -> (HashOutput, Vec<u8>) {
//The paper instructs the salt to have 2*lambda_c bits.
let mut salt = [0u8; 2 * SECURITY as usize];
rng::get_rng().fill(&mut salt[..]);
let commitment = hash(msg, &salt);
(commitment, salt.to_vec())
}
/// Verifies a commitment for a message.
///
/// After having received the commitment and later the message and the salt, the receiver
/// verifies if these data are compatible.
#[must_use]
pub fn verify_commitment(msg: &[u8], commitment: &HashOutput, salt: &[u8]) -> bool {
let expected_commitment = hash(msg, salt);
*commitment == expected_commitment
}
/// Commits to a given point.
///
/// This is the same as [`commit`], but it receives a point on the elliptic curve instead.
#[must_use]
pub fn commit_point<C: CurveArithmetic>(point: &C::AffinePoint) -> (HashOutput, Vec<u8>)
where
C::AffinePoint: GroupEncoding,
{
let point_as_bytes = point_to_bytes::<C>(point);
commit(&point_as_bytes)
}
/// Verifies a commitment for a point.
///
/// This is the same as [`verify_commitment`], but it receives a point on the elliptic curve instead.
#[must_use]
pub fn verify_commitment_point<C: CurveArithmetic>(
point: &C::AffinePoint,
commitment: &HashOutput,
salt: &[u8],
) -> bool
where
C::AffinePoint: GroupEncoding,
{
let point_as_bytes = point_to_bytes::<C>(point);
verify_commitment(&point_as_bytes, commitment, salt)
}
#[cfg(test)]
mod tests {
use super::*;
/// Tests if committing and de-committing work.
#[test]
fn test_commit_decommit() {
let msg = rng::get_rng().gen::<[u8; 32]>();
let (commitment, salt) = commit(&msg);
assert!(verify_commitment(&msg, &commitment, &salt));
}
/// Commits to a message and changes it on purpose
    /// to check that [`verify_commitment`] returns `false`.
#[test]
fn test_commit_decommit_fail_msg() {
let msg = rng::get_rng().gen::<[u8; 32]>();
let (commitment, salt) = commit(&msg);
let msg = rng::get_rng().gen::<[u8; 32]>(); //We change the message
assert!(!(verify_commitment(&msg, &commitment, &salt))); //The test can fail but with very low probability
}
/// Commits to a message and changes the commitment on purpose
    /// to check that [`verify_commitment`] returns `false`.
#[test]
fn test_commit_decommit_fail_commitment() {
let msg = rng::get_rng().gen::<[u8; 32]>();
let (_, salt) = commit(&msg);
let commitment = rng::get_rng().gen::<HashOutput>(); //We change the commitment
assert!(!(verify_commitment(&msg, &commitment, &salt))); //The test can fail but with very low probability
}
/// Commits to a message and changes the salt on purpose
    /// to check that [`verify_commitment`] returns `false`.
#[test]
fn test_commit_decommit_fail_salt() {
let msg = rng::get_rng().gen::<[u8; 32]>();
let (commitment, _) = commit(&msg);
let mut salt = [0u8; 2 * SECURITY as usize];
rng::get_rng().fill(&mut salt[..]);
assert!(!(verify_commitment(&msg, &commitment, &salt))); //The test can fail but with very low probability
}
}


@@ -0,0 +1,174 @@
//! Functions relating hashes and byte conversions.
//!
//! We are using SHA-256 from SHA-2 as in the implementation of the
//! previous version of the `DKLs` protocol (<https://gitlab.com/neucrypt/mpecdsa/-/blob/release/src/lib.rs>).
//!
//! As explained by one of the authors (see <https://youtu.be/-d0Ny7NAG-w?si=POTKF1BwwGOzvIpL&t=3065>),
//! each subprotocol should use a different random oracle. For this purpose, our implementation
//! has a "salt" parameter to modify the hash function. In our main protocol, the salt is
//! usually derived from the session id.
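//!
//! # Example
//!
//! An illustrative sketch (ignored by doctests) of how the salt acts as a
//! domain separator:
//!
//! ```ignore
//! let a = hash(b"message", b"subprotocol-1");
//! let b = hash(b"message", b"subprotocol-2");
//! assert_ne!(a, b); // different salts behave as independent oracles
//! ```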
// TODO/FOR THE FUTURE: It requires some work to really guarantee that all "salts" are
// different for each subprotocol. For example, the implementation above has a
// file just for this purpose. Thus, it's worth analyzing this code in the future
// and maybe implementing something similar.
use bitcoin_hashes::{sha256, Hash};
use elliptic_curve::bigint::{Encoding, U256};
use elliptic_curve::group::GroupEncoding;
use elliptic_curve::ops::Reduce;
use elliptic_curve::CurveArithmetic;
use elliptic_curve::PrimeField;
use crate::SECURITY;
/// Represents the output of the hash function.
///
/// We are using SHA-256, so the hash values have 256 bits.
pub type HashOutput = [u8; SECURITY as usize];
/// Hash with result in bytes.
#[must_use]
pub fn hash(msg: &[u8], salt: &[u8]) -> HashOutput {
let concatenation = [salt, msg].concat();
sha256::Hash::hash(&concatenation).to_byte_array()
}
/// Hash with result as an integer.
#[must_use]
pub fn hash_as_int(msg: &[u8], salt: &[u8]) -> U256 {
let as_bytes = hash(msg, salt);
U256::from_be_bytes(as_bytes)
}
/// Hash with result as a scalar on curve `C`.
///
/// It takes the integer from [`hash_as_int`] and reduces it modulo the order
/// of the curve.
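///
/// # Example
///
/// A minimal sketch (ignored by doctests) using secp256k1, as in the tests:
///
/// ```ignore
/// let s = hash_as_scalar::<k256::Secp256k1>(b"message", b"salt");
/// ```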
#[must_use]
pub fn hash_as_scalar<C>(msg: &[u8], salt: &[u8]) -> C::Scalar
where
C: CurveArithmetic,
C::Scalar: Reduce<U256>,
{
let as_int = hash_as_int(msg, salt);
C::Scalar::reduce(as_int)
}
/// Converts a scalar on curve `C` to bytes.
///
/// The scalar is represented by an integer.
/// This function writes this integer as a byte array via
/// [`PrimeField::to_repr`].
#[must_use]
pub fn scalar_to_bytes<C>(scalar: &C::Scalar) -> Vec<u8>
where
C: CurveArithmetic,
C::Scalar: PrimeField,
{
scalar.to_repr().as_ref().to_vec()
}
/// Converts a point on elliptic curve `C` to bytes.
///
/// For any point other than the point at infinity, it computes the compressed
/// representation of `point` via [`GroupEncoding::to_bytes`].
#[must_use]
pub fn point_to_bytes<C>(point: &C::AffinePoint) -> Vec<u8>
where
C: CurveArithmetic,
C::AffinePoint: GroupEncoding,
{
point.to_bytes().as_ref().to_vec()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utilities::rng;
use elliptic_curve::group::Group;
use elliptic_curve::point::AffineCoordinates;
use elliptic_curve::Field;
use hex;
use rand::Rng;
// All tests use secp256k1 as the concrete curve, matching the original
// hard-coded behaviour.
type C = k256::Secp256k1;
type Scalar = <C as CurveArithmetic>::Scalar;
type AffinePoint = <C as CurveArithmetic>::AffinePoint;
/// Tests if [`hash`] really works as `SHA-256` is intended.
///
/// In this case, you should manually change the values and
/// use a trusted source which computes `SHA-256` to compare.
#[test]
fn test_hash() {
let msg_string = "Testing message";
let salt_string = "Testing salt";
let msg = msg_string.as_bytes();
let salt = salt_string.as_bytes();
assert_eq!(
hash(msg, salt).to_vec(),
hex::decode("847bf2f0d27a519b25e519efebc9d509316539b89ee8f6f09ef6d2abc08113ba")
.unwrap()
);
}
/// Tests if [`hash_as_int`] gives the correct integer.
///
/// In this case, you should manually change the values and
/// use a trusted source which computes `SHA-256` to compare.
#[test]
fn test_hash_as_int() {
let msg_string = "Testing message";
let salt_string = "Testing salt";
let msg = msg_string.as_bytes();
let salt = salt_string.as_bytes();
assert_eq!(
hash_as_int(msg, salt),
U256::from_be_hex("847bf2f0d27a519b25e519efebc9d509316539b89ee8f6f09ef6d2abc08113ba")
);
}
/// Tests if [`scalar_to_bytes`] converts a `Scalar`
/// in the expected way.
#[test]
fn test_scalar_to_bytes() {
for _ in 0..100 {
let number: u32 = rng::get_rng().gen();
let scalar = Scalar::from(u64::from(number));
let number_as_bytes = [vec![0u8; 28], number.to_be_bytes().to_vec()].concat();
assert_eq!(number_as_bytes, scalar_to_bytes::<C>(&scalar));
}
}
/// Tests if [`point_to_bytes`] indeed returns the compressed
/// representation of a point on the elliptic curve.
#[test]
fn test_point_to_bytes() {
for _ in 0..100 {
let generator: AffinePoint = crate::generator::<C>();
let identity: AffinePoint = crate::identity::<C>();
let point = (<C as CurveArithmetic>::ProjectivePoint::from(generator)
* Scalar::random(rng::get_rng()))
.to_affine();
if point == identity {
continue;
}
let mut compressed_point = Vec::with_capacity(33);
compressed_point.push(if bool::from(point.y_is_odd()) { 3 } else { 2 });
compressed_point.extend_from_slice(point.x().as_slice());
assert_eq!(compressed_point, point_to_bytes::<C>(&point));
}
}
}


@@ -0,0 +1,715 @@
//! Random Vector OLE functionality from `DKLs23`.
//!
//! This file realizes Functionality 3.5 in `DKLs23` (<https://eprint.iacr.org/2023/765.pdf>).
//! It is based upon the OT extension protocol [here](super::ot::extension).
//!
//! As `DKLs23` suggested, we use Protocol 1 of `DKLs19` (<https://eprint.iacr.org/2019/523.pdf>).
//! `DKLs23` also gives some guidance on how to implement the protocol
//! in only two rounds (see page 8 and Section 5.1), which we adopt here.
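//!
//! # Message flow (sketch)
//!
//! A hypothetical two-round run (ignored by doctests) after both parties have
//! been initialized; it mirrors the test at the bottom of this file:
//!
//! ```ignore
//! // Receiver starts and already learns its random factor `b`.
//! let (b, kept, to_sender) = mul_receiver.run_phase1(&session_id);
//! // Sender runs its single phase with its L inputs.
//! let (sender_out, to_receiver) = mul_sender.run(&session_id, &inputs, &to_sender)?;
//! // Receiver finishes and obtains its additive shares.
//! let receiver_out = mul_receiver.run_phase2(&session_id, &kept, &to_receiver)?;
//! // For each i: sender_out[i] + receiver_out[i] == inputs[i] * b.
//! ```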
use elliptic_curve::bigint::U256;
use elliptic_curve::group::GroupEncoding;
use elliptic_curve::ops::Reduce;
use elliptic_curve::CurveArithmetic;
use elliptic_curve::{Field, PrimeField};
use serde::{Deserialize, Serialize};
use crate::utilities::hashes::{hash, hash_as_scalar, scalar_to_bytes, HashOutput};
use crate::utilities::proofs::{DLogProof, EncProof};
use crate::utilities::rng;
use crate::DklsCurve;
use super::ot::extension::{deserialize_vec_prg, serialize_vec_prg};
use crate::utilities::ot::base::{OTReceiver, OTSender, Seed};
use crate::utilities::ot::extension::{
OTEDataToSender, OTEReceiver, OTESender, PRGOutput, BATCH_SIZE,
};
use crate::utilities::ot::ErrorOT;
use rand::Rng;
/// Constant `L` from Functionality 3.5 in `DKLs23` used for signing in Protocol 3.6.
pub const L: u8 = 2;
/// This represents the number of times the OT extension protocol will be
/// called using the same value chosen by the receiver.
pub const OT_WIDTH: u8 = 2 * L;
/// Sender's data and methods for the multiplication protocol.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize",
deserialize = "C::Scalar: Deserialize<'de>"
))]
pub struct MulSender<C: CurveArithmetic> {
pub public_gadget: Vec<C::Scalar>,
pub ote_sender: OTESender,
}
/// Receiver's data and methods for the multiplication protocol.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize",
deserialize = "C::Scalar: Deserialize<'de>"
))]
pub struct MulReceiver<C: CurveArithmetic> {
pub public_gadget: Vec<C::Scalar>,
pub ote_receiver: OTEReceiver,
}
/// Data transmitted by the sender to the receiver after his phase.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize",
deserialize = "C::Scalar: Deserialize<'de>"
))]
pub struct MulDataToReceiver<C: CurveArithmetic> {
pub vector_of_tau: Vec<Vec<C::Scalar>>,
pub verify_r: HashOutput,
pub verify_u: Vec<C::Scalar>,
pub gamma_sender: Vec<C::Scalar>,
}
/// Data kept by the receiver between phases.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(bound(
serialize = "C::Scalar: Serialize",
deserialize = "C::Scalar: Deserialize<'de>"
))]
pub struct MulDataToKeepReceiver<C: CurveArithmetic> {
pub b: C::Scalar,
pub choice_bits: Vec<bool>,
#[serde(
serialize_with = "serialize_vec_prg",
deserialize_with = "deserialize_vec_prg"
)]
pub extended_seeds: Vec<PRGOutput>,
pub chi_tilde: Vec<C::Scalar>,
pub chi_hat: Vec<C::Scalar>,
}
/// Represents an error during the multiplication protocol.
pub struct ErrorMul {
pub description: String,
}
impl ErrorMul {
/// Creates an instance of `ErrorMul`.
#[must_use]
pub fn new(description: &str) -> ErrorMul {
ErrorMul {
description: String::from(description),
}
}
}
// We implement the protocol.
impl<C: DklsCurve> MulSender<C>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// INITIALIZE
// As in DKLs19 (https://eprint.iacr.org/2019/523.pdf), the initialization of the
// multiplication protocol is the same as for our OT extension protocol.
// Thus, we repeat the phases from the file ot_extension.rs.
// The only difference is that we include the sampling for the public gadget vector.
/// Starts the initialization of the protocol.
///
/// See [`OTESender`](super::ot::extension::OTESender) for explanation.
#[must_use]
pub fn init_phase1(session_id: &[u8]) -> (OTReceiver, Vec<bool>, Vec<C::Scalar>, Vec<EncProof<C>>) {
OTESender::init_phase1::<C>(session_id)
}
/// Finishes the initialization of the protocol.
///
/// The inputs here come from [`OTESender`](super::ot::extension::OTESender),
/// except for `nonce`, which was sent by the receiver for the
/// computation of the public gadget vector.
///
/// # Errors
///
/// Will return `Err` if the initialization fails (see the file above).
pub fn init_phase2(
ot_receiver: &OTReceiver,
session_id: &[u8],
correlation: Vec<bool>,
vec_r: &[C::Scalar],
dlog_proof: &DLogProof<C>,
nonce: &C::Scalar,
) -> Result<MulSender<C>, ErrorOT> {
let ote_sender =
OTESender::init_phase2::<C>(ot_receiver, session_id, correlation, vec_r, dlog_proof)?;
// We compute the public gadget vector from the nonce, in the same way as in
// https://gitlab.com/neucrypt/mpecdsa/-/blob/release/src/mul.rs.
let mut public_gadget: Vec<C::Scalar> = Vec::with_capacity(BATCH_SIZE as usize);
let mut counter = *nonce;
for _ in 0..BATCH_SIZE {
counter += C::Scalar::ONE;
public_gadget.push(hash_as_scalar::<C>(&scalar_to_bytes::<C>(&counter), session_id));
}
let mul_sender = MulSender {
public_gadget,
ote_sender,
};
Ok(mul_sender)
}
// PROTOCOL
// We now follow the steps of Protocol 1 in DKLs19, implementing
// the suggestions of DKLs23 as well.
// It is worth pointing out that the parameter l from DKLs19 is not
// the same as the parameter l from DKLs23. To highlight the difference,
// we will always denote the DKLs23 parameter by a capital L.
/// Runs the sender's protocol.
///
/// Input: [`L`] instances of `Scalar` and data coming from receiver.
///
/// Output: Protocol's output and data to receiver.
///
/// # Errors
///
/// Will return `Err` if the underlying OT extension fails (see [`OTESender`](super::ot::extension::OTESender)).
pub fn run(
&self,
session_id: &[u8],
input: &[C::Scalar],
data: &OTEDataToSender,
) -> Result<(Vec<C::Scalar>, MulDataToReceiver<C>), ErrorMul> {
// RANDOMIZED MULTIPLICATION
// Step 1 - No action for the sender.
// Step 2 - We sample the pads a_tilde and the check values a_hat.
// We also set the correlation for the OT protocol.
// There are L pads and L check_values.
let mut a_tilde: Vec<C::Scalar> = Vec::with_capacity(L as usize);
let mut a_hat: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for _ in 0..L {
a_tilde.push(C::Scalar::random(rng::get_rng()));
a_hat.push(C::Scalar::random(rng::get_rng()));
}
// For the correlation, let us first explain the case L = 1.
// In this case, there are actually two correlations: one is
// made with BATCH_SIZE copies of a_tilde and the other with
// BATCH_SIZE copies of a_hat. We use two correlations in order
// to get two outputs, as in DKLs19. Both of them will be used
// in the OT extension with the same choice bits from the receiver.
//
        // Now, following DKLs23, we hardcode l = 1 from DKLs19. At the same time,
        // DKLs23 has its own parameter L. To adapt the old protocol, we repeat
        // Step 2 of DKLs19 L times, so in the end we get 2*L correlations.
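        //
        // For illustration only: with L = 2, the concatenation below yields
        //   correlations = [ [a_tilde[0]; B], [a_tilde[1]; B], [a_hat[0]; B], [a_hat[1]; B] ],
        // where B = BATCH_SIZE.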
let mut correlation_tilde: Vec<Vec<C::Scalar>> = Vec::with_capacity(L as usize);
let mut correlation_hat: Vec<Vec<C::Scalar>> = Vec::with_capacity(L as usize);
for i in 0..L {
let correlation_tilde_i = vec![a_tilde[i as usize]; BATCH_SIZE as usize];
let correlation_hat_i = vec![a_hat[i as usize]; BATCH_SIZE as usize];
correlation_tilde.push(correlation_tilde_i);
correlation_hat.push(correlation_hat_i);
}
// We gather the correlations.
let correlations = [correlation_tilde, correlation_hat].concat();
// Step 3 - We execute the OT protocol.
// It is here that we use the "forced-reuse" technique that
// DKLs23 mentions on page 8. As they say: "Alice performs the
// steps of the protocol for each input in her vector, but uses
// a single batch of Bob's OT instances for all of them,
// concatenating the corresponding OT payloads to form one batch
// of payloads with lengths proportionate to her input vector length."
//
// Hence, the OT extension protocol will be executed 2*L times with
// the 2*L correlations from the previous step. The implementation
// in the file ot/extension.rs already deals with these repetitions,
// we just have to specify this quantity (the "OT width").
let ote_sid = ["OT Extension protocol".as_bytes(), session_id].concat();
let result = self.ote_sender.run::<C>(&ote_sid, OT_WIDTH, &correlations, data);
let ot_outputs: Vec<Vec<C::Scalar>>;
let vector_of_tau: Vec<Vec<C::Scalar>>; // Used by the receiver to finish the OT protocol.
match result {
Ok((out, tau)) => {
(ot_outputs, vector_of_tau) = (out, tau);
}
Err(error) => {
return Err(ErrorMul::new(&format!(
"OTE error during multiplication: {:?}",
error.description
)));
}
}
// This is the sender's output from the OT protocol with the notation from DKLs19.
let (z_tilde, z_hat) = ot_outputs.split_at(L as usize);
// Step 4 - We compute the shared random values.
// We use data as a transcript from Step 3.
let transcript = [
data.u.concat(),
data.verify_x.to_vec(),
data.verify_t.concat(),
]
.concat();
// At this point, the constant L from DKLs23 behaves as the
// constant l from DKLs19.
let mut chi_tilde: Vec<C::Scalar> = Vec::with_capacity(L as usize);
let mut chi_hat: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
// We compute the salts according to i and the variable.
let salt_tilde = [&(1u8).to_be_bytes(), &i.to_be_bytes(), session_id].concat();
let salt_hat = [&(2u8).to_be_bytes(), &i.to_be_bytes(), session_id].concat();
chi_tilde.push(hash_as_scalar::<C>(&transcript, &salt_tilde));
chi_hat.push(hash_as_scalar::<C>(&transcript, &salt_hat));
}
// Step 5 - We compute the verification value.
// We use Section 5.1 in DKLs23 for an optimization of the
// protocol in DKLs19.
// We have to compute a matrix r and a vector u.
// Only a hash of r will be sent to the receiver,
// so we'll compute r directly in bytes.
// The variable below saves each row of r in bytes.
let mut rows_r_as_bytes: Vec<Vec<u8>> = Vec::with_capacity(L as usize);
let mut verify_u: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
// We compute the i-th row of the matrix r in bytes.
let mut entries_as_bytes: Vec<Vec<u8>> = Vec::with_capacity(BATCH_SIZE as usize);
for j in 0..BATCH_SIZE {
let entry = (chi_tilde[i as usize] * z_tilde[i as usize][j as usize])
+ (chi_hat[i as usize] * z_hat[i as usize][j as usize]);
let entry_as_bytes = scalar_to_bytes::<C>(&entry);
entries_as_bytes.push(entry_as_bytes);
}
let row_i_as_bytes = entries_as_bytes.concat();
rows_r_as_bytes.push(row_i_as_bytes);
// We compute the i-th entry of the vector u.
let entry = (chi_tilde[i as usize] * a_tilde[i as usize])
+ (chi_hat[i as usize] * a_hat[i as usize]);
verify_u.push(entry);
}
let r_as_bytes = rows_r_as_bytes.concat();
// We transform r into a hash.
let verify_r: HashOutput = hash(&r_as_bytes, session_id);
// Step 6 - No action for the sender.
// INPUT AND ADJUSTMENT
// Step 7 - We compute the difference gamma_A.
let mut gamma: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
let difference = input[i as usize] - a_tilde[i as usize];
gamma.push(difference);
}
// Step 8 - Finally, we compute the protocol's output.
// Recall that we hardcoded gamma_B = 0.
let mut output: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
let mut summation = C::Scalar::ZERO;
for j in 0..BATCH_SIZE {
summation += self.public_gadget[j as usize] * z_tilde[i as usize][j as usize];
}
output.push(summation);
}
// We now return all values.
let data_to_receiver = MulDataToReceiver {
vector_of_tau,
verify_r,
verify_u,
gamma_sender: gamma,
};
Ok((output, data_to_receiver))
}
}
impl<C: DklsCurve> MulReceiver<C>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// INITIALIZE
// As in DKLs19 (https://eprint.iacr.org/2019/523.pdf), the initialization of the
// multiplication protocol is the same as for our OT extension protocol.
// Thus, we repeat the phases from the file ot_extension.rs.
// The only difference is that we include the sampling for the public gadget vector.
/// Starts the initialization of the protocol.
///
/// See [`OTEReceiver`](super::ot::extension::OTEReceiver) for explanation.
///
/// The `Scalar` does not come from the OT extension. It is just
/// a nonce for the generation of the public gadget vector. It should
/// be kept for the next phase and transmitted to the sender.
#[must_use]
pub fn init_phase1(session_id: &[u8]) -> (OTSender<C>, DLogProof<C>, C::Scalar) {
let (ot_sender, proof) = OTEReceiver::init_phase1::<C>(session_id);
// For the choice of the public gadget vector, we will use the same approach
// as in https://gitlab.com/neucrypt/mpecdsa/-/blob/release/src/mul.rs.
// We sample a nonce that will be used by both parties to compute a common vector.
let nonce = C::Scalar::random(rng::get_rng());
(ot_sender, proof, nonce)
}
/// Finishes the initialization of the protocol.
///
/// The inputs here come from [`OTEReceiver`](super::ot::extension::OTEReceiver),
/// except for `nonce`, which was generated during the previous phase.
///
/// # Errors
///
/// Will return `Err` if the initialization fails (see the file above).
pub fn init_phase2(
ot_sender: &OTSender<C>,
session_id: &[u8],
seed: &Seed,
enc_proofs: &[EncProof<C>],
nonce: &C::Scalar,
) -> Result<MulReceiver<C>, ErrorOT> {
let ote_receiver = OTEReceiver::init_phase2::<C>(ot_sender, session_id, seed, enc_proofs)?;
// We compute the public gadget vector from the nonce, in the same way as in
// https://gitlab.com/neucrypt/mpecdsa/-/blob/release/src/mul.rs.
let mut public_gadget: Vec<C::Scalar> = Vec::with_capacity(BATCH_SIZE as usize);
let mut counter = *nonce;
for _ in 0..BATCH_SIZE {
counter += C::Scalar::ONE;
public_gadget.push(hash_as_scalar::<C>(&scalar_to_bytes::<C>(&counter), session_id));
}
let mul_receiver = MulReceiver {
public_gadget,
ote_receiver,
};
Ok(mul_receiver)
}
// PROTOCOL
// We now follow the steps of Protocol 1 in DKLs19, implementing
// the suggestions of DKLs23 as well.
// It is worth pointing out that the parameter l from DKLs19 is not
// the same as the parameter l from DKLs23. To highlight the difference,
// we will always denote the DKLs23 parameter by a capital L.
/// Runs the first phase of the receiver's protocol.
///
/// Note that it is the receiver who starts the multiplication protocol.
///
/// The random factor coming from the protocol is already returned here.
/// There are two other outputs: one to be kept for the next phase
/// and one to be sent to the sender (related to the OT extension).
#[must_use]
pub fn run_phase1(
&self,
session_id: &[u8],
) -> (C::Scalar, MulDataToKeepReceiver<C>, OTEDataToSender) {
// RANDOMIZED MULTIPLICATION
// Step 1 - We sample the choice bits and compute the pad b_tilde.
// Since we are hardcoding gamma_B = 0, b_tilde will serve as the
// number b that the receiver inputs into the protocol. Hence, we
// will denote b_tilde simply as b.
let mut choice_bits: Vec<bool> = Vec::with_capacity(BATCH_SIZE as usize);
let mut b = C::Scalar::ZERO;
for i in 0..BATCH_SIZE {
let current_bit: bool = rng::get_rng().gen();
if current_bit {
b += &self.public_gadget[i as usize];
}
choice_bits.push(current_bit);
}
// Step 2 - No action for the receiver.
// Step 3 (Incomplete) - We start the OT extension protocol.
// Note that this protocol has one more round, so the receiver
// cannot get the output immediately. This will only be computed
// at the beginning of the next phase for the receiver.
let ote_sid = ["OT Extension protocol".as_bytes(), session_id].concat();
let (extended_seeds, data_to_sender) = self.ote_receiver.run_phase1(&ote_sid, &choice_bits);
// Step 4 - We compute the shared random values.
// We use data_to_sender as a transcript from Step 3.
let transcript = [
data_to_sender.u.concat(),
data_to_sender.verify_x.to_vec(),
data_to_sender.verify_t.concat(),
]
.concat();
// At this point, the constant L from DKLs23 behaves as the
// constant l from DKLs19.
let mut chi_tilde: Vec<C::Scalar> = Vec::with_capacity(L as usize);
let mut chi_hat: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
// We compute the salts according to i and the variable.
let salt_tilde = [&(1u8).to_be_bytes(), &i.to_be_bytes(), session_id].concat();
let salt_hat = [&(2u8).to_be_bytes(), &i.to_be_bytes(), session_id].concat();
chi_tilde.push(hash_as_scalar::<C>(&transcript, &salt_tilde));
chi_hat.push(hash_as_scalar::<C>(&transcript, &salt_hat));
}
// Step 5 - No action for the receiver, but he will receive
// some values for the next step, so we stop here.
// We now return all values.
let data_to_keep = MulDataToKeepReceiver {
b,
choice_bits,
extended_seeds,
chi_tilde,
chi_hat,
};
(b, data_to_keep, data_to_sender)
}
/// Finishes the receiver's protocol and gives his output.
///
/// The inputs are the data kept from the previous phase and
/// the data transmitted by the sender.
///
/// # Errors
///
/// Will return `Err` if the consistency check using the sender values fails
/// or if the underlying OT extension fails (see [`OTEReceiver`](super::ot::extension::OTEReceiver)).
pub fn run_phase2(
&self,
session_id: &[u8],
data_kept: &MulDataToKeepReceiver<C>,
data_received: &MulDataToReceiver<C>,
) -> Result<Vec<C::Scalar>, ErrorMul> {
// Step 3 (Conclusion) - We conclude the OT protocol.
// The sender applied the protocol 2*L times with our data,
// so we will have 2*L outputs (we refer to this number as
// the "OT width").
let ote_sid = ["OT Extension protocol".as_bytes(), session_id].concat();
let result = self.ote_receiver.run_phase2::<C>(
&ote_sid,
OT_WIDTH,
&data_kept.choice_bits,
&data_kept.extended_seeds,
&data_received.vector_of_tau,
);
let ot_outputs: Vec<Vec<C::Scalar>> = match result {
Ok(out) => out,
Err(error) => {
return Err(ErrorMul::new(&format!(
"OTE error during multiplication: {:?}",
error.description
)));
}
};
// This is the receiver's output from the OT protocol with the notation from DKLs19.
let (z_tilde, z_hat) = ot_outputs.split_at(L as usize);
// Step 6 - We verify if the data sent by the sender is consistent.
// We use Section 5.1 in DKLs23 for an optimization of the
// protocol in DKLs19.
// We have to compute a matrix r and a vector u.
// Only a hash of r will be sent to us so we'll
// reconstruct r directly in bytes.
// The variable below saves each row of r in bytes.
let mut rows_r_as_bytes: Vec<Vec<u8>> = Vec::with_capacity(L as usize);
for i in 0..L {
// We compute the i-th row of the matrix r in bytes.
let mut entries_as_bytes: Vec<Vec<u8>> = Vec::with_capacity(BATCH_SIZE as usize);
for j in 0..BATCH_SIZE {
// The entry depends on the choice bits.
let mut entry = (-(data_kept.chi_tilde[i as usize]
* z_tilde[i as usize][j as usize]))
- (data_kept.chi_hat[i as usize] * z_hat[i as usize][j as usize]);
if data_kept.choice_bits[j as usize] {
entry += &data_received.verify_u[i as usize];
}
let entry_as_bytes = scalar_to_bytes::<C>(&entry);
entries_as_bytes.push(entry_as_bytes);
}
let row_i_as_bytes = entries_as_bytes.concat();
rows_r_as_bytes.push(row_i_as_bytes);
}
let r_as_bytes = rows_r_as_bytes.concat();
// We transform r into a hash.
let expected_verify_r: HashOutput = hash(&r_as_bytes, session_id);
// We compare the values.
if data_received.verify_r != expected_verify_r {
return Err(ErrorMul::new(
"Sender cheated in multiplication protocol: Consistency check failed!",
));
}
// INPUT AND ADJUSTMENT
// Step 7 - No action for the receiver.
// (Remember that we hardcoded gamma_B = 0.)
// Step 8 - Finally, we compute the protocol's output.
// Recall that we hardcoded gamma_B = 0.
let mut output: Vec<C::Scalar> = Vec::with_capacity(L as usize);
for i in 0..L {
let mut summation = C::Scalar::ZERO;
for j in 0..BATCH_SIZE {
summation += self.public_gadget[j as usize] * z_tilde[i as usize][j as usize];
}
let final_sum = (data_kept.b * data_received.gamma_sender[i as usize]) + summation;
output.push(final_sum);
}
Ok(output)
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::Rng;
type C = k256::Secp256k1;
/// Tests if the outputs for the multiplication protocol
/// satisfy the relations they are supposed to satisfy.
#[test]
fn test_multiplication() {
let session_id = rng::get_rng().gen::<[u8; 32]>();
// INITIALIZATION
// Phase 1 - Receiver
let (ot_sender, dlog_proof, nonce) = MulReceiver::<C>::init_phase1(&session_id);
// Phase 1 - Sender
let (ot_receiver, correlation, vec_r, enc_proofs) = MulSender::<C>::init_phase1(&session_id);
// Communication round
// OT: Exchange the proofs and the seed.
// Mul: Exchange the nonce.
let seed = ot_receiver.seed;
// Phase 2 - Receiver
let result_receiver =
MulReceiver::<C>::init_phase2(&ot_sender, &session_id, &seed, &enc_proofs, &nonce);
let mul_receiver = match result_receiver {
Ok(r) => r,
Err(error) => {
panic!("Two-party multiplication error: {:?}", error.description);
}
};
// Phase 2 - Sender
let result_sender = MulSender::<C>::init_phase2(
&ot_receiver,
&session_id,
correlation,
&vec_r,
&dlog_proof,
&nonce,
);
let mul_sender = match result_sender {
Ok(s) => s,
Err(error) => {
panic!("Two-party multiplication error: {:?}", error.description);
}
};
// PROTOCOL
// Sampling the choices.
let mut sender_input: Vec<<C as CurveArithmetic>::Scalar> = Vec::with_capacity(L as usize);
for _ in 0..L {
sender_input.push(<C as CurveArithmetic>::Scalar::random(rng::get_rng()));
}
// Phase 1 - Receiver
let (receiver_random, data_to_keep, data_to_sender) = mul_receiver.run_phase1(&session_id);
// Communication round 1
// Receiver keeps receiver_random (part of the output)
// and data_to_keep, and transmits data_to_sender.
// Unique phase - Sender
let sender_result = mul_sender.run(&session_id, &sender_input, &data_to_sender);
let sender_output: Vec<<C as CurveArithmetic>::Scalar>;
let data_to_receiver: MulDataToReceiver<C>;
match sender_result {
Ok((output, data)) => {
sender_output = output;
data_to_receiver = data;
}
Err(error) => {
panic!("Two-party multiplication error: {:?}", error.description);
}
}
// Communication round 2
// Sender transmits data_to_receiver.
// Phase 2 - Receiver
let receiver_result =
mul_receiver.run_phase2(&session_id, &data_to_keep, &data_to_receiver);
let receiver_output = match receiver_result {
Ok(output) => output,
Err(error) => {
panic!("Two-party multiplication error: {:?}", error.description);
}
};
// Verification that the protocol did what it should do.
for i in 0..L {
// The sum of the outputs should be equal to the product of the
// sender's chosen scalar and the receiver's random scalar.
let sum = sender_output[i as usize] + receiver_output[i as usize];
assert_eq!(sum, sender_input[i as usize] * receiver_random);
}
}
}


@@ -0,0 +1,23 @@
//! Oblivious Transfer.
//!
//! The main protocol is given by the file [`extension`], but it needs
//! a base OT implemented in [`base`].
pub mod base;
pub mod extension;
/// Represents an error during any of the OT protocols.
#[derive(Debug, Clone)]
pub struct ErrorOT {
pub description: String,
}
impl ErrorOT {
/// Creates an instance of `ErrorOT`.
#[must_use]
pub fn new(description: &str) -> ErrorOT {
ErrorOT {
description: String::from(description),
}
}
}


@@ -0,0 +1,476 @@
//! Base OT.
//!
//! This file implements an oblivious transfer (OT) which will serve as a base
//! for the OT extension protocol.
//!
//! As suggested on page 30 of `DKLs23` (<https://eprint.iacr.org/2023/765.pdf>),
//! we implement the endemic OT protocol of Zhou et al., which can be found in
//! Section 3 of <https://eprint.iacr.org/2022/1525.pdf>.
//!
//! There are two phases for each party and one communication round between
//! them. Both Phase 1 and Phase 2 can be done concurrently for the sender
//! and the receiver.
//!
//! There is also an initialization function which should be executed during
//! Phase 1. It saves some values that can be reused if the protocol is applied
//! several times. As this will be our case for the OT extension, there are
//! "batch" variants for each of the phases.
use elliptic_curve::bigint::U256;
use elliptic_curve::group::{Curve as _, GroupEncoding};
use elliptic_curve::ops::Reduce;
use elliptic_curve::CurveArithmetic;
use elliptic_curve::{Field, PrimeField};
use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::utilities::hashes::{hash, hash_as_scalar, point_to_bytes, HashOutput};
use crate::utilities::ot::ErrorOT;
use crate::utilities::proofs::{DLogProof, EncProof};
use crate::utilities::rng;
use crate::DklsCurve;
use crate::SECURITY;
// SENDER DATA
/// Sender's data and methods for the base OT protocol.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound(
serialize = "C::Scalar: Serialize, C::AffinePoint: Serialize",
deserialize = "C::Scalar: Deserialize<'de>, C::AffinePoint: Deserialize<'de>"
))]
pub struct OTSender<C: CurveArithmetic> {
pub s: C::Scalar,
pub proof: DLogProof<C>,
}
// RECEIVER DATA
/// Seed kept by the receiver.
pub type Seed = [u8; SECURITY as usize];
/// Receiver's data and methods for the base OT protocol.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OTReceiver {
pub seed: Seed,
}
impl<C: DklsCurve> OTSender<C>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
    // According to the first paragraph on page 18,
// the sender can reuse the secret s and the proof of discrete
// logarithm. Thus, we isolate this part from the rest for efficiency.
/// Initializes the protocol for a given session id.
#[must_use]
pub fn init(session_id: &[u8]) -> OTSender<C> {
// We sample a nonzero random scalar.
let mut s = C::Scalar::ZERO;
while s == C::Scalar::ZERO {
s = C::Scalar::random(rng::get_rng());
}
// In the paper, different protocols use different random oracles.
// Thus, we will add a unique string to the session id here.
let current_sid = [session_id, "DLogProof".as_bytes()].concat();
let proof = DLogProof::<C>::prove(&s, &current_sid);
OTSender { s, proof }
}
// Phase 1 - The sender transmits z = s * generator and the proof
// of discrete logarithm. Note that z is contained in the proof.
/// Generates a proof to be sent to the receiver.
#[must_use]
pub fn run_phase1(&self) -> DLogProof<C> {
self.proof.clone()
}
// Since the sender is recycling the proof, we don't need a batch version.
// Communication round
// The sender transmits the proof.
// He receives the receiver's seed and encryption proof (which contains u and v).
// Phase 2 - We verify the receiver's data and compute the output.
/// Using the seed and the encryption proof transmitted by the receiver,
/// the two output messages are computed.
///
/// # Errors
///
/// Will return `Err` if the encryption proof fails.
pub fn run_phase2(
&self,
session_id: &[u8],
seed: &Seed,
enc_proof: &EncProof<C>,
) -> Result<(HashOutput, HashOutput), ErrorOT> {
// We reconstruct h from the seed (as in the paper).
// Instead of using a real identifier for the receiver,
// we just take the word 'Receiver' for simplicity.
        // It could probably be omitted, but we keep it to "change the oracle".
let generator: C::AffinePoint = crate::generator::<C>();
let msg_for_h = ["Receiver".as_bytes(), seed].concat();
let h = (C::ProjectivePoint::from(generator) * hash_as_scalar::<C>(&msg_for_h, session_id))
.to_affine();
// We verify the proof.
let current_sid = [session_id, "EncProof".as_bytes()].concat();
let verification = enc_proof.verify(&current_sid);
// h is already in enc_proof, but we check if the values agree.
if !verification || (h != enc_proof.proof0.base_h) {
return Err(ErrorOT::new(
"Receiver cheated in OT: Encryption proof failed!",
));
}
// We compute the messages.
// As before, instead of an identifier for the sender,
// we just take the word 'Sender' for simplicity.
let (_, v) = enc_proof.get_u_and_v();
let value_for_m0 = (C::ProjectivePoint::from(v) * self.s).to_affine();
let value_for_m1 =
((C::ProjectivePoint::from(v) - C::ProjectivePoint::from(h)) * self.s).to_affine();
let msg_for_m0 = ["Sender".as_bytes(), &point_to_bytes::<C>(&value_for_m0)].concat();
let msg_for_m1 = ["Sender".as_bytes(), &point_to_bytes::<C>(&value_for_m1)].concat();
let m0 = hash(&msg_for_m0, session_id);
let m1 = hash(&msg_for_m1, session_id);
Ok((m0, m1))
}
// Phase 2 batch version: used for multiple executions (e.g. OT extension).
/// Executes `run_phase2` for each encryption proof in `enc_proofs`.
///
/// # Errors
///
/// Will return `Err` if one of the executions fails.
pub fn run_phase2_batch(
&self,
session_id: &[u8],
seed: &Seed,
enc_proofs: &[EncProof<C>],
) -> Result<(Vec<HashOutput>, Vec<HashOutput>), ErrorOT> {
let batch_size =
            u16::try_from(enc_proofs.len()).expect("The batch sizes used always fit into a u16!");
let mut vec_m0: Vec<HashOutput> = Vec::with_capacity(batch_size as usize);
let mut vec_m1: Vec<HashOutput> = Vec::with_capacity(batch_size as usize);
for i in 0..batch_size {
// We use different ids for different iterations.
let current_sid = [&i.to_be_bytes(), session_id].concat();
let (m0, m1) = self.run_phase2(&current_sid, seed, &enc_proofs[i as usize])?;
vec_m0.push(m0);
vec_m1.push(m1);
}
Ok((vec_m0, vec_m1))
}
}
impl OTReceiver {
    // Initialization - According to the first paragraph on page 18,
    // the receiver can reuse the seed. Thus, we isolate this part
// from the rest for efficiency.
/// Initializes the protocol.
#[must_use]
pub fn init() -> OTReceiver {
let seed = rng::get_rng().gen::<Seed>();
OTReceiver { seed }
}
// Phase 1 - We sample the secret values and provide proof.
/// Given a choice bit, returns a secret scalar (to be kept)
/// and an encryption proof (to be sent to the sender).
#[must_use]
pub fn run_phase1<C: DklsCurve>(
&self,
session_id: &[u8],
bit: bool,
) -> (C::Scalar, EncProof<C>)
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// We sample the secret scalar r.
let r = C::Scalar::random(rng::get_rng());
// We compute h as in the paper.
// Instead of using a real identifier for the receiver,
// we just take the word 'Receiver' for simplicity.
        // It could probably be omitted, but we keep it to "change the oracle".
let generator: C::AffinePoint = crate::generator::<C>();
let msg_for_h = ["Receiver".as_bytes(), &self.seed].concat();
let h = (C::ProjectivePoint::from(generator) * hash_as_scalar::<C>(&msg_for_h, session_id))
.to_affine();
// We prove our data.
// In the paper, different protocols use different random oracles.
// Thus, we will add a unique string to the session id here.
let current_sid = [session_id, "EncProof".as_bytes()].concat();
let proof = EncProof::<C>::prove(&current_sid, &h, &r, bit);
// r should be kept and proof should be sent.
(r, proof)
}
// Phase 1 batch version: used for multiple executions (e.g. OT extension).
/// Executes `run_phase1` for each choice bit in `bits`.
#[must_use]
pub fn run_phase1_batch<C: DklsCurve>(
&self,
session_id: &[u8],
bits: &[bool],
) -> (Vec<C::Scalar>, Vec<EncProof<C>>)
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
let batch_size =
            u16::try_from(bits.len()).expect("The batch sizes used always fit into a u16!");
let mut vec_r: Vec<C::Scalar> = Vec::with_capacity(batch_size as usize);
let mut vec_proof: Vec<EncProof<C>> = Vec::with_capacity(batch_size as usize);
for i in 0..batch_size {
// We use different ids for different iterations.
let current_sid = [&i.to_be_bytes(), session_id].concat();
let (r, proof) = self.run_phase1::<C>(&current_sid, bits[i as usize]);
vec_r.push(r);
vec_proof.push(proof);
}
(vec_r, vec_proof)
}
// Communication round
// The receiver transmits his seed and the proof.
// He receives the sender's seed and proof of discrete logarithm (which contains z).
// Phase 2 - We verify the sender's data and compute the output.
// For the batch version, we split the phase into two steps: the
// first depends only on the initialization values and can be done
// once, while the second is different for each iteration.
/// Verifies the discrete logarithm proof sent by the sender
/// and returns the point concerned in the proof.
///
/// # Errors
///
/// Will return `Err` if the proof fails.
pub fn run_phase2_step1<C: DklsCurve>(
&self,
session_id: &[u8],
dlog_proof: &DLogProof<C>,
) -> Result<C::AffinePoint, ErrorOT>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// Verification of the proof.
let current_sid = [session_id, "DLogProof".as_bytes()].concat();
let verification = DLogProof::<C>::verify(dlog_proof, &current_sid);
if !verification {
return Err(ErrorOT::new(
"Sender cheated in OT: Proof of discrete logarithm failed!",
));
}
let z = dlog_proof.point;
Ok(z)
}
/// With the secret value `r` from Phase 1 and with the point `z`
/// from the previous step, the output message is computed.
#[must_use]
pub fn run_phase2_step2<C: DklsCurve>(
&self,
session_id: &[u8],
r: &C::Scalar,
z: &C::AffinePoint,
) -> HashOutput
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// We compute the message.
// As before, instead of an identifier for the sender,
// we just take the word 'Sender' for simplicity.
let value_for_mb = (C::ProjectivePoint::from(*z) * r).to_affine();
let msg_for_mb = ["Sender".as_bytes(), &point_to_bytes::<C>(&value_for_mb)].concat();
// We could return the bit as in the paper, but the receiver has this information.
hash(&msg_for_mb, session_id)
}
// Phase 2 batch version: used for multiple executions (e.g. OT extension).
/// Executes `run_phase2_step1` once and `run_phase2_step2` for every
/// secret scalar in `vec_r` from Phase 1.
///
/// # Errors
///
/// Will return `Err` if one of the executions fails.
pub fn run_phase2_batch<C: DklsCurve>(
&self,
session_id: &[u8],
vec_r: &[C::Scalar],
dlog_proof: &DLogProof<C>,
) -> Result<Vec<HashOutput>, ErrorOT>
where
C::Scalar: Reduce<U256> + PrimeField,
C::AffinePoint: GroupEncoding,
{
// Step 1
let z = self.run_phase2_step1::<C>(session_id, dlog_proof)?;
// Step 2
let batch_size =
u16::try_from(vec_r.len()).expect("The batch sizes used always fit into a u16!");
let mut vec_mb: Vec<HashOutput> = Vec::with_capacity(batch_size as usize);
for i in 0..batch_size {
// We use different ids for different iterations.
let current_sid = [&i.to_be_bytes(), session_id].concat();
let mb = self.run_phase2_step2::<C>(&current_sid, &vec_r[i as usize], &z);
vec_mb.push(mb);
}
Ok(vec_mb)
}
}
#[cfg(test)]
mod tests {
use super::*;
type C = k256::Secp256k1;
/// Tests if the outputs for the OT base protocol
/// satisfy the relations they are supposed to satisfy.
#[test]
fn test_ot_base() {
let session_id = rng::get_rng().gen::<[u8; 32]>();
// Initialization
let sender = OTSender::<C>::init(&session_id);
let receiver = OTReceiver::init();
// Phase 1 - Sender
let dlog_proof = sender.run_phase1();
// Phase 1 - Receiver
let bit = rng::get_rng().gen();
let (r, enc_proof) = receiver.run_phase1::<C>(&session_id, bit);
// Communication round - The parties exchange the proofs.
// The receiver also sends his seed.
let seed = receiver.seed;
// Phase 2 - Sender
let result_sender = sender.run_phase2(&session_id, &seed, &enc_proof);
if let Err(error) = result_sender {
panic!("OT error: {:?}", error.description);
}
let (m0, m1) = result_sender.unwrap();
// Phase 2 - Receiver
let result_receiver = receiver.run_phase2_step1::<C>(&session_id, &dlog_proof);
if let Err(error) = result_receiver {
panic!("OT error: {:?}", error.description);
}
let z = result_receiver.unwrap();
let mb = receiver.run_phase2_step2::<C>(&session_id, &r, &z);
// Verification that the protocol did what it should do.
// Depending on the choice the receiver made, he should receive one of the pads.
if bit {
assert_eq!(m1, mb);
} else {
assert_eq!(m0, mb);
}
}
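/// Illustrative sketch (same flow as [`test_ot_base`]): the session id acts
/// as a domain separator for the hashes, so recomputing the receiver's last
/// step under a different id yields a pad matching neither of the sender's.
#[test]
fn test_ot_base_wrong_session_id() {
    let session_id = rng::get_rng().gen::<[u8; 32]>();
    let sender = OTSender::<C>::init(&session_id);
    let receiver = OTReceiver::init();
    let dlog_proof = sender.run_phase1();
    let bit = rng::get_rng().gen();
    let (r, enc_proof) = receiver.run_phase1::<C>(&session_id, bit);
    let seed = receiver.seed;
    let (m0, m1) = sender
        .run_phase2(&session_id, &seed, &enc_proof)
        .expect("honest execution should succeed");
    let z = receiver
        .run_phase2_step1::<C>(&session_id, &dlog_proof)
        .expect("honest execution should succeed");
    // Same r and z as an honest run, but a fresh session id for the final hash.
    let other_session_id = rng::get_rng().gen::<[u8; 32]>();
    let mb = receiver.run_phase2_step2::<C>(&other_session_id, &r, &z);
    assert_ne!(m0, mb);
    assert_ne!(m1, mb);
}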
/// Batch version for [`test_ot_base`].
#[test]
fn test_ot_base_batch() {
let session_id = rng::get_rng().gen::<[u8; 32]>();
// Initialization (unique)
let sender = OTSender::<C>::init(&session_id);
let receiver = OTReceiver::init();
let batch_size = 256;
// Phase 1 - Sender (unique)
let dlog_proof = sender.run_phase1();
// Phase 1 - Receiver
let mut bits: Vec<bool> = Vec::with_capacity(batch_size);
for _ in 0..batch_size {
bits.push(rng::get_rng().gen());
}
let (vec_r, enc_proofs) = receiver.run_phase1_batch::<C>(&session_id, &bits);
// Communication round - The parties exchange the proofs.
// The receiver also sends his seed.
let seed = receiver.seed;
// Phase 2 - Sender
let result_sender = sender.run_phase2_batch(&session_id, &seed, &enc_proofs);
if let Err(error) = result_sender {
panic!("OT error: {:?}", error.description);
}
let (vec_m0, vec_m1) = result_sender.unwrap();
// Phase 2 - Receiver
let result_receiver = receiver.run_phase2_batch::<C>(&session_id, &vec_r, &dlog_proof);
if let Err(error) = result_receiver {
panic!("OT error: {:?}", error.description);
}
let vec_mb = result_receiver.unwrap();
// Verification that the protocol did what it should do.
// Depending on the choice the receiver made, he should receive one of the pads.
for i in 0..batch_size {
if bits[i] {
assert_eq!(vec_m1[i], vec_mb[i]);
} else {
assert_eq!(vec_m0[i], vec_mb[i]);
}
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,18 @@
#[cfg(feature = "insecure-rng")]
use rand::rngs::StdRng;
#[cfg(not(feature = "insecure-rng"))]
use rand::rngs::ThreadRng;
#[cfg(feature = "insecure-rng")]
use rand::SeedableRng;
pub const DEFAULT_SEED: u64 = 42;
#[cfg(not(feature = "insecure-rng"))]
pub fn get_rng() -> ThreadRng {
rand::thread_rng()
}
#[cfg(feature = "insecure-rng")]
pub fn get_rng() -> StdRng {
rand::rngs::StdRng::seed_from_u64(DEFAULT_SEED)
}
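A minimal sketch of what the `insecure-rng` feature implies for reproducibility, assuming the module above is the crate's `rng` utility: every call to `get_rng()` re-seeds from `DEFAULT_SEED`, so draws repeat across calls, which is useful only for reproducing test vectors.

#[cfg(all(test, feature = "insecure-rng"))]
mod tests {
    use super::*;
    use rand::Rng;

    #[test]
    fn insecure_rng_is_reproducible() {
        // Two freshly constructed RNGs start from the same seed and therefore
        // produce identical output; the default ThreadRng would not.
        let first = get_rng().gen::<[u8; 32]>();
        let second = get_rng().gen::<[u8; 32]>();
        assert_eq!(first, second);
    }
}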


@@ -0,0 +1,200 @@
//! Zero-sharing sampling functionality from `DKLs23`.
//!
//! This file implements the zero-sharing sampling functionality from the `DKLs23` protocol
//! (this is Functionality 3.4 on page 7 of their paper).
//!
//! The implementation follows the suggestion they give using the commitment functionality.
use crate::utilities::commits;
use crate::utilities::hashes::{hash_as_scalar, HashOutput};
use crate::utilities::rng;
use elliptic_curve::bigint::U256;
use elliptic_curve::ops::Reduce;
use elliptic_curve::{CurveArithmetic, Field};
use rand::Rng;
use serde::{Deserialize, Serialize};
// Computational security parameter lambda_c from DKLs23 (divided by 8)
use crate::SECURITY;
/// Byte array of `SECURITY` bytes.
pub type Seed = [u8; SECURITY as usize];
/// Represents the common seed a pair of parties shares.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SeedPair {
/// Whether the party that owns this data has the lowest index in the pair.
pub lowest_index: bool,
/// Index of the counterparty this seed is shared with.
pub index_counterparty: u8,
/// The shared seed (XOR of the two parties' committed seeds).
pub seed: Seed,
}
/// Used to run the protocol.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZeroShare {
/// One shared seed pair per counterparty of this party.
pub seeds: Vec<SeedPair>,
}
impl ZeroShare {
// We implement the functions in the order they should be applied during the protocol.
// INITIALIZATION
/// Generates and commits a seed to another party using the commitment functionality.
///
/// The variables `seed` and `salt` should be kept, while `commitment` is transmitted.
/// At the time of de-commitment, these secret values are revealed.
#[must_use]
pub fn generate_seed_with_commitment() -> (Seed, HashOutput, Vec<u8>) {
let seed = rng::get_rng().gen::<Seed>();
let (commitment, salt) = commits::commit(&seed);
(seed, commitment, salt)
}
/// Verifies a seed against the commitment.
#[must_use]
pub fn verify_seed(seed: &Seed, commitment: &HashOutput, salt: &[u8]) -> bool {
commits::verify_commitment(seed, commitment, salt)
}
/// Transforms the two seeds generated by a pair into a single shared seed.
#[must_use]
pub fn generate_seed_pair(
index_party: u8,
index_counterparty: u8,
seed_party: &Seed,
seed_counterparty: &Seed,
) -> SeedPair {
// Instead of adding the seeds, as suggested in DKLs23, we apply the XOR operation.
let mut seed: Seed = [0u8; SECURITY as usize];
for i in 0..SECURITY {
seed[i as usize] = seed_party[i as usize] ^ seed_counterparty[i as usize];
}
// We save if we are the party with lowest index.
// The case where index_party == index_counterparty shouldn't occur in practice.
let lowest_index = index_party <= index_counterparty;
SeedPair {
lowest_index,
index_counterparty,
seed,
}
}
/// Finishes the initialization procedure.
///
/// All the `SeedPair`s relating to the same party are gathered.
#[must_use]
pub fn initialize(seeds: Vec<SeedPair>) -> ZeroShare {
ZeroShare { seeds }
}
// FUNCTIONALITY
/// Executes the protocol.
///
/// To compute the zero shares, the parties must agree on the same "random seed"
/// for the "random number generator". This is achieved by using the current session id.
/// Moreover, not all parties need to participate in this step, so we need to provide a
/// list of counterparties.
#[must_use]
pub fn compute<C: CurveArithmetic>(&self, counterparties: &[u8], session_id: &[u8]) -> C::Scalar
where
C::Scalar: Reduce<U256>,
{
let mut share = C::Scalar::ZERO;
let seeds = self.seeds.clone();
for seed_pair in seeds {
// Skip this seed pair if its counterparty is not in the current list of counterparties.
if !counterparties.contains(&seed_pair.index_counterparty) {
continue;
}
// Seeds generate fragments that add up to the share that will be returned.
let fragment = hash_as_scalar::<C>(&seed_pair.seed, session_id);
// This sign guarantees that the shares from different parties add up to zero.
if seed_pair.lowest_index {
share -= fragment;
} else {
share += fragment;
}
}
share
}
}
#[cfg(test)]
mod tests {
use super::*;
use k256::Scalar;
/// Tests if the shares returned by the zero shares
/// protocol indeed add up to zero.
#[test]
fn test_zero_shares() {
let number_parties: u8 = 8; //This number can be changed. If so, change executing_parties below.
//Parties generate the initial seeds and the commitments.
let mut step1: Vec<Vec<(Seed, HashOutput, Vec<u8>)>> =
Vec::with_capacity(number_parties as usize);
for _ in 0..number_parties {
let mut step1_party_i: Vec<(Seed, HashOutput, Vec<u8>)> =
Vec::with_capacity(number_parties as usize);
for _ in 0..number_parties {
//Each party should skip his own iteration, but we ignore this now for simplicity.
step1_party_i.push(ZeroShare::generate_seed_with_commitment());
}
step1.push(step1_party_i);
}
//Communication round
//The parties exchange their seeds and verify the message.
for i in 0..number_parties {
for j in 0..number_parties {
let (seed, commitment, salt) = step1[i as usize][j as usize].clone();
assert!(ZeroShare::verify_seed(&seed, &commitment, &salt));
}
}
//Each party creates his "seed pairs" and finishes the initialization.
let mut zero_shares: Vec<ZeroShare> = Vec::with_capacity(number_parties as usize);
for i in 0..number_parties {
let mut seeds: Vec<SeedPair> = Vec::with_capacity((number_parties - 1) as usize);
for j in 0..number_parties {
if i == j {
continue;
} //Now each party skips his own iteration.
let (seed_party, _, _) = step1[i as usize][j as usize];
let (seed_counterparty, _, _) = step1[j as usize][i as usize];
//We add 1 below because indexes for parties start at 1 and not 0.
seeds.push(ZeroShare::generate_seed_pair(
i + 1,
j + 1,
&seed_party,
&seed_counterparty,
));
}
zero_shares.push(ZeroShare::initialize(seeds));
}
//We can finally execute the functionality.
let session_id = rng::get_rng().gen::<[u8; 32]>();
let executing_parties: Vec<u8> = vec![1, 3, 5, 7, 8]; //These are the parties running the protocol.
let mut shares: Vec<Scalar> = Vec::with_capacity(executing_parties.len());
for party in executing_parties.clone() {
//Gather the counterparties
let mut counterparties = executing_parties.clone();
counterparties.retain(|index| *index != party);
//Compute the share (there is a -1 because indexes for parties start at 1).
let share = zero_shares[(party as usize) - 1].compute::<k256::Secp256k1>(&counterparties, &session_id);
shares.push(share);
}
//Final check
let sum: Scalar = shares.iter().sum();
assert_eq!(sum, Scalar::ZERO);
}
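/// Two-party sketch of the cancellation behind `compute`: both parties derive
/// the same XORed pair seed, but the lower-indexed one subtracts the fragment
/// the other adds, so the two shares sum to zero.
#[test]
fn test_zero_shares_two_parties() {
    let (seed_1, _, _) = ZeroShare::generate_seed_with_commitment();
    let (seed_2, _, _) = ZeroShare::generate_seed_with_commitment();
    let pair_1 = ZeroShare::generate_seed_pair(1, 2, &seed_1, &seed_2);
    let pair_2 = ZeroShare::generate_seed_pair(2, 1, &seed_2, &seed_1);
    let party_1 = ZeroShare::initialize(vec![pair_1]);
    let party_2 = ZeroShare::initialize(vec![pair_2]);
    let session_id = rng::get_rng().gen::<[u8; 32]>();
    let share_1 = party_1.compute::<k256::Secp256k1>(&[2], &session_id);
    let share_2 = party_2.compute::<k256::Secp256k1>(&[1], &session_id);
    assert_eq!(share_1 + share_2, Scalar::ZERO);
}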
}


@@ -0,0 +1,32 @@
[package]
name = "dkls23_ffi"
version = "0.1.0"
edition = "2021"
authors = ["Quilibrium Inc."]
description = "FFI bindings for DKLs23 threshold ECDSA protocol"
license = "Apache-2.0"
repository = "https://github.com/quilibriumnetwork/monorepo"
keywords = ["threshold", "ecdsa", "mpc", "cryptography", "dkls"]
categories = ["cryptography", "algorithms"]
[lib]
crate-type = ["lib", "staticlib"]
name = "dkls23_ffi"
[dependencies]
uniffi = { version = "0.28.3", features = ["cli"] }
dkls23 = { path = "../dkls23" }
k256 = { version = "0.13", features = ["ecdsa", "serde"] }
p256 = { version = "0.13", features = ["ecdsa", "serde"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
rand = "0.8"
sha2 = "0.10"
hex = "0.4"
thiserror = "1.0"
[build-dependencies]
uniffi = { version = "0.28.3", features = ["build"] }
[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }


@@ -0,0 +1,6 @@
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=src/lib.udl");
uniffi::generate_scaffolding("src/lib.udl").expect("uniffi generation failed");
}
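On the library side, the scaffolding generated here is conventionally pulled into the crate root with `uniffi::include_scaffolding!`; a minimal sketch, assuming the usual uniffi layout with the UDL kept at src/lib.udl (the actual lib.rs is the larger file whose diff is suppressed below):

// src/lib.rs (sketch)
uniffi::include_scaffolding!("lib");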

crates/dkls23_ffi/src/lib.rs

File diff suppressed because it is too large


@@ -0,0 +1,296 @@
// Elliptic curve selection for DKLs23 protocol
enum EllipticCurve {
"Secp256k1",
"P256",
};
namespace dkls23_ffi {
// Initialization
void init();
// ============================================
// Distributed Key Generation (DKG)
// ============================================
// Initialize a new DKG session with a random session ID
// Note: For actual multi-party DKG, use dkg_init_with_session_id instead
DkgInitResult dkg_init(u32 party_id, u32 threshold, u32 total_parties, EllipticCurve curve);
// Initialize a new DKG session with a shared session ID
// All parties must use the same session_id for DKG to succeed
DkgInitResult dkg_init_with_session_id(u32 party_id, u32 threshold, u32 total_parties, [ByRef] sequence<u8> session_id, EllipticCurve curve);
// Process DKG round 1: generate and return broadcast message
DkgRoundResult dkg_round1([ByRef] sequence<u8> session_state);
// Process DKG round 2: process received messages, generate response
DkgRoundResult dkg_round2(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Process DKG round 3: finalize key generation
DkgRoundResult dkg_round3(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Finalize DKG and extract key share
DkgFinalResult dkg_finalize(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// ============================================
// Threshold Signing
// ============================================
// Initialize a signing session
SignInitResult sign_init(
[ByRef] sequence<u8> key_share,
[ByRef] sequence<u8> message_hash,
[ByRef] sequence<u32> signer_party_ids
);
// Initialize a signing session with a shared sign ID
// All parties must use the same sign_id for a signing session to work
SignInitResult sign_init_with_sign_id(
[ByRef] sequence<u8> key_share,
[ByRef] sequence<u8> message_hash,
[ByRef] sequence<u32> signer_party_ids,
[ByRef] sequence<u8> sign_id
);
// Process signing round 1
SignRoundResult sign_round1([ByRef] sequence<u8> session_state);
// Process signing round 2
SignRoundResult sign_round2(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Process signing round 3 (produces broadcasts)
SignRoundResult sign_round3(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Finalize signing (collects broadcasts, produces signature)
SignFinalResult sign_finalize(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// ============================================
// Key Refresh (same threshold, new shares)
// ============================================
// Initialize a refresh session
RefreshInitResult refresh_init(
[ByRef] sequence<u8> key_share,
u32 party_id
);
// Initialize a refresh session with a shared refresh ID
// All parties must use the same refresh_id for a refresh session to work
RefreshInitResult refresh_init_with_refresh_id(
[ByRef] sequence<u8> key_share,
u32 party_id,
[ByRef] sequence<u8> refresh_id
);
// Process refresh round 1 (phase 1: generate polynomial fragments)
RefreshRoundResult refresh_round1([ByRef] sequence<u8> session_state);
// Process refresh round 2 (phase 2: process fragments, generate proofs)
RefreshRoundResult refresh_round2(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Process refresh round 3 (phase 3: process transmits)
RefreshRoundResult refresh_round3(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// Finalize refresh (phase 4: verify and produce new key share)
RefreshFinalResult refresh_finalize(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// ============================================
// Key Resize (change threshold or party count)
// ============================================
// Initialize a resize session
ResizeInitResult resize_init(
[ByRef] sequence<u8> key_share,
u32 party_id,
u32 new_threshold,
u32 new_total_parties,
[ByRef] sequence<u32> new_party_ids,
EllipticCurve curve
);
// Process resize round 1
ResizeRoundResult resize_round1([ByRef] sequence<u8> session_state);
// Process resize round 2 and finalize
ResizeFinalResult resize_round2(
[ByRef] sequence<u8> session_state,
[ByRef] sequence<PartyMessage> received_messages
);
// ============================================
// Utility Functions
// ============================================
// Convert a full secret key to threshold shares (for migration)
RekeyResult rekey_from_secret(
[ByRef] sequence<u8> secret_key,
u32 threshold,
u32 total_parties,
EllipticCurve curve
);
// Derive a child key share using BIP-32 path
DeriveResult derive_child_share(
[ByRef] sequence<u8> key_share,
[ByRef] sequence<u32> derivation_path
);
// Get public key from key share
sequence<u8> get_public_key([ByRef] sequence<u8> key_share);
// Validate a key share structure
boolean validate_key_share([ByRef] sequence<u8> key_share);
};
// ============================================
// Data Types
// ============================================
// Message exchanged between parties
dictionary PartyMessage {
u32 from_party;
u32 to_party;
sequence<u8> data;
};
// DKG initialization result
dictionary DkgInitResult {
sequence<u8> session_state;
boolean success;
string? error_message;
};
// DKG round result (intermediate rounds)
dictionary DkgRoundResult {
sequence<u8> session_state;
sequence<PartyMessage> messages_to_send;
boolean is_complete;
boolean success;
string? error_message;
};
// DKG final result
dictionary DkgFinalResult {
sequence<u8> key_share;
sequence<u8> public_key;
u32 party_id;
u32 threshold;
u32 total_parties;
boolean success;
string? error_message;
};
// Sign initialization result
dictionary SignInitResult {
sequence<u8> session_state;
boolean success;
string? error_message;
};
// Sign round result
dictionary SignRoundResult {
sequence<u8> session_state;
sequence<PartyMessage> messages_to_send;
boolean is_complete;
boolean success;
string? error_message;
};
// Sign final result
dictionary SignFinalResult {
sequence<u8> signature;
boolean success;
string? error_message;
};
// Refresh initialization result
dictionary RefreshInitResult {
sequence<u8> session_state;
boolean success;
string? error_message;
};
// Refresh round result
dictionary RefreshRoundResult {
sequence<u8> session_state;
sequence<PartyMessage> messages_to_send;
boolean is_complete;
boolean success;
string? error_message;
};
// Refresh final result
dictionary RefreshFinalResult {
sequence<u8> new_key_share;
u32 generation;
boolean success;
string? error_message;
};
// Resize initialization result
dictionary ResizeInitResult {
sequence<u8> session_state;
boolean success;
string? error_message;
};
// Resize round result
dictionary ResizeRoundResult {
sequence<u8> session_state;
sequence<PartyMessage> messages_to_send;
boolean is_complete;
boolean success;
string? error_message;
};
// Resize final result
dictionary ResizeFinalResult {
sequence<u8> new_key_share;
u32 new_threshold;
u32 new_total_parties;
boolean success;
string? error_message;
};
// Rekey result (converting full key to shares)
dictionary RekeyResult {
sequence<sequence<u8>> key_shares;
sequence<u8> public_key;
boolean success;
string? error_message;
};
// Key derivation result
dictionary DeriveResult {
sequence<u8> derived_key_share;
sequence<u8> derived_public_key;
boolean success;
string? error_message;
};
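The round functions above all follow the same shape: each call returns `messages_to_send`, and the caller routes every message to its `to_party` before invoking the next round with the collected `received_messages`. A minimal routing sketch; `PartyMessage` here is a hand-written stand-in mirroring the dictionary above, since the generated Rust type lives in the suppressed lib.rs:

#[derive(Clone, Debug)]
struct PartyMessage {
    from_party: u32,
    to_party: u32,
    data: Vec<u8>,
}

/// Collects the messages a given party should feed into its next round call.
fn messages_for(party_id: u32, outboxes: &[Vec<PartyMessage>]) -> Vec<PartyMessage> {
    outboxes
        .iter()
        .flatten()
        .filter(|m| m.to_party == party_id)
        .cloned()
        .collect()
}

Each party then passes the collected messages as `received_messages` to its next `dkg_round*`, `sign_round*`, `refresh_round*`, or `resize_round*` call.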


@@ -6,16 +6,29 @@ use std::process::Command;
fn main() {
let target = env::var("TARGET").expect("cargo should have set this");
// Get path to local emp-tool and emp-ot directories (relative to crates/ferret)
// manifest_dir is .../ceremonyclient/crates/ferret
// emp-tool is at .../ceremonyclient/emp-tool
// emp-ot is at .../ceremonyclient/emp-ot
let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
let emp_tool_local = format!("{}/../../emp-tool", manifest_dir);
let emp_ot_local = format!("{}/../../emp-ot", manifest_dir);
if target == "aarch64-apple-darwin" {
cc::Build::new()
.cpp(true)
.flag_if_supported("-std=c++17")
.file("emp_bridge.cpp")
// Local emp-tool first (for buffer_io_channel.h)
.flag(&format!("-I{}", emp_tool_local))
// Local emp-ot first (for ferret_cot.h with is_setup())
.flag(&format!("-I{}", emp_ot_local))
.flag("-I/usr/local/include/emp-tool/")
.flag("-I/usr/local/include/emp-ot/")
.flag("-I/opt/homebrew/Cellar/openssl@3/3.5.0/include")
.flag("-I/opt/homebrew/Cellar/openssl@3/3.6.1/include")
.flag("-L/usr/local/lib/emp-tool/")
.flag("-L/opt/homebrew/Cellar/openssl@3/3.5.0/lib")
.flag("-L/opt/homebrew/Cellar/openssl@3/3.6.1/lib")
.warnings(false)
.compile("emp_bridge");


@@ -1,6 +1,8 @@
#include "emp_bridge.h"
#include <emp-tool/emp-tool.h>
#include <emp-tool/io/buffer_io_channel.h>
#include <emp-ot/emp-ot.h>
#include <cstring>
using namespace emp;
@@ -8,14 +10,26 @@ struct NetIO_t {
NetIO* netio;
};
struct BufferIO_t {
BufferIO* bufferio;
};
struct FerretCOT_t {
FerretCOT<NetIO>* ferret_cot;
};
struct FerretCOT_Buffer_t {
FerretCOT<BufferIO>* ferret_cot;
};
struct block_t {
block* blocks;
};
// =============================================================================
// NetIO functions (TCP-based, original interface)
// =============================================================================
NetIO_ptr create_netio(int party, const char* address, int port) {
NetIO_ptr io_ptr = new NetIO_t();
if (party == ALICE_PARTY) {
@@ -33,6 +47,70 @@ void free_netio(NetIO_ptr io) {
}
}
// =============================================================================
// BufferIO functions (message-based, new interface)
// =============================================================================
BufferIO_ptr create_buffer_io(int64_t initial_cap) {
BufferIO_ptr io_ptr = new BufferIO_t();
io_ptr->bufferio = new BufferIO(initial_cap);
return io_ptr;
}
void free_buffer_io(BufferIO_ptr io) {
if (io) {
delete io->bufferio;
delete io;
}
}
int buffer_io_fill_recv(BufferIO_ptr io, const uint8_t* data, size_t len) {
if (!io || !io->bufferio || !data) return -1;
try {
io->bufferio->fill_recv_buffer(reinterpret_cast<const char*>(data), len);
return 0;
} catch (...) {
return -1;
}
}
size_t buffer_io_drain_send(BufferIO_ptr io, uint8_t* out_buffer, size_t max_len) {
if (!io || !io->bufferio || !out_buffer) return 0;
return io->bufferio->drain_send_buffer(reinterpret_cast<char*>(out_buffer), max_len);
}
size_t buffer_io_send_size(BufferIO_ptr io) {
if (!io || !io->bufferio) return 0;
return io->bufferio->send_buffer_size();
}
size_t buffer_io_recv_available(BufferIO_ptr io) {
if (!io || !io->bufferio) return 0;
return io->bufferio->recv_buffer_available();
}
void buffer_io_set_timeout(BufferIO_ptr io, int64_t timeout_ms) {
if (io && io->bufferio) {
io->bufferio->set_recv_timeout(timeout_ms);
}
}
void buffer_io_set_error(BufferIO_ptr io, const char* message) {
if (io && io->bufferio && message) {
io->bufferio->set_error(std::string(message));
}
}
void buffer_io_clear(BufferIO_ptr io) {
if (io && io->bufferio) {
io->bufferio->clear();
}
}
// =============================================================================
// FerretCOT functions (TCP-based, original interface)
// =============================================================================
FerretCOT_ptr create_ferret_cot(int party, int threads, NetIO_ptr io, bool malicious) {
FerretCOT_ptr ot_ptr = new FerretCOT_t();
ot_ptr->ferret_cot = new FerretCOT<NetIO>(party, threads, &io->netio, malicious, true);
@@ -53,49 +131,6 @@ block_ptr get_delta(FerretCOT_ptr ot) {
return delta_ptr;
}
block_ptr allocate_blocks(size_t length) {
block_ptr blocks_ptr = new block_t();
blocks_ptr->blocks = new block[length];
return blocks_ptr;
}
void free_blocks(block_ptr blocks) {
if (blocks) {
delete[] blocks->blocks;
delete blocks;
}
}
size_t get_block_data(block_ptr blocks, size_t index, uint8_t* buffer, size_t buffer_len) {
if (!blocks || !blocks->blocks) return 0;
const size_t BLOCK_SIZE = 16;
emp::block& b = blocks->blocks[index];
if (!buffer || buffer_len == 0) {
return BLOCK_SIZE;
}
size_t copy_size = buffer_len < BLOCK_SIZE ? buffer_len : BLOCK_SIZE;
memcpy(buffer, &b, copy_size);
return copy_size;
}
void set_block_data(block_ptr blocks, size_t index, const uint8_t* data, size_t data_len) {
if (!blocks || !blocks->blocks || !data) return;
const size_t BLOCK_SIZE = 16;
emp::block& b = blocks->blocks[index];
size_t copy_size = data_len < BLOCK_SIZE ? data_len : BLOCK_SIZE;
memcpy(&b, data, copy_size);
if (copy_size < BLOCK_SIZE) {
memset(reinterpret_cast<uint8_t*>(&b) + copy_size, 0, BLOCK_SIZE - copy_size);
}
}
void send_cot(FerretCOT_ptr ot, block_ptr b0, size_t length) {
ot->ferret_cot->send_cot(b0->blocks, length);
}
@@ -111,3 +146,195 @@ void send_rot(FerretCOT_ptr ot, block_ptr b0, block_ptr b1, size_t length) {
void recv_rot(FerretCOT_ptr ot, block_ptr br, bool* choices, size_t length) {
ot->ferret_cot->recv_rot(br->blocks, choices, length);
}
// =============================================================================
// FerretCOT functions (Buffer-based, new interface)
// =============================================================================
FerretCOT_Buffer_ptr create_ferret_cot_buffer(int party, int threads, BufferIO_ptr io, bool malicious) {
FerretCOT_Buffer_ptr ot_ptr = new FerretCOT_Buffer_t();
// IMPORTANT: Pass run_setup=false to avoid blocking I/O during construction.
// With BufferIO, there's no peer connected yet, so setup() would time out waiting
// for data. The caller must ensure setup() is called later when both parties
// have their message transport active.
ot_ptr->ferret_cot = new FerretCOT<BufferIO>(party, threads, &io->bufferio, malicious, false);
return ot_ptr;
}
void free_ferret_cot_buffer(FerretCOT_Buffer_ptr ot) {
if (ot) {
delete ot->ferret_cot;
delete ot;
}
}
int setup_ferret_cot_buffer(FerretCOT_Buffer_ptr ot, int party) {
if (!ot || !ot->ferret_cot) return -1;
try {
// Run the deferred setup now that message transport is active.
// This mirrors what would happen in the constructor if run_setup=true.
if (party == ALICE_PARTY) {
PRG prg;
block Delta;
prg.random_block(&Delta);
block one = makeBlock(0xFFFFFFFFFFFFFFFFLL, 0xFFFFFFFFFFFFFFFELL);
Delta = Delta & one;
Delta = Delta ^ 0x1;
ot->ferret_cot->setup(Delta);
} else {
ot->ferret_cot->setup();
}
return 0;
} catch (const std::exception& e) {
// Exception during setup - likely timeout or IO error
return -1;
} catch (...) {
// Unknown exception
return -1;
}
}
block_ptr get_delta_buffer(FerretCOT_Buffer_ptr ot) {
block_ptr delta_ptr = new block_t();
delta_ptr->blocks = new block[1];
delta_ptr->blocks[0] = ot->ferret_cot->Delta;
return delta_ptr;
}
int send_cot_buffer(FerretCOT_Buffer_ptr ot, block_ptr b0, size_t length) {
if (!ot || !ot->ferret_cot || !b0) return -1;
try {
ot->ferret_cot->send_cot(b0->blocks, length);
return 0;
} catch (...) {
return -1;
}
}
int recv_cot_buffer(FerretCOT_Buffer_ptr ot, block_ptr br, bool* choices, size_t length) {
if (!ot || !ot->ferret_cot || !br) return -1;
try {
ot->ferret_cot->recv_cot(br->blocks, choices, length);
return 0;
} catch (...) {
return -1;
}
}
int send_rot_buffer(FerretCOT_Buffer_ptr ot, block_ptr b0, block_ptr b1, size_t length) {
if (!ot || !ot->ferret_cot || !b0 || !b1) return -1;
try {
ot->ferret_cot->send_rot(b0->blocks, b1->blocks, length);
return 0;
} catch (...) {
return -1;
}
}
int recv_rot_buffer(FerretCOT_Buffer_ptr ot, block_ptr br, bool* choices, size_t length) {
if (!ot || !ot->ferret_cot || !br) return -1;
try {
ot->ferret_cot->recv_rot(br->blocks, choices, length);
return 0;
} catch (...) {
return -1;
}
}
// =============================================================================
// Block data accessors
// =============================================================================
block_ptr allocate_blocks(size_t length) {
block_ptr blocks_ptr = new block_t();
blocks_ptr->blocks = new block[length];
return blocks_ptr;
}
void free_blocks(block_ptr blocks) {
if (blocks) {
delete[] blocks->blocks;
delete blocks;
}
}
size_t get_block_data(block_ptr blocks, size_t index, uint8_t* buffer, size_t buffer_len) {
if (!blocks || !blocks->blocks) return 0;
const size_t BLOCK_SIZE = 16;
emp::block& b = blocks->blocks[index];
if (!buffer || buffer_len == 0) {
return BLOCK_SIZE;
}
size_t copy_size = buffer_len < BLOCK_SIZE ? buffer_len : BLOCK_SIZE;
memcpy(buffer, &b, copy_size);
return copy_size;
}
void set_block_data(block_ptr blocks, size_t index, const uint8_t* data, size_t data_len) {
if (!blocks || !blocks->blocks || !data) return;
const size_t BLOCK_SIZE = 16;
emp::block& b = blocks->blocks[index];
size_t copy_size = data_len < BLOCK_SIZE ? data_len : BLOCK_SIZE;
memcpy(&b, data, copy_size);
if (copy_size < BLOCK_SIZE) {
memset(reinterpret_cast<uint8_t*>(&b) + copy_size, 0, BLOCK_SIZE - copy_size);
}
}
// =============================================================================
// State serialization functions (for persistent storage)
// =============================================================================
int64_t ferret_cot_state_size(FerretCOT_ptr ot) {
if (!ot || !ot->ferret_cot) return 0;
return ot->ferret_cot->state_size();
}
int64_t ferret_cot_buffer_state_size(FerretCOT_Buffer_ptr ot) {
if (!ot || !ot->ferret_cot) return 0;
return ot->ferret_cot->state_size();
}
int ferret_cot_assemble_state(FerretCOT_ptr ot, uint8_t* buffer, int64_t buffer_size) {
if (!ot || !ot->ferret_cot || !buffer) return -1;
int64_t needed = ot->ferret_cot->state_size();
if (buffer_size < needed) return -1;
ot->ferret_cot->assemble_state(buffer, buffer_size);
return 0;
}
int ferret_cot_buffer_assemble_state(FerretCOT_Buffer_ptr ot, uint8_t* buffer, int64_t buffer_size) {
if (!ot || !ot->ferret_cot || !buffer) return -1;
int64_t needed = ot->ferret_cot->state_size();
if (buffer_size < needed) return -1;
ot->ferret_cot->assemble_state(buffer, buffer_size);
return 0;
}
int ferret_cot_disassemble_state(FerretCOT_ptr ot, const uint8_t* buffer, int64_t buffer_size) {
if (!ot || !ot->ferret_cot || !buffer) return -1;
return ot->ferret_cot->disassemble_state(buffer, buffer_size);
}
int ferret_cot_buffer_disassemble_state(FerretCOT_Buffer_ptr ot, const uint8_t* buffer, int64_t buffer_size) {
if (!ot || !ot->ferret_cot || !buffer) return -1;
return ot->ferret_cot->disassemble_state(buffer, buffer_size);
}
bool ferret_cot_is_setup(FerretCOT_ptr ot) {
if (!ot || !ot->ferret_cot) return false;
return ot->ferret_cot->is_setup();
}
bool ferret_cot_buffer_is_setup(FerretCOT_Buffer_ptr ot) {
if (!ot || !ot->ferret_cot) return false;
return ot->ferret_cot->is_setup();
}


@@ -11,38 +11,115 @@ extern "C" {
// Opaque pointers to hide C++ implementation
typedef struct NetIO_t* NetIO_ptr;
typedef struct BufferIO_t* BufferIO_ptr;
typedef struct FerretCOT_t* FerretCOT_ptr;
typedef struct FerretCOT_Buffer_t* FerretCOT_Buffer_ptr;
typedef struct block_t* block_ptr;
// Constants
#define ALICE_PARTY 1
#define BOB_PARTY 2
// NetIO functions
// NetIO functions (TCP-based, original interface)
NetIO_ptr create_netio(int party, const char* address, int port);
void free_netio(NetIO_ptr io);
// FerretCOT functions
// BufferIO functions (message-based, new interface)
BufferIO_ptr create_buffer_io(int64_t initial_cap);
void free_buffer_io(BufferIO_ptr io);
// Fill receive buffer with data from external transport
// Returns 0 on success, -1 on error
int buffer_io_fill_recv(BufferIO_ptr io, const uint8_t* data, size_t len);
// Drain send buffer to external transport
// Returns number of bytes copied, or 0 if empty
// Caller provides buffer and max length
size_t buffer_io_drain_send(BufferIO_ptr io, uint8_t* out_buffer, size_t max_len);
// Get current send buffer size (to check if there's data to send)
size_t buffer_io_send_size(BufferIO_ptr io);
// Get current receive buffer available data
size_t buffer_io_recv_available(BufferIO_ptr io);
// Set timeout for blocking receive (milliseconds)
void buffer_io_set_timeout(BufferIO_ptr io, int64_t timeout_ms);
// Set error state (will cause recv to fail)
void buffer_io_set_error(BufferIO_ptr io, const char* message);
// Clear all buffers
void buffer_io_clear(BufferIO_ptr io);
// FerretCOT functions (TCP-based, original interface)
FerretCOT_ptr create_ferret_cot(int party, int threads, NetIO_ptr io, bool malicious);
void free_ferret_cot(FerretCOT_ptr ot);
// FerretCOT functions (Buffer-based, new interface)
// NOTE: create_ferret_cot_buffer does NOT run setup automatically.
// You must call setup_ferret_cot_buffer after both parties have their
// message transport active (i.e., can send/receive data).
FerretCOT_Buffer_ptr create_ferret_cot_buffer(int party, int threads, BufferIO_ptr io, bool malicious);
void free_ferret_cot_buffer(FerretCOT_Buffer_ptr ot);
// Run the OT setup protocol. Must be called after create_ferret_cot_buffer
// when both parties have their BufferIO connected (message transport active).
// For ALICE: generates Delta and runs sender setup
// For BOB: runs receiver setup
// Returns 0 on success, -1 on error (exception caught)
int setup_ferret_cot_buffer(FerretCOT_Buffer_ptr ot, int party);
// Get the Delta correlation value
block_ptr get_delta(FerretCOT_ptr ot);
block_ptr get_delta_buffer(FerretCOT_Buffer_ptr ot);
// Allocate and free blocks
block_ptr allocate_blocks(size_t length);
void free_blocks(block_ptr blocks);
// OT Operations
// OT Operations (TCP-based)
void send_cot(FerretCOT_ptr ot, block_ptr b0, size_t length);
void recv_cot(FerretCOT_ptr ot, block_ptr br, bool* choices, size_t length);
void send_rot(FerretCOT_ptr ot, block_ptr b0, block_ptr b1, size_t length);
void recv_rot(FerretCOT_ptr ot, block_ptr br, bool* choices, size_t length);
// OT Operations (Buffer-based)
// All return 0 on success, -1 on error (exception caught)
int send_cot_buffer(FerretCOT_Buffer_ptr ot, block_ptr b0, size_t length);
int recv_cot_buffer(FerretCOT_Buffer_ptr ot, block_ptr br, bool* choices, size_t length);
int send_rot_buffer(FerretCOT_Buffer_ptr ot, block_ptr b0, block_ptr b1, size_t length);
int recv_rot_buffer(FerretCOT_Buffer_ptr ot, block_ptr br, bool* choices, size_t length);
// Block data accessors
size_t get_block_data(block_ptr blocks, size_t index, uint8_t* buffer, size_t buffer_len);
void set_block_data(block_ptr blocks, size_t index, const uint8_t* data, size_t data_len);
// =============================================================================
// State serialization functions (for persistent storage)
// =============================================================================
// Get the size needed to store the FerretCOT state
// This allows storing setup data externally instead of in files
int64_t ferret_cot_state_size(FerretCOT_ptr ot);
int64_t ferret_cot_buffer_state_size(FerretCOT_Buffer_ptr ot);
// Serialize FerretCOT state to a buffer
// buffer must be at least ferret_cot_state_size() bytes
// Returns 0 on success, -1 on error
int ferret_cot_assemble_state(FerretCOT_ptr ot, uint8_t* buffer, int64_t buffer_size);
int ferret_cot_buffer_assemble_state(FerretCOT_Buffer_ptr ot, uint8_t* buffer, int64_t buffer_size);
// Restore FerretCOT state from a buffer (created by assemble_state)
// This must be called INSTEAD of setup, not after
// Returns 0 on success, -1 on error (e.g., parameter mismatch)
int ferret_cot_disassemble_state(FerretCOT_ptr ot, const uint8_t* buffer, int64_t buffer_size);
int ferret_cot_buffer_disassemble_state(FerretCOT_Buffer_ptr ot, const uint8_t* buffer, int64_t buffer_size);
// Check if setup has been run (state is initialized)
bool ferret_cot_is_setup(FerretCOT_ptr ot);
bool ferret_cot_buffer_is_setup(FerretCOT_Buffer_ptr ot);
#ifdef __cplusplus
}
#endif


@@ -24,10 +24,14 @@ impl Error for FerretError {}
// Opaque pointer types
pub enum NetIO_t {}
pub enum BufferIO_t {}
pub enum FerretCOT_t {}
pub enum FerretCOT_Buffer_t {}
pub enum block_t {}
pub type NetIO_ptr = *mut NetIO_t;
pub type BufferIO_ptr = *mut BufferIO_t;
pub type FerretCOT_ptr = *mut FerretCOT_t;
pub type FerretCOT_Buffer_ptr = *mut FerretCOT_Buffer_t;
pub type block_ptr = *mut block_t;
// Constants
@@ -37,25 +41,58 @@ pub const BOB: i32 = 2;
// FFI declarations
#[link(name = "emp_bridge")]
extern "C" {
// NetIO (TCP-based)
pub fn create_netio(party: c_int, address: *const c_char, port: c_int) -> NetIO_ptr;
pub fn free_netio(io: NetIO_ptr);
// BufferIO (message-based)
pub fn create_buffer_io(initial_cap: i64) -> BufferIO_ptr;
pub fn free_buffer_io(io: BufferIO_ptr);
pub fn buffer_io_fill_recv(io: BufferIO_ptr, data: *const u8, len: usize) -> c_int;
pub fn buffer_io_drain_send(io: BufferIO_ptr, out_buffer: *mut u8, max_len: usize) -> usize;
pub fn buffer_io_send_size(io: BufferIO_ptr) -> usize;
pub fn buffer_io_recv_available(io: BufferIO_ptr) -> usize;
pub fn buffer_io_set_timeout(io: BufferIO_ptr, timeout_ms: i64);
pub fn buffer_io_set_error(io: BufferIO_ptr, message: *const c_char);
pub fn buffer_io_clear(io: BufferIO_ptr);
// FerretCOT (TCP-based)
pub fn create_ferret_cot(party: c_int, threads: c_int, io: NetIO_ptr, malicious: bool) -> FerretCOT_ptr;
pub fn free_ferret_cot(ot: FerretCOT_ptr);
pub fn get_delta(ot: FerretCOT_ptr) -> block_ptr;
pub fn allocate_blocks(length: usize) -> block_ptr;
pub fn free_blocks(blocks: block_ptr);
pub fn send_cot(ot: FerretCOT_ptr, b0: block_ptr, length: usize);
pub fn recv_cot(ot: FerretCOT_ptr, br: block_ptr, choices: *const bool, length: usize);
pub fn send_rot(ot: FerretCOT_ptr, b0: block_ptr, b1: block_ptr, length: usize);
pub fn recv_rot(ot: FerretCOT_ptr, br: block_ptr, choices: *const bool, length: usize);
// FerretCOT (Buffer-based)
// NOTE: create_ferret_cot_buffer does NOT run setup automatically.
// You must call setup_ferret_cot_buffer after both parties have their
// message transport active (i.e., can send/receive data).
pub fn create_ferret_cot_buffer(party: c_int, threads: c_int, io: BufferIO_ptr, malicious: bool) -> FerretCOT_Buffer_ptr;
pub fn free_ferret_cot_buffer(ot: FerretCOT_Buffer_ptr);
pub fn setup_ferret_cot_buffer(ot: FerretCOT_Buffer_ptr, party: c_int) -> c_int;
pub fn get_delta_buffer(ot: FerretCOT_Buffer_ptr) -> block_ptr;
pub fn send_cot_buffer(ot: FerretCOT_Buffer_ptr, b0: block_ptr, length: usize) -> c_int;
pub fn recv_cot_buffer(ot: FerretCOT_Buffer_ptr, br: block_ptr, choices: *const bool, length: usize) -> c_int;
pub fn send_rot_buffer(ot: FerretCOT_Buffer_ptr, b0: block_ptr, b1: block_ptr, length: usize) -> c_int;
pub fn recv_rot_buffer(ot: FerretCOT_Buffer_ptr, br: block_ptr, choices: *const bool, length: usize) -> c_int;
// Block operations
pub fn allocate_blocks(length: usize) -> block_ptr;
pub fn free_blocks(blocks: block_ptr);
pub fn get_block_data(blocks: block_ptr, index: usize, buffer: *mut u8, buffer_len: usize) -> usize;
pub fn set_block_data(blocks: block_ptr, index: usize, data: *const u8, data_len: usize);
// State serialization (for persistent storage instead of file-based)
pub fn ferret_cot_state_size(ot: FerretCOT_ptr) -> i64;
pub fn ferret_cot_buffer_state_size(ot: FerretCOT_Buffer_ptr) -> i64;
pub fn ferret_cot_assemble_state(ot: FerretCOT_ptr, buffer: *mut u8, buffer_size: i64) -> c_int;
pub fn ferret_cot_buffer_assemble_state(ot: FerretCOT_Buffer_ptr, buffer: *mut u8, buffer_size: i64) -> c_int;
pub fn ferret_cot_disassemble_state(ot: FerretCOT_ptr, buffer: *const u8, buffer_size: i64) -> c_int;
pub fn ferret_cot_buffer_disassemble_state(ot: FerretCOT_Buffer_ptr, buffer: *const u8, buffer_size: i64) -> c_int;
pub fn ferret_cot_is_setup(ot: FerretCOT_ptr) -> bool;
pub fn ferret_cot_buffer_is_setup(ot: FerretCOT_Buffer_ptr) -> bool;
}
// Safe Rust wrapper for NetIO
@@ -226,6 +263,237 @@ impl Drop for FerretCOT {
}
}
// =============================================================================
// BufferIO - Message-based IO for Ferret OT (no TCP required)
// =============================================================================
#[derive(Debug)]
pub struct BufferIO {
pub(crate) inner: Mutex<BufferIO_ptr>,
}
unsafe impl Send for BufferIO {}
unsafe impl Sync for BufferIO {}
impl BufferIO {
pub fn new(initial_cap: i64) -> Self {
let inner = unsafe { create_buffer_io(initial_cap) };
BufferIO { inner: Mutex::new(inner) }
}
pub fn fill_recv(&self, data: &[u8]) -> Result<(), String> {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return Err("BufferIO is null".to_string());
}
let result = unsafe { buffer_io_fill_recv(ptr, data.as_ptr(), data.len()) };
if result == 0 {
Ok(())
} else {
Err("Failed to fill recv buffer".to_string())
}
}
pub fn drain_send(&self, max_len: usize) -> Vec<u8> {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return Vec::new();
}
let mut buffer = vec![0u8; max_len];
let actual_len = unsafe { buffer_io_drain_send(ptr, buffer.as_mut_ptr(), max_len) };
buffer.truncate(actual_len);
buffer
}
pub fn send_size(&self) -> usize {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return 0;
}
unsafe { buffer_io_send_size(ptr) }
}
pub fn recv_available(&self) -> usize {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return 0;
}
unsafe { buffer_io_recv_available(ptr) }
}
pub fn set_timeout(&self, timeout_ms: i64) {
let ptr = *self.inner.lock().unwrap();
if !ptr.is_null() {
unsafe { buffer_io_set_timeout(ptr, timeout_ms) }
}
}
pub fn set_error(&self, message: &str) {
let ptr = *self.inner.lock().unwrap();
if !ptr.is_null() {
let c_msg = CString::new(message).unwrap();
unsafe { buffer_io_set_error(ptr, c_msg.as_ptr()) }
}
}
pub fn clear(&self) {
let ptr = *self.inner.lock().unwrap();
if !ptr.is_null() {
unsafe { buffer_io_clear(ptr) }
}
}
pub(crate) fn get_ptr(&self) -> BufferIO_ptr {
*self.inner.lock().unwrap()
}
}
impl Drop for BufferIO {
fn drop(&mut self) {
let ptr = *self.inner.lock().unwrap();
if !ptr.is_null() {
unsafe { free_buffer_io(ptr) }
}
}
}
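// A minimal local sketch of the BufferIO plumbing described above (assumes the
// documented semantics of fill_recv/drain_send; real use pairs two BufferIOs
// across an external message transport rather than poking one locally).
#[cfg(test)]
mod buffer_io_sketch_tests {
    use super::*;

    #[test]
    fn fill_and_drain_are_independent_buffers() {
        let io = BufferIO::new(4096);
        // Bytes handed to fill_recv become available to the OT side...
        io.fill_recv(&[1, 2, 3]).expect("fill_recv should accept data");
        assert_eq!(io.recv_available(), 3);
        // ...while the send buffer stays empty until the OT side writes to it.
        assert_eq!(io.send_size(), 0);
        assert!(io.drain_send(1024).is_empty());
    }
}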
// =============================================================================
// FerretCOTBuffer - Ferret OT using BufferIO (message-based)
// =============================================================================
#[derive(Debug)]
pub struct FerretCOTBuffer {
pub(crate) inner: Mutex<FerretCOT_Buffer_ptr>,
}
unsafe impl Send for FerretCOTBuffer {}
unsafe impl Sync for FerretCOTBuffer {}
impl FerretCOTBuffer {
pub fn new(party: i32, threads: i32, bufferio: &BufferIO, malicious: bool) -> Self {
let inner = unsafe { create_ferret_cot_buffer(party, threads, bufferio.get_ptr(), malicious) };
FerretCOTBuffer {
inner: Mutex::new(inner),
}
}
/// Run the OT setup protocol. Must be called after both parties have their
/// BufferIO message transport active (can send/receive data).
/// This is deferred from construction because BufferIO-based OT needs
/// the message channel to be ready before setup can exchange data.
/// Returns true on success, false on error.
pub fn setup(&self, party: i32) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
let result = unsafe { setup_ferret_cot_buffer(ptr, party) };
result == 0
}
/// Check if setup has been run
pub fn is_setup(&self) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
unsafe { ferret_cot_buffer_is_setup(ptr) }
}
/// Get the size needed to store the OT state
pub fn state_size(&self) -> i64 {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return 0;
}
unsafe { ferret_cot_buffer_state_size(ptr) }
}
/// Serialize OT state to a buffer for persistent storage.
/// This allows storing setup data externally instead of in files.
/// Returns None if serialization fails.
pub fn assemble_state(&self) -> Option<Vec<u8>> {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return None;
}
let size = unsafe { ferret_cot_buffer_state_size(ptr) };
if size <= 0 {
return None;
}
let mut buffer = vec![0u8; size as usize];
let result = unsafe { ferret_cot_buffer_assemble_state(ptr, buffer.as_mut_ptr(), size) };
if result == 0 {
Some(buffer)
} else {
None
}
}
/// Restore OT state from a buffer (created by assemble_state).
/// This must be called INSTEAD of setup, not after.
/// Returns true on success.
pub fn disassemble_state(&self, data: &[u8]) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() || data.is_empty() {
return false;
}
let result = unsafe { ferret_cot_buffer_disassemble_state(ptr, data.as_ptr(), data.len() as i64) };
result == 0
}
pub fn get_delta(&self) -> BlockArray {
let ptr = *self.inner.lock().unwrap();
let delta_ptr = unsafe { get_delta_buffer(ptr) };
BlockArray { inner: Mutex::new(delta_ptr), length: 1 }
}
pub fn send_cot(&self, b0: &BlockArray, length: u64) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
let result = unsafe { send_cot_buffer(ptr, b0.get_ptr(), length as usize) };
result == 0
}
pub fn recv_cot(&self, br: &BlockArray, choices: &Vec<bool>, length: u64) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
let result = unsafe { recv_cot_buffer(ptr, br.get_ptr(), choices.as_ptr(), length as usize) };
result == 0
}
pub fn send_rot(&self, b0: &BlockArray, b1: &BlockArray, length: u64) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
let result = unsafe { send_rot_buffer(ptr, b0.get_ptr(), b1.get_ptr(), length as usize) };
result == 0
}
pub fn recv_rot(&self, br: &BlockArray, choices: &Vec<bool>, length: u64) -> bool {
let ptr = *self.inner.lock().unwrap();
if ptr.is_null() {
return false;
}
let result = unsafe { recv_rot_buffer(ptr, br.get_ptr(), choices.as_ptr(), length as usize) };
result == 0
}
}
impl Drop for FerretCOTBuffer {
fn drop(&mut self) {
let ptr = *self.inner.lock().unwrap();
if !ptr.is_null() {
unsafe { free_ferret_cot_buffer(ptr) }
}
}
}
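// Sketch of the persistence pattern described on assemble_state /
// disassemble_state above: capture the state after a completed setup, then
// restore it onto a fresh instance with identical parameters instead of
// running setup again. (Illustrative only; error handling reduced to asserts.)
#[allow(dead_code)]
fn persist_and_restore_sketch(completed: &FerretCOTBuffer, fresh: &FerretCOTBuffer) {
    if let Some(state) = completed.assemble_state() {
        // disassemble_state is called INSTEAD of setup on the new instance.
        assert!(fresh.disassemble_state(&state));
    }
}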
// todo: when uniffi 0.28 is available for go bindgen, nuke this entire monstrosity from orbit:
pub struct NetIOManager {
@@ -293,3 +561,145 @@ pub fn create_ferret_cot_manager(party: i32, threads: i32, length: u64, choices:
let ferret_cot = Arc::new(FerretCOT::new(party, threads, &netio.netio, malicious));
Arc::new(FerretCOTManager { ferret_cot, party, b0: create_block_array_manager(length), b1: if party == 2 { None } else { Some(create_block_array_manager(length)) }, choices, length })
}
// =============================================================================
// BufferIO Manager types for UniFFI (message-based Ferret OT)
// =============================================================================
pub struct BufferIOManager {
pub bufferio: Arc<BufferIO>,
}
impl BufferIOManager {
/// Fill the receive buffer with data from external transport
pub fn fill_recv(&self, data: Vec<u8>) -> bool {
self.bufferio.fill_recv(&data).is_ok()
}
/// Drain data from send buffer (up to max_len bytes)
pub fn drain_send(&self, max_len: u64) -> Vec<u8> {
self.bufferio.drain_send(max_len as usize)
}
/// Get current send buffer size
pub fn send_size(&self) -> u64 {
self.bufferio.send_size() as u64
}
/// Get available bytes in receive buffer
pub fn recv_available(&self) -> u64 {
self.bufferio.recv_available() as u64
}
/// Set timeout for blocking receive (milliseconds)
pub fn set_timeout(&self, timeout_ms: i64) {
self.bufferio.set_timeout(timeout_ms);
}
/// Set error state
pub fn set_error(&self, message: String) {
self.bufferio.set_error(&message);
}
/// Clear all buffers
pub fn clear(&self) {
self.bufferio.clear();
}
}
pub struct FerretCOTBufferManager {
pub ferret_cot: Arc<FerretCOTBuffer>,
pub party: i32,
pub b0: Arc<BlockArrayManager>,
pub b1: Option<Arc<BlockArrayManager>>,
pub choices: Vec<bool>,
pub length: u64,
}
impl FerretCOTBufferManager {
/// Run the OT setup protocol. Must be called after both parties have their
/// BufferIO message transport active (can send/receive data).
/// Returns true on success, false on error.
pub fn setup(&self) -> bool {
self.ferret_cot.setup(self.party)
}
/// Check if setup has been run
pub fn is_setup(&self) -> bool {
self.ferret_cot.is_setup()
}
/// Get the size needed to store the OT state
pub fn state_size(&self) -> i64 {
self.ferret_cot.state_size()
}
/// Serialize OT state for persistent storage.
/// Returns the serialized state, or empty vector if failed.
pub fn assemble_state(&self) -> Vec<u8> {
self.ferret_cot.assemble_state().unwrap_or_default()
}
/// Restore OT state from a buffer (created by assemble_state).
/// This must be called INSTEAD of setup, not after.
/// Returns true on success.
pub fn disassemble_state(&self, data: Vec<u8>) -> bool {
self.ferret_cot.disassemble_state(&data)
}
pub fn send_cot(&self) -> bool {
self.ferret_cot.send_cot(&self.b0.block_array, self.length)
}
pub fn recv_cot(&self) -> bool {
self.ferret_cot.recv_cot(&self.b0.block_array, &self.choices, self.length)
}
pub fn send_rot(&self) -> bool {
self.ferret_cot.send_rot(&self.b0.block_array, &self.b1.as_ref().unwrap().block_array, self.length)
}
pub fn recv_rot(&self) -> bool {
self.ferret_cot.recv_rot(&self.b0.block_array, &self.choices, self.length)
}
pub fn get_block_data(&self, block_choice: u8, index: u64) -> Vec<u8> {
if block_choice == 0 {
self.b0.block_array.get_block_data(index)
} else {
self.b1.as_ref().unwrap().block_array.get_block_data(index)
}
}
pub fn set_block_data(&self, block_choice: u8, index: u64, data: Vec<u8>) {
if block_choice == 0 {
self.b0.block_array.set_block_data(index, data)
} else {
self.b1.as_ref().unwrap().block_array.set_block_data(index, data)
}
}
}
pub fn create_buffer_io_manager(initial_cap: i64) -> Arc<BufferIOManager> {
let bufferio = Arc::new(BufferIO::new(initial_cap));
Arc::new(BufferIOManager { bufferio })
}
pub fn create_ferret_cot_buffer_manager(
party: i32,
threads: i32,
length: u64,
choices: Vec<bool>,
bufferio: &Arc<BufferIOManager>,
malicious: bool
) -> Arc<FerretCOTBufferManager> {
let ferret_cot = Arc::new(FerretCOTBuffer::new(party, threads, &bufferio.bufferio, malicious));
Arc::new(FerretCOTBufferManager {
ferret_cot,
party,
b0: create_block_array_manager(length),
b1: if party == 2 { None } else { Some(create_block_array_manager(length)) },
choices,
length,
})
}
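// Sketch of the intended creation order for the buffer-based managers
// (party ids as in the constants above: ALICE = 1, BOB = 2; pumping bytes
// between the two parties' BufferIOManagers is left to the caller's transport).
#[allow(dead_code)]
fn buffer_manager_wiring_sketch() -> Arc<FerretCOTBufferManager> {
    let io = create_buffer_io_manager(1 << 20);
    let choices = vec![false; 128];
    // Party 2 (BOB) supplies choice bits and has no b1 block array.
    let ot = create_ferret_cot_buffer_manager(2, 1, 128, choices, &io, true);
    // Only call ot.setup() (or ot.disassemble_state(...)) once both parties can
    // actually exchange the bytes surfaced by drain_send / fill_recv.
    ot
}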


@@ -1,11 +1,18 @@
namespace ferret {
// TCP-based (original interface)
NetIOManager create_netio_manager(i32 party, string? address, i32 port);
FerretCOTManager create_ferret_cot_manager(i32 party, i32 threads, u64 length, sequence<boolean> choices, [ByRef] NetIOManager netio, boolean malicious);
// Buffer-based (new message-channel interface)
BufferIOManager create_buffer_io_manager(i64 initial_cap);
FerretCOTBufferManager create_ferret_cot_buffer_manager(i32 party, i32 threads, u64 length, sequence<boolean> choices, [ByRef] BufferIOManager bufferio, boolean malicious);
};
// TCP-based IO (original)
interface NetIOManager {};
// TCP-based Ferret COT (original)
interface FerretCOTManager {
void send_cot();
void recv_cot();
@@ -14,3 +21,46 @@ interface FerretCOTManager {
sequence<u8> get_block_data(u8 block_choice, u64 index);
void set_block_data(u8 block_choice, u64 index, sequence<u8> data);
};
// Buffer-based IO (new - for message channels)
interface BufferIOManager {
// Fill receive buffer with data from external transport
boolean fill_recv(sequence<u8> data);
// Drain send buffer (up to max_len bytes)
sequence<u8> drain_send(u64 max_len);
// Get current send buffer size
u64 send_size();
// Get available bytes in receive buffer
u64 recv_available();
// Set timeout for blocking receive (milliseconds)
void set_timeout(i64 timeout_ms);
// Set error state
void set_error(string message);
// Clear all buffers
void clear();
};
// Buffer-based Ferret COT (new - for message channels)
// NOTE: After creating with create_ferret_cot_buffer_manager, you MUST either:
// 1. Call setup() once both parties have their message transport active, OR
// 2. Call disassemble_state() with previously saved state data
interface FerretCOTBufferManager {
// Run the OT setup protocol (call after message transport is active)
// Returns true on success, false on error (e.g., timeout, IO error)
boolean setup();
// Check if setup has been run
boolean is_setup();
// Get the size needed to store the OT state
i64 state_size();
// Serialize OT state for persistent storage (returns empty if failed)
sequence<u8> assemble_state();
// Restore OT state from a buffer (call INSTEAD of setup, not after)
boolean disassemble_state(sequence<u8> data);
// COT operations - return true on success, false on error
boolean send_cot();
boolean recv_cot();
boolean send_rot();
boolean recv_rot();
sequence<u8> get_block_data(u8 block_choice, u64 index);
void set_block_data(u8 block_choice, u64 index, sequence<u8> data);
};

dkls23_ffi/dkls23.go

@@ -0,0 +1,211 @@
// Package dkls23_ffi provides Go bindings for the DKLs23 threshold ECDSA protocol.
// This wraps the Rust dkls23 crate via uniffi-generated FFI bindings.
package dkls23_ffi
import (
generated "source.quilibrium.com/quilibrium/monorepo/dkls23_ffi/generated/dkls23_ffi"
)
//go:generate ./generate.sh
// Re-export types from generated bindings
type (
PartyMessage = generated.PartyMessage
DkgInitResult = generated.DkgInitResult
DkgRoundResult = generated.DkgRoundResult
DkgFinalResult = generated.DkgFinalResult
SignInitResult = generated.SignInitResult
SignRoundResult = generated.SignRoundResult
SignFinalResult = generated.SignFinalResult
RefreshInitResult = generated.RefreshInitResult
RefreshRoundResult = generated.RefreshRoundResult
RefreshFinalResult = generated.RefreshFinalResult
ResizeInitResult = generated.ResizeInitResult
ResizeRoundResult = generated.ResizeRoundResult
ResizeFinalResult = generated.ResizeFinalResult
RekeyResult = generated.RekeyResult
DeriveResult = generated.DeriveResult
EllipticCurve = generated.EllipticCurve
)
// Elliptic curve constants
const (
EllipticCurveSecp256k1 = generated.EllipticCurveSecp256k1
EllipticCurveP256 = generated.EllipticCurveP256
)
// Init initializes the DKLs23 library. Call once before using other functions.
func Init() {
generated.Init()
}
// ============================================
// DKG Functions
// ============================================
// DkgInit initializes a new distributed key generation session.
// partyID is the 1-indexed identifier for this party.
// threshold is the minimum number of parties needed to sign (t in t-of-n).
// totalParties is the total number of parties (n in t-of-n).
func DkgInit(partyID, threshold, totalParties uint32, curve EllipticCurve) DkgInitResult {
return generated.DkgInit(partyID, threshold, totalParties, curve)
}
// DkgInitWithSessionId initializes a new DKG session with a shared session ID.
// All parties MUST use the same 32-byte sessionId for the DKG to succeed.
func DkgInitWithSessionId(partyID, threshold, totalParties uint32, sessionId []byte, curve EllipticCurve) DkgInitResult {
return generated.DkgInitWithSessionId(partyID, threshold, totalParties, sessionId, curve)
}
// DkgRound1 processes DKG round 1, generating the broadcast commitment message.
func DkgRound1(sessionState []byte) DkgRoundResult {
return generated.DkgRound1(sessionState)
}
// DkgRound2 processes DKG round 2 with received messages from other parties.
func DkgRound2(sessionState []byte, receivedMessages []PartyMessage) DkgRoundResult {
return generated.DkgRound2(sessionState, receivedMessages)
}
// DkgRound3 processes DKG round 3 (verification and share computation).
func DkgRound3(sessionState []byte, receivedMessages []PartyMessage) DkgRoundResult {
return generated.DkgRound3(sessionState, receivedMessages)
}
// DkgFinalize completes DKG and extracts the key share.
func DkgFinalize(sessionState []byte, receivedMessages []PartyMessage) DkgFinalResult {
return generated.DkgFinalize(sessionState, receivedMessages)
}
// ============================================
// Signing Functions
// ============================================
// SignInit initializes a threshold signing session.
// keyShare is the party's key share from DKG.
// messageHash is the 32-byte hash of the message to sign.
// signerPartyIDs lists the party IDs participating in this signing session.
func SignInit(keyShare, messageHash []byte, signerPartyIDs []uint32) SignInitResult {
return generated.SignInit(keyShare, messageHash, signerPartyIDs)
}
// SignInitWithSignId initializes a threshold signing session with a shared sign ID.
// All parties must use the same signId for a signing session to work.
func SignInitWithSignId(keyShare, messageHash []byte, signerPartyIDs []uint32, signId []byte) SignInitResult {
return generated.SignInitWithSignId(keyShare, messageHash, signerPartyIDs, signId)
}
// SignRound1 processes signing round 1, generating nonce commitment.
func SignRound1(sessionState []byte) SignRoundResult {
return generated.SignRound1(sessionState)
}
// SignRound2 processes signing round 2 with received nonce commitments.
func SignRound2(sessionState []byte, receivedMessages []PartyMessage) SignRoundResult {
return generated.SignRound2(sessionState, receivedMessages)
}
// SignRound3 processes signing round 3 and produces broadcast messages.
func SignRound3(sessionState []byte, receivedMessages []PartyMessage) SignRoundResult {
return generated.SignRound3(sessionState, receivedMessages)
}
// SignFinalize collects broadcasts from all parties and produces the final signature.
func SignFinalize(sessionState []byte, receivedMessages []PartyMessage) SignFinalResult {
return generated.SignFinalize(sessionState, receivedMessages)
}
// ============================================
// Refresh Functions
// ============================================
// RefreshInit initializes a key share refresh session.
// This allows parties to generate new shares for the same key,
// invalidating old shares (proactive security).
func RefreshInit(keyShare []byte, partyID uint32) RefreshInitResult {
return generated.RefreshInit(keyShare, partyID)
}
// RefreshInitWithRefreshId initializes a refresh session with a shared refresh ID.
// All parties must use the same refreshId for a refresh session to work.
func RefreshInitWithRefreshId(keyShare []byte, partyID uint32, refreshId []byte) RefreshInitResult {
return generated.RefreshInitWithRefreshId(keyShare, partyID, refreshId)
}
// RefreshRound1 processes refresh round 1 (phase 1: generate polynomial fragments).
func RefreshRound1(sessionState []byte) RefreshRoundResult {
return generated.RefreshRound1(sessionState)
}
// RefreshRound2 processes refresh round 2 (phase 2: process fragments, generate proofs).
func RefreshRound2(sessionState []byte, receivedMessages []PartyMessage) RefreshRoundResult {
return generated.RefreshRound2(sessionState, receivedMessages)
}
// RefreshRound3 processes refresh round 3 (phase 3: process transmits).
func RefreshRound3(sessionState []byte, receivedMessages []PartyMessage) RefreshRoundResult {
return generated.RefreshRound3(sessionState, receivedMessages)
}
// RefreshFinalize verifies proofs and produces the new key share.
func RefreshFinalize(sessionState []byte, receivedMessages []PartyMessage) RefreshFinalResult {
return generated.RefreshFinalize(sessionState, receivedMessages)
}
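// A sketch of one party's refresh flow. The same caveats apply: the
// SessionState, Messages, NewKeyShare, and Error fields and the exchange()
// helper are assumed names used only for illustration.
func exampleRefreshParty(keyShare []byte, partyID uint32, exchange func([]PartyMessage) []PartyMessage) (newShare []byte, errMsg string) {
	init := RefreshInit(keyShare, partyID)
	if m := GetErrorMessage(init.Error); m != "" {
		return nil, m
	}
	r1 := RefreshRound1(init.SessionState)
	r2 := RefreshRound2(r1.SessionState, exchange(r1.Messages))
	r3 := RefreshRound3(r2.SessionState, exchange(r2.Messages))
	fin := RefreshFinalize(r3.SessionState, exchange(r3.Messages))
	return fin.NewKeyShare, GetErrorMessage(fin.Error)
}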
// ============================================
// Resize Functions
// ============================================
// ResizeInit initializes a threshold resize session.
// This allows changing the threshold (t) and/or total parties (n).
func ResizeInit(keyShare []byte, partyID, newThreshold, newTotalParties uint32, newPartyIDs []uint32, curve EllipticCurve) ResizeInitResult {
return generated.ResizeInit(keyShare, partyID, newThreshold, newTotalParties, newPartyIDs, curve)
}
// ResizeRound1 processes resize round 1.
func ResizeRound1(sessionState []byte) ResizeRoundResult {
return generated.ResizeRound1(sessionState)
}
// ResizeRound2 processes resize round 2 and produces the new key share.
func ResizeRound2(sessionState []byte, receivedMessages []PartyMessage) ResizeFinalResult {
return generated.ResizeRound2(sessionState, receivedMessages)
}
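// Resize follows the same pattern but completes in two rounds. Here an
// existing share is resized to a 3-of-5 group; the result field names are
// again assumptions for illustration.
func exampleResizeParty(keyShare []byte, partyID uint32, curve EllipticCurve, exchange func([]PartyMessage) []PartyMessage) (newShare []byte, errMsg string) {
	// The new party ID set must be agreed on by all participants out of band.
	init := ResizeInit(keyShare, partyID, 3, 5, []uint32{1, 2, 3, 4, 5}, curve)
	if m := GetErrorMessage(init.Error); m != "" {
		return nil, m
	}
	r1 := ResizeRound1(init.SessionState)
	fin := ResizeRound2(r1.SessionState, exchange(r1.Messages))
	return fin.NewKeyShare, GetErrorMessage(fin.Error)
}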
// ============================================
// Utility Functions
// ============================================
// RekeyFromSecret converts a full secret key into threshold shares.
// This is useful for migrating existing keys to threshold custody.
func RekeyFromSecret(secretKey []byte, threshold, totalParties uint32, curve EllipticCurve) RekeyResult {
return generated.RekeyFromSecret(secretKey, threshold, totalParties, curve)
}
// DeriveChildShare derives a child key share using a BIP-32 derivation path.
func DeriveChildShare(keyShare []byte, derivationPath []uint32) DeriveResult {
return generated.DeriveChildShare(keyShare, derivationPath)
}
// GetPublicKey extracts the public key from a key share.
func GetPublicKey(keyShare []byte) []byte {
return generated.GetPublicKey(keyShare)
}
// ValidateKeyShare validates a key share's structure and parameters.
func ValidateKeyShare(keyShare []byte) bool {
return generated.ValidateKeyShare(keyShare)
}
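// A short sketch of migrating an existing secret key into 2-of-3 threshold
// custody and sanity-checking the resulting shares. The Shares and Error
// fields on RekeyResult are assumed names for illustration.
func exampleRekey(secretKey []byte, curve EllipticCurve) (shares [][]byte, errMsg string) {
	res := RekeyFromSecret(secretKey, 2, 3, curve)
	if m := GetErrorMessage(res.Error); m != "" {
		return nil, m
	}
	for _, share := range res.Shares {
		if !ValidateKeyShare(share) {
			return nil, "rekey produced an invalid key share"
		}
	}
	// Every share of the same key reports the same public key.
	_ = GetPublicKey(res.Shares[0])
	return res.Shares, ""
}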
// ============================================
// Helper functions for error checking
// ============================================
// GetErrorMessage returns the error message from an optional string pointer.
// Returns empty string if the pointer is nil.
func GetErrorMessage(errMsg *string) string {
if errMsg != nil {
return *errMsg
}
return ""
}

14
dkls23_ffi/generate.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euxo pipefail
ROOT_DIR="${ROOT_DIR:-$( cd "$(dirname "$(realpath "$( dirname "${BASH_SOURCE[0]}" )")")" >/dev/null 2>&1 && pwd )}"
RUST_DKLS23_PACKAGE="$ROOT_DIR/crates/dkls23_ffi"
BINDINGS_DIR="$ROOT_DIR/dkls23_ffi"
# Build the Rust DKLs23 FFI package in release mode
cargo build -p dkls23_ffi --release
# Generate Go bindings
pushd "$RUST_DKLS23_PACKAGE" > /dev/null
uniffi-bindgen-go src/lib.udl -o "$BINDINGS_DIR"/generated

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,954 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#include <stdbool.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V6
#ifndef UNIFFI_SHARED_HEADER_V6
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V6
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V6
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V6 in this file. ⚠️
typedef struct RustBuffer {
uint64_t capacity;
uint64_t len;
uint8_t *data;
} RustBuffer;
typedef struct ForeignBytes {
int32_t len;
const uint8_t *data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
#endif // UNIFFI_SHARED_H
#ifndef UNIFFI_FFIDEF_RUST_FUTURE_CONTINUATION_CALLBACK
#define UNIFFI_FFIDEF_RUST_FUTURE_CONTINUATION_CALLBACK
typedef void (*UniffiRustFutureContinuationCallback)(uint64_t data, int8_t poll_result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiRustFutureContinuationCallback(
UniffiRustFutureContinuationCallback cb, uint64_t data, int8_t poll_result)
{
return cb(data, poll_result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_FREE
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_FREE
typedef void (*UniffiForeignFutureFree)(uint64_t handle);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureFree(
UniffiForeignFutureFree cb, uint64_t handle)
{
return cb(handle);
}
#endif
#ifndef UNIFFI_FFIDEF_CALLBACK_INTERFACE_FREE
#define UNIFFI_FFIDEF_CALLBACK_INTERFACE_FREE
typedef void (*UniffiCallbackInterfaceFree)(uint64_t handle);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiCallbackInterfaceFree(
UniffiCallbackInterfaceFree cb, uint64_t handle)
{
return cb(handle);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE
#define UNIFFI_FFIDEF_FOREIGN_FUTURE
typedef struct UniffiForeignFuture {
uint64_t handle;
UniffiForeignFutureFree free;
} UniffiForeignFuture;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U8
typedef struct UniffiForeignFutureStructU8 {
uint8_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU8;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U8
typedef void (*UniffiForeignFutureCompleteU8)(uint64_t callback_data, UniffiForeignFutureStructU8 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteU8(
UniffiForeignFutureCompleteU8 cb, uint64_t callback_data, UniffiForeignFutureStructU8 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I8
typedef struct UniffiForeignFutureStructI8 {
int8_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI8;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I8
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I8
typedef void (*UniffiForeignFutureCompleteI8)(uint64_t callback_data, UniffiForeignFutureStructI8 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteI8(
UniffiForeignFutureCompleteI8 cb, uint64_t callback_data, UniffiForeignFutureStructI8 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U16
typedef struct UniffiForeignFutureStructU16 {
uint16_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU16;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U16
typedef void (*UniffiForeignFutureCompleteU16)(uint64_t callback_data, UniffiForeignFutureStructU16 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteU16(
UniffiForeignFutureCompleteU16 cb, uint64_t callback_data, UniffiForeignFutureStructU16 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I16
typedef struct UniffiForeignFutureStructI16 {
int16_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI16;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I16
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I16
typedef void (*UniffiForeignFutureCompleteI16)(uint64_t callback_data, UniffiForeignFutureStructI16 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteI16(
UniffiForeignFutureCompleteI16 cb, uint64_t callback_data, UniffiForeignFutureStructI16 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U32
typedef struct UniffiForeignFutureStructU32 {
uint32_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U32
typedef void (*UniffiForeignFutureCompleteU32)(uint64_t callback_data, UniffiForeignFutureStructU32 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteU32(
UniffiForeignFutureCompleteU32 cb, uint64_t callback_data, UniffiForeignFutureStructU32 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I32
typedef struct UniffiForeignFutureStructI32 {
int32_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I32
typedef void (*UniffiForeignFutureCompleteI32)(uint64_t callback_data, UniffiForeignFutureStructI32 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteI32(
UniffiForeignFutureCompleteI32 cb, uint64_t callback_data, UniffiForeignFutureStructI32 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_U64
typedef struct UniffiForeignFutureStructU64 {
uint64_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructU64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_U64
typedef void (*UniffiForeignFutureCompleteU64)(uint64_t callback_data, UniffiForeignFutureStructU64 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteU64(
UniffiForeignFutureCompleteU64 cb, uint64_t callback_data, UniffiForeignFutureStructU64 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_I64
typedef struct UniffiForeignFutureStructI64 {
int64_t returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructI64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_I64
typedef void (*UniffiForeignFutureCompleteI64)(uint64_t callback_data, UniffiForeignFutureStructI64 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteI64(
UniffiForeignFutureCompleteI64 cb, uint64_t callback_data, UniffiForeignFutureStructI64 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F32
typedef struct UniffiForeignFutureStructF32 {
float returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructF32;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F32
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F32
typedef void (*UniffiForeignFutureCompleteF32)(uint64_t callback_data, UniffiForeignFutureStructF32 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteF32(
UniffiForeignFutureCompleteF32 cb, uint64_t callback_data, UniffiForeignFutureStructF32 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_F64
typedef struct UniffiForeignFutureStructF64 {
double returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructF64;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F64
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_F64
typedef void (*UniffiForeignFutureCompleteF64)(uint64_t callback_data, UniffiForeignFutureStructF64 result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteF64(
UniffiForeignFutureCompleteF64 cb, uint64_t callback_data, UniffiForeignFutureStructF64 result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_POINTER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_POINTER
typedef struct UniffiForeignFutureStructPointer {
void* returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructPointer;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_POINTER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_POINTER
typedef void (*UniffiForeignFutureCompletePointer)(uint64_t callback_data, UniffiForeignFutureStructPointer result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompletePointer(
UniffiForeignFutureCompletePointer cb, uint64_t callback_data, UniffiForeignFutureStructPointer result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_RUST_BUFFER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_RUST_BUFFER
typedef struct UniffiForeignFutureStructRustBuffer {
RustBuffer returnValue;
RustCallStatus callStatus;
} UniffiForeignFutureStructRustBuffer;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_RUST_BUFFER
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_RUST_BUFFER
typedef void (*UniffiForeignFutureCompleteRustBuffer)(uint64_t callback_data, UniffiForeignFutureStructRustBuffer result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteRustBuffer(
UniffiForeignFutureCompleteRustBuffer cb, uint64_t callback_data, UniffiForeignFutureStructRustBuffer result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_VOID
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_STRUCT_VOID
typedef struct UniffiForeignFutureStructVoid {
RustCallStatus callStatus;
} UniffiForeignFutureStructVoid;
#endif
#ifndef UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FOREIGN_FUTURE_COMPLETE_VOID
typedef void (*UniffiForeignFutureCompleteVoid)(uint64_t callback_data, UniffiForeignFutureStructVoid result);
// Making function static works around:
// https://github.com/golang/go/issues/11263
static void call_UniffiForeignFutureCompleteVoid(
UniffiForeignFutureCompleteVoid cb, uint64_t callback_data, UniffiForeignFutureStructVoid result)
{
return cb(callback_data, result);
}
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DERIVE_CHILD_SHARE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DERIVE_CHILD_SHARE
RustBuffer uniffi_dkls23_ffi_fn_func_derive_child_share(RustBuffer key_share, RustBuffer derivation_path, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_FINALIZE
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_finalize(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_INIT
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_init(uint32_t party_id, uint32_t threshold, uint32_t total_parties, RustBuffer curve, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_INIT_WITH_SESSION_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_INIT_WITH_SESSION_ID
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_init_with_session_id(uint32_t party_id, uint32_t threshold, uint32_t total_parties, RustBuffer session_id, RustBuffer curve, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND1
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_round1(RustBuffer session_state, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND2
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_round2(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_DKG_ROUND3
RustBuffer uniffi_dkls23_ffi_fn_func_dkg_round3(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_GET_PUBLIC_KEY
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_GET_PUBLIC_KEY
RustBuffer uniffi_dkls23_ffi_fn_func_get_public_key(RustBuffer key_share, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_INIT
void uniffi_dkls23_ffi_fn_func_init(RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_FINALIZE
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_finalize(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_INIT
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_init(RustBuffer key_share, uint32_t party_id, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_INIT_WITH_REFRESH_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_INIT_WITH_REFRESH_ID
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_init_with_refresh_id(RustBuffer key_share, uint32_t party_id, RustBuffer refresh_id, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND1
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_round1(RustBuffer session_state, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND2
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_round2(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REFRESH_ROUND3
RustBuffer uniffi_dkls23_ffi_fn_func_refresh_round3(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REKEY_FROM_SECRET
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_REKEY_FROM_SECRET
RustBuffer uniffi_dkls23_ffi_fn_func_rekey_from_secret(RustBuffer secret_key, uint32_t threshold, uint32_t total_parties, RustBuffer curve, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_INIT
RustBuffer uniffi_dkls23_ffi_fn_func_resize_init(RustBuffer key_share, uint32_t party_id, uint32_t new_threshold, uint32_t new_total_parties, RustBuffer new_party_ids, RustBuffer curve, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_ROUND1
RustBuffer uniffi_dkls23_ffi_fn_func_resize_round1(RustBuffer session_state, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_RESIZE_ROUND2
RustBuffer uniffi_dkls23_ffi_fn_func_resize_round2(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_FINALIZE
RustBuffer uniffi_dkls23_ffi_fn_func_sign_finalize(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_INIT
RustBuffer uniffi_dkls23_ffi_fn_func_sign_init(RustBuffer key_share, RustBuffer message_hash, RustBuffer signer_party_ids, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_INIT_WITH_SIGN_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_INIT_WITH_SIGN_ID
RustBuffer uniffi_dkls23_ffi_fn_func_sign_init_with_sign_id(RustBuffer key_share, RustBuffer message_hash, RustBuffer signer_party_ids, RustBuffer sign_id, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND1
RustBuffer uniffi_dkls23_ffi_fn_func_sign_round1(RustBuffer session_state, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND2
RustBuffer uniffi_dkls23_ffi_fn_func_sign_round2(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_SIGN_ROUND3
RustBuffer uniffi_dkls23_ffi_fn_func_sign_round3(RustBuffer session_state, RustBuffer received_messages, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_VALIDATE_KEY_SHARE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_FN_FUNC_VALIDATE_KEY_SHARE
int8_t uniffi_dkls23_ffi_fn_func_validate_key_share(RustBuffer key_share, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_ALLOC
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_ALLOC
RustBuffer ffi_dkls23_ffi_rustbuffer_alloc(uint64_t size, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_FROM_BYTES
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_FROM_BYTES
RustBuffer ffi_dkls23_ffi_rustbuffer_from_bytes(ForeignBytes bytes, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_FREE
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_FREE
void ffi_dkls23_ffi_rustbuffer_free(RustBuffer buf, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_RESERVE
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUSTBUFFER_RESERVE
RustBuffer ffi_dkls23_ffi_rustbuffer_reserve(RustBuffer buf, uint64_t additional, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U8
void ffi_dkls23_ffi_rust_future_poll_u8(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U8
void ffi_dkls23_ffi_rust_future_cancel_u8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U8
void ffi_dkls23_ffi_rust_future_free_u8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U8
uint8_t ffi_dkls23_ffi_rust_future_complete_u8(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I8
void ffi_dkls23_ffi_rust_future_poll_i8(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I8
void ffi_dkls23_ffi_rust_future_cancel_i8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I8
void ffi_dkls23_ffi_rust_future_free_i8(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I8
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I8
int8_t ffi_dkls23_ffi_rust_future_complete_i8(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U16
void ffi_dkls23_ffi_rust_future_poll_u16(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U16
void ffi_dkls23_ffi_rust_future_cancel_u16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U16
void ffi_dkls23_ffi_rust_future_free_u16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U16
uint16_t ffi_dkls23_ffi_rust_future_complete_u16(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I16
void ffi_dkls23_ffi_rust_future_poll_i16(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I16
void ffi_dkls23_ffi_rust_future_cancel_i16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I16
void ffi_dkls23_ffi_rust_future_free_i16(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I16
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I16
int16_t ffi_dkls23_ffi_rust_future_complete_i16(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U32
void ffi_dkls23_ffi_rust_future_poll_u32(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U32
void ffi_dkls23_ffi_rust_future_cancel_u32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U32
void ffi_dkls23_ffi_rust_future_free_u32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U32
uint32_t ffi_dkls23_ffi_rust_future_complete_u32(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I32
void ffi_dkls23_ffi_rust_future_poll_i32(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I32
void ffi_dkls23_ffi_rust_future_cancel_i32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I32
void ffi_dkls23_ffi_rust_future_free_i32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I32
int32_t ffi_dkls23_ffi_rust_future_complete_i32(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_U64
void ffi_dkls23_ffi_rust_future_poll_u64(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_U64
void ffi_dkls23_ffi_rust_future_cancel_u64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_U64
void ffi_dkls23_ffi_rust_future_free_u64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_U64
uint64_t ffi_dkls23_ffi_rust_future_complete_u64(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_I64
void ffi_dkls23_ffi_rust_future_poll_i64(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_I64
void ffi_dkls23_ffi_rust_future_cancel_i64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_I64
void ffi_dkls23_ffi_rust_future_free_i64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_I64
int64_t ffi_dkls23_ffi_rust_future_complete_i64(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_F32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_F32
void ffi_dkls23_ffi_rust_future_poll_f32(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_F32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_F32
void ffi_dkls23_ffi_rust_future_cancel_f32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_F32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_F32
void ffi_dkls23_ffi_rust_future_free_f32(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_F32
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_F32
float ffi_dkls23_ffi_rust_future_complete_f32(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_F64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_F64
void ffi_dkls23_ffi_rust_future_poll_f64(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_F64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_F64
void ffi_dkls23_ffi_rust_future_cancel_f64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_F64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_F64
void ffi_dkls23_ffi_rust_future_free_f64(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_F64
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_F64
double ffi_dkls23_ffi_rust_future_complete_f64(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_POINTER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_POINTER
void ffi_dkls23_ffi_rust_future_poll_pointer(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_POINTER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_POINTER
void ffi_dkls23_ffi_rust_future_cancel_pointer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_POINTER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_POINTER
void ffi_dkls23_ffi_rust_future_free_pointer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_POINTER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_POINTER
void* ffi_dkls23_ffi_rust_future_complete_pointer(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_RUST_BUFFER
void ffi_dkls23_ffi_rust_future_poll_rust_buffer(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_RUST_BUFFER
void ffi_dkls23_ffi_rust_future_cancel_rust_buffer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_RUST_BUFFER
void ffi_dkls23_ffi_rust_future_free_rust_buffer(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_RUST_BUFFER
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_RUST_BUFFER
RustBuffer ffi_dkls23_ffi_rust_future_complete_rust_buffer(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_VOID
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_POLL_VOID
void ffi_dkls23_ffi_rust_future_poll_void(uint64_t handle, UniffiRustFutureContinuationCallback callback, uint64_t callback_data
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_VOID
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_CANCEL_VOID
void ffi_dkls23_ffi_rust_future_cancel_void(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_VOID
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_FREE_VOID
void ffi_dkls23_ffi_rust_future_free_void(uint64_t handle
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_RUST_FUTURE_COMPLETE_VOID
void ffi_dkls23_ffi_rust_future_complete_void(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DERIVE_CHILD_SHARE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DERIVE_CHILD_SHARE
uint16_t uniffi_dkls23_ffi_checksum_func_derive_child_share(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_FINALIZE
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_finalize(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_INIT
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_init(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_INIT_WITH_SESSION_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_INIT_WITH_SESSION_ID
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_init_with_session_id(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND1
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_round1(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND2
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_round2(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_DKG_ROUND3
uint16_t uniffi_dkls23_ffi_checksum_func_dkg_round3(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_GET_PUBLIC_KEY
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_GET_PUBLIC_KEY
uint16_t uniffi_dkls23_ffi_checksum_func_get_public_key(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_INIT
uint16_t uniffi_dkls23_ffi_checksum_func_init(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_FINALIZE
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_finalize(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_INIT
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_init(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_INIT_WITH_REFRESH_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_INIT_WITH_REFRESH_ID
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_init_with_refresh_id(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND1
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_round1(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND2
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_round2(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REFRESH_ROUND3
uint16_t uniffi_dkls23_ffi_checksum_func_refresh_round3(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REKEY_FROM_SECRET
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_REKEY_FROM_SECRET
uint16_t uniffi_dkls23_ffi_checksum_func_rekey_from_secret(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_INIT
uint16_t uniffi_dkls23_ffi_checksum_func_resize_init(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_ROUND1
uint16_t uniffi_dkls23_ffi_checksum_func_resize_round1(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_RESIZE_ROUND2
uint16_t uniffi_dkls23_ffi_checksum_func_resize_round2(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_FINALIZE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_FINALIZE
uint16_t uniffi_dkls23_ffi_checksum_func_sign_finalize(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_INIT
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_INIT
uint16_t uniffi_dkls23_ffi_checksum_func_sign_init(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_INIT_WITH_SIGN_ID
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_INIT_WITH_SIGN_ID
uint16_t uniffi_dkls23_ffi_checksum_func_sign_init_with_sign_id(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND1
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND1
uint16_t uniffi_dkls23_ffi_checksum_func_sign_round1(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND2
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND2
uint16_t uniffi_dkls23_ffi_checksum_func_sign_round2(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND3
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_SIGN_ROUND3
uint16_t uniffi_dkls23_ffi_checksum_func_sign_round3(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_VALIDATE_KEY_SHARE
#define UNIFFI_FFIDEF_UNIFFI_DKLS23_FFI_CHECKSUM_FUNC_VALIDATE_KEY_SHARE
uint16_t uniffi_dkls23_ffi_checksum_func_validate_key_share(void
);
#endif
#ifndef UNIFFI_FFIDEF_FFI_DKLS23_FFI_UNIFFI_CONTRACT_VERSION
#define UNIFFI_FFIDEF_FFI_DKLS23_FFI_UNIFFI_CONTRACT_VERSION
uint32_t ffi_dkls23_ffi_uniffi_contract_version(void
);
#endif

3
dkls23_ffi/go.mod Normal file
View File

@ -0,0 +1,3 @@
module source.quilibrium.com/quilibrium/monorepo/dkls23_ffi
go 1.24.0

View File

@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 3.0)
cmake_minimum_required (VERSION 3.5)
project (emp-ot)
set(NAME "emp-ot")

View File

@ -47,6 +47,8 @@ PrimalLPNParameter param = ferret_b13, std::string pre_file="");
int disassemble_state(const void * data, int64_t size);
int64_t state_size();
bool is_setup() const { return extend_initialized; }
private:
block ch[2];

View File

@ -28,8 +28,6 @@ FerretCOT<T>::FerretCOT(int64_t party, int64_t threads, T **ios,
template<typename T>
FerretCOT<T>::~FerretCOT() {
if (ot_pre_data != nullptr) {
if(party == ALICE) write_pre_data128_to_file((void*)ot_pre_data, (__uint128_t)Delta, pre_ot_filename);
else write_pre_data128_to_file((void*)ot_pre_data, (__uint128_t)0, pre_ot_filename);
delete[] ot_pre_data;
}
if (ot_data != nullptr) delete[] ot_data;

View File

@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 3.0)
cmake_minimum_required (VERSION 3.5)
project (emptool)
set(NAME "emp-tool")

View File

@ -0,0 +1,276 @@
#ifndef EMP_BUFFER_IO_CHANNEL
#define EMP_BUFFER_IO_CHANNEL
#include <string>
#include <cstring>
#include <stdexcept>
#include <mutex>
#include <condition_variable>
#include <chrono>
#include "emp-tool/io/io_channel.h"
namespace emp {
/**
* BufferIO - A message-based IO channel for EMP toolkit
*
* This IO channel uses internal buffers instead of network sockets,
* allowing Ferret OT to be used with any transport mechanism
* (message queues, gRPC, HTTP, etc).
*
* Usage:
* 1. Create BufferIO for each party
* 2. When Ferret calls send_data_internal, data goes to send_buffer
* 3. External code calls drain_send_buffer() to get data to transmit
* 4. External code calls fill_recv_buffer() with received data
* 5. When Ferret calls recv_data_internal, data comes from recv_buffer
*
* Thread safety:
* - fill_recv_buffer and drain_send_buffer can be called from different threads
* - recv_data_internal will block if recv_buffer is empty (with timeout)
*/
class BufferIO: public IOChannel<BufferIO> {
public:
// Send buffer (data written by Ferret, read by external transport)
char* send_buffer = nullptr;
int64_t send_size = 0; // Current data in send buffer
int64_t send_cap = 0; // Send buffer capacity
// Receive buffer (data written by external transport, read by Ferret)
char* recv_buffer = nullptr;
int64_t recv_size = 0; // Current data in recv buffer
int64_t recv_pos = 0; // Current read position
int64_t recv_cap = 0; // Receive buffer capacity
// Synchronization
std::mutex send_mutex;
std::mutex recv_mutex;
std::condition_variable recv_cv;
// Timeout for blocking receive (milliseconds)
int64_t recv_timeout_ms = 30000; // 30 second default
// Error state
bool has_error = false;
std::string error_message;
BufferIO(int64_t initial_cap = 1024 * 1024) {
send_cap = initial_cap;
recv_cap = initial_cap;
send_buffer = new char[send_cap];
recv_buffer = new char[recv_cap];
send_size = 0;
recv_size = 0;
recv_pos = 0;
}
~BufferIO() {
if (send_buffer != nullptr) {
delete[] send_buffer;
}
if (recv_buffer != nullptr) {
delete[] recv_buffer;
}
}
/**
* Set timeout for blocking receive operations
*/
void set_recv_timeout(int64_t timeout_ms) {
recv_timeout_ms = timeout_ms;
}
/**
* Fill the receive buffer with data from external transport
* This is called by the external code when data arrives
*/
void fill_recv_buffer(const char* data, int64_t len) {
std::lock_guard<std::mutex> lock(recv_mutex);
// Compact buffer if needed
if (recv_pos > 0 && recv_pos == recv_size) {
recv_pos = 0;
recv_size = 0;
} else if (recv_pos > recv_cap / 2) {
// Move remaining data to front
int64_t remaining = recv_size - recv_pos;
memmove(recv_buffer, recv_buffer + recv_pos, remaining);
recv_pos = 0;
recv_size = remaining;
}
// Grow buffer if needed
int64_t available = recv_cap - recv_size;
if (len > available) {
int64_t new_cap = recv_cap * 2;
while (new_cap - recv_size < len) {
new_cap *= 2;
}
char* new_buffer = new char[new_cap];
memcpy(new_buffer, recv_buffer + recv_pos, recv_size - recv_pos);
delete[] recv_buffer;
recv_buffer = new_buffer;
recv_size = recv_size - recv_pos;
recv_pos = 0;
recv_cap = new_cap;
}
// Copy data to buffer
memcpy(recv_buffer + recv_size, data, len);
recv_size += len;
// Notify any waiting receivers
recv_cv.notify_all();
}
/**
* Get available data in receive buffer (non-blocking check)
*/
int64_t recv_buffer_available() {
std::lock_guard<std::mutex> lock(recv_mutex);
return recv_size - recv_pos;
}
/**
* Drain the send buffer - returns data that needs to be transmitted
* This is called by external code to get data to send
* Returns the number of bytes copied, or 0 if buffer is empty
*/
int64_t drain_send_buffer(char* out_buffer, int64_t max_len) {
std::lock_guard<std::mutex> lock(send_mutex);
int64_t to_copy = (send_size < max_len) ? send_size : max_len;
if (to_copy > 0) {
memcpy(out_buffer, send_buffer, to_copy);
// Move remaining data to front
if (to_copy < send_size) {
memmove(send_buffer, send_buffer + to_copy, send_size - to_copy);
}
send_size -= to_copy;
}
return to_copy;
}
/**
* Get the entire send buffer as a copy and clear it
* Returns a pair of (data pointer, length) - caller owns the memory
*/
std::pair<char*, int64_t> drain_send_buffer_all() {
std::lock_guard<std::mutex> lock(send_mutex);
if (send_size == 0) {
return {nullptr, 0};
}
char* data = new char[send_size];
memcpy(data, send_buffer, send_size);
int64_t len = send_size;
send_size = 0;
return {data, len};
}
/**
* Get current send buffer size (for checking if there's data to send)
*/
int64_t send_buffer_size() {
std::lock_guard<std::mutex> lock(send_mutex);
return send_size;
}
/**
* Clear all buffers
*/
void clear() {
{
std::lock_guard<std::mutex> lock(send_mutex);
send_size = 0;
}
{
std::lock_guard<std::mutex> lock(recv_mutex);
recv_size = 0;
recv_pos = 0;
}
}
/**
* Set error state - will cause recv_data_internal to throw
*/
void set_error(const std::string& msg) {
has_error = true;
error_message = msg;
recv_cv.notify_all(); // Wake up any blocking receivers
}
/**
* Internal send - called by Ferret/EMP
* Appends data to send buffer
*/
void send_data_internal(const void* data, int64_t len) {
std::lock_guard<std::mutex> lock(send_mutex);
// Grow buffer if needed
if (send_size + len > send_cap) {
int64_t new_cap = send_cap * 2;
while (new_cap < send_size + len) {
new_cap *= 2;
}
char* new_buffer = new char[new_cap];
memcpy(new_buffer, send_buffer, send_size);
delete[] send_buffer;
send_buffer = new_buffer;
send_cap = new_cap;
}
memcpy(send_buffer + send_size, data, len);
send_size += len;
}
/**
* Internal receive - called by Ferret/EMP
* Reads data from receive buffer, blocking if necessary
*/
void recv_data_internal(void* data, int64_t len) {
std::unique_lock<std::mutex> lock(recv_mutex);
int64_t received = 0;
char* out = static_cast<char*>(data);
while (received < len) {
// Check for error state
if (has_error) {
throw std::runtime_error("BufferIO error: " + error_message);
}
// Check available data
int64_t available = recv_size - recv_pos;
if (available > 0) {
int64_t to_copy = (available < (len - received)) ? available : (len - received);
memcpy(out + received, recv_buffer + recv_pos, to_copy);
recv_pos += to_copy;
received += to_copy;
} else {
// Wait for data with timeout
auto timeout = std::chrono::milliseconds(recv_timeout_ms);
if (!recv_cv.wait_for(lock, timeout, [this]() {
return (recv_size - recv_pos > 0) || has_error;
})) {
throw std::runtime_error("BufferIO recv timeout");
}
}
}
}
/**
* Flush - no-op for BufferIO since there's no underlying stream
* But can be used as a signal that a message boundary has been reached
*/
void flush() {
// No-op - data is immediately available in send_buffer
}
};
} // namespace emp
#endif // EMP_BUFFER_IO_CHANNEL

View File

@ -108,3 +108,185 @@ func (ot *FerretOT) SenderGetBlockData(choice bool, index uint64) []byte {
func (ot *FerretOT) ReceiverGetBlockData(index uint64) []byte {
return ot.ferretCOT.GetBlockData(0, index)
}
// FerretBufferOT is a buffer-based Ferret OT that uses message passing
// instead of direct TCP connections. This allows routing OT traffic through
// an external transport (e.g., message channels, proxies).
type FerretBufferOT struct {
party int
ferretCOT *generated.FerretCotBufferManager
bufferIO *generated.BufferIoManager
}
// NewFerretBufferOT creates a new buffer-based Ferret OT.
// Unlike NewFerretOT, this doesn't establish any network connections.
// Instead, the caller is responsible for:
// 1. Calling DrainSend() to get outgoing data
// 2. Transmitting that data to the peer via their own transport
// 3. Receiving data from peer and calling FillRecv() with it
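// A hedged pump-loop sketch showing these responsibilities follows this constructor.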
func NewFerretBufferOT(
party int,
threads int,
length uint64,
choices []bool,
malicious bool,
initialBufferCap int64,
) (*FerretBufferOT, error) {
if threads > 1 {
fmt.Println(
"!!!WARNING!!! THERE BE DRAGONS. RUNNING MULTITHREADED MODE IN SOME " +
"SITUATIONS HAS LEAD TO CRASHES AND OTHER ISSUES. IF YOU STILL WISH " +
"TO DO THIS, YOU WILL NEED TO MANUALLY UPDATE THE BUILD AND REMOVE " +
"THIS CHECK. DO SO AT YOUR OWN RISK",
)
return nil, errors.Wrap(errors.New("invalid thread count"), "new ferret buffer ot")
}
bufferIO := generated.CreateBufferIoManager(initialBufferCap)
ferretCOT := generated.CreateFerretCotBufferManager(
int32(party),
int32(threads),
length,
choices,
bufferIO,
malicious,
)
return &FerretBufferOT{
party: party,
ferretCOT: ferretCOT,
bufferIO: bufferIO,
}, nil
}
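// The following sketch shows the drain/fill pump a caller might run around a
// FerretBufferOT. The byte-oriented transport interface here is an assumption
// for illustration, not part of this package. Because Setup and the COT/ROT
// calls block on the receive buffer, the pump would typically run on its own
// goroutine alongside them.
type exampleTransport interface {
	Send([]byte) error
	Recv() ([]byte, error)
}

func pumpOnce(ot *FerretBufferOT, tr exampleTransport) error {
	// Forward any pending outgoing OT data to the peer.
	if n := ot.SendSize(); n > 0 {
		if err := tr.Send(ot.DrainSend(n)); err != nil {
			return err
		}
	}
	// Feed bytes received from the peer into the OT's receive buffer.
	data, err := tr.Recv()
	if err != nil {
		ot.SetError(err.Error())
		return err
	}
	if len(data) > 0 {
		ot.FillRecv(data)
	}
	return nil
}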
// FillRecv fills the receive buffer with data from an external transport.
// Call this when you receive data from the peer.
func (ot *FerretBufferOT) FillRecv(data []byte) bool {
return ot.bufferIO.FillRecv(data)
}
// DrainSend drains up to maxLen bytes from the send buffer.
// Call this to get data that needs to be sent to the peer.
func (ot *FerretBufferOT) DrainSend(maxLen uint64) []byte {
return ot.bufferIO.DrainSend(maxLen)
}
// SendSize returns the number of bytes waiting to be sent.
func (ot *FerretBufferOT) SendSize() uint64 {
return ot.bufferIO.SendSize()
}
// RecvAvailable returns the number of bytes available in the receive buffer.
func (ot *FerretBufferOT) RecvAvailable() uint64 {
return ot.bufferIO.RecvAvailable()
}
// SetTimeout sets the timeout for blocking receive operations (in milliseconds).
// Set to -1 for no timeout (blocking forever until data arrives).
func (ot *FerretBufferOT) SetTimeout(timeoutMs int64) {
ot.bufferIO.SetTimeout(timeoutMs)
}
// SetError sets an error state that will cause receive operations to fail.
// Useful for signaling that the connection has been closed.
func (ot *FerretBufferOT) SetError(message string) {
ot.bufferIO.SetError(message)
}
// Clear clears all buffers.
func (ot *FerretBufferOT) Clear() {
ot.bufferIO.Clear()
}
// Setup runs the OT setup protocol. Must be called after both parties have
// their BufferIO message transport active (can send/receive data).
// This is deferred from construction because BufferIO-based OT needs
// the message channel to be ready before setup can exchange data.
// Returns true on success, false on error.
func (ot *FerretBufferOT) Setup() bool {
return ot.ferretCOT.Setup()
}
// IsSetup returns true if the OT setup has been completed.
func (ot *FerretBufferOT) IsSetup() bool {
return ot.ferretCOT.IsSetup()
}
// StateSize returns the size in bytes needed to store the OT state.
func (ot *FerretBufferOT) StateSize() int64 {
return ot.ferretCOT.StateSize()
}
// AssembleState serializes the OT state for persistent storage.
// This allows storing setup data externally instead of in files.
// Returns nil if serialization fails.
func (ot *FerretBufferOT) AssembleState() []byte {
return ot.ferretCOT.AssembleState()
}
// DisassembleState restores the OT state from a buffer (created by AssembleState).
// This must be called INSTEAD of Setup, not after.
// Returns true on success.
func (ot *FerretBufferOT) DisassembleState(data []byte) bool {
return ot.ferretCOT.DisassembleState(data)
}
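// A sketch of persisting and restoring Ferret setup state, assuming an
// external blob store supplied by the caller (the put/get callbacks are
// illustrative, not part of this package).
func saveOTState(ot *FerretBufferOT, put func(key string, val []byte) error) error {
	state := ot.AssembleState()
	if state == nil {
		return errors.New("assemble state failed")
	}
	return put("ferret-ot-state", state)
}

func restoreOTState(ot *FerretBufferOT, get func(key string) ([]byte, error)) error {
	state, err := get("ferret-ot-state")
	if err != nil {
		return err
	}
	// DisassembleState replaces Setup entirely; do not call Setup afterwards.
	if !ot.DisassembleState(state) {
		return errors.New("disassemble state failed")
	}
	return nil
}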
func (ot *FerretBufferOT) SendCOT() error {
if ot.party != ALICE {
return errors.New("incorrect party")
}
if !ot.ferretCOT.SendCot() {
return errors.New("send COT failed")
}
return nil
}
func (ot *FerretBufferOT) RecvCOT() error {
if ot.party != BOB {
return errors.New("incorrect party")
}
if !ot.ferretCOT.RecvCot() {
return errors.New("recv COT failed")
}
return nil
}
func (ot *FerretBufferOT) SendROT() error {
if !ot.ferretCOT.SendRot() {
return errors.New("send ROT failed")
}
return nil
}
func (ot *FerretBufferOT) RecvROT() error {
if !ot.ferretCOT.RecvRot() {
return errors.New("recv ROT failed")
}
return nil
}
func (ot *FerretBufferOT) SenderGetBlockData(choice bool, index uint64) []byte {
c := uint8(0)
if choice {
c = 1
}
return ot.ferretCOT.GetBlockData(c, index)
}
func (ot *FerretBufferOT) ReceiverGetBlockData(index uint64) []byte {
return ot.ferretCOT.GetBlockData(0, index)
}
func (ot *FerretBufferOT) Destroy() {
if ot.ferretCOT != nil {
ot.ferretCOT.Destroy()
}
if ot.bufferIO != nil {
ot.bufferIO.Destroy()
}
}

View File

@ -346,6 +346,24 @@ func uniffiCheckChecksums() {
// If this happens try cleaning and rebuilding your project
panic("ferret: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_buffer_io_manager()
})
if checksum != 31310 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_func_create_buffer_io_manager: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_ferret_cot_buffer_manager()
})
if checksum != 17020 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_func_create_ferret_cot_buffer_manager: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_func_create_ferret_cot_manager()
@ -364,6 +382,168 @@ func uniffiCheckChecksums() {
panic("ferret: uniffi_ferret_checksum_func_create_netio_manager: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_clear()
})
if checksum != 46028 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_clear: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_drain_send()
})
if checksum != 42377 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_drain_send: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_fill_recv()
})
if checksum != 47991 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_fill_recv: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_recv_available()
})
if checksum != 30236 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_recv_available: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_send_size()
})
if checksum != 7700 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_send_size: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_set_error()
})
if checksum != 26761 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_set_error: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_bufferiomanager_set_timeout()
})
if checksum != 18359 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_bufferiomanager_set_timeout: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_assemble_state()
})
if checksum != 6363 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_assemble_state: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_disassemble_state()
})
if checksum != 47188 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_disassemble_state: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_get_block_data()
})
if checksum != 34398 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_get_block_data: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_is_setup()
})
if checksum != 1717 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_is_setup: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_cot()
})
if checksum != 8122 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_cot: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_rot()
})
if checksum != 15345 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_rot: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_send_cot()
})
if checksum != 13639 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_send_cot: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_send_rot()
})
if checksum != 3052 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_send_rot: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_set_block_data()
})
if checksum != 37344 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_set_block_data: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_setup()
})
if checksum != 11907 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_setup: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotbuffermanager_state_size()
})
if checksum != 3205 {
// If this happens try cleaning and rebuilding your project
panic("ferret: uniffi_ferret_checksum_method_ferretcotbuffermanager_state_size: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_ferret_checksum_method_ferretcotmanager_get_block_data()
@ -492,6 +672,30 @@ type FfiDestroyerUint64 struct{}
func (FfiDestroyerUint64) Destroy(_ uint64) {}
type FfiConverterInt64 struct{}
var FfiConverterInt64INSTANCE = FfiConverterInt64{}
func (FfiConverterInt64) Lower(value int64) C.int64_t {
return C.int64_t(value)
}
func (FfiConverterInt64) Write(writer io.Writer, value int64) {
writeInt64(writer, value)
}
func (FfiConverterInt64) Lift(value C.int64_t) int64 {
return int64(value)
}
func (FfiConverterInt64) Read(reader io.Reader) int64 {
return readInt64(reader)
}
type FfiDestroyerInt64 struct{}
func (FfiDestroyerInt64) Destroy(_ int64) {}
type FfiConverterBool struct{}
var FfiConverterBoolINSTANCE = FfiConverterBool{}
@ -636,6 +840,304 @@ func (ffiObject *FfiObject) freeRustArcPtr() {
})
}
type BufferIoManagerInterface interface {
Clear()
DrainSend(maxLen uint64) []uint8
FillRecv(data []uint8) bool
RecvAvailable() uint64
SendSize() uint64
SetError(message string)
SetTimeout(timeoutMs int64)
}
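The interface above implies that the Rust side only touches an in-memory buffer and leaves the actual transport to the caller. Below is a minimal sketch of one possible pump loop over a net.Conn; the function name, chunk size, and deadline are illustrative assumptions, not part of the bindings (assumed standard-library imports: errors, net, os, time):

func pumpBufferIO(conn net.Conn, buf *BufferIoManager) error {
	// Flush anything the Rust side has queued for sending.
	for buf.SendSize() > 0 {
		chunk := buf.DrainSend(64 * 1024)
		if _, err := conn.Write(chunk); err != nil {
			buf.SetError(err.Error())
			return err
		}
	}
	// Feed any bytes waiting on the socket into the receive buffer.
	tmp := make([]byte, 64*1024)
	_ = conn.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
	n, err := conn.Read(tmp)
	if n > 0 && !buf.FillRecv(tmp[:n]) {
		return errors.New("BufferIoManager rejected received data")
	}
	if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) {
		return err
	}
	return nil
}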
type BufferIoManager struct {
ffiObject FfiObject
}
func (_self *BufferIoManager) Clear() {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
rustCall(func(_uniffiStatus *C.RustCallStatus) bool {
C.uniffi_ferret_fn_method_bufferiomanager_clear(
_pointer, _uniffiStatus)
return false
})
}
func (_self *BufferIoManager) DrainSend(maxLen uint64) []uint8 {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_ferret_fn_method_bufferiomanager_drain_send(
_pointer, FfiConverterUint64INSTANCE.Lower(maxLen), _uniffiStatus),
}
}))
}
func (_self *BufferIoManager) FillRecv(data []uint8) bool {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_bufferiomanager_fill_recv(
_pointer, FfiConverterSequenceUint8INSTANCE.Lower(data), _uniffiStatus)
}))
}
func (_self *BufferIoManager) RecvAvailable() uint64 {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterUint64INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint64_t {
return C.uniffi_ferret_fn_method_bufferiomanager_recv_available(
_pointer, _uniffiStatus)
}))
}
func (_self *BufferIoManager) SendSize() uint64 {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterUint64INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.uint64_t {
return C.uniffi_ferret_fn_method_bufferiomanager_send_size(
_pointer, _uniffiStatus)
}))
}
func (_self *BufferIoManager) SetError(message string) {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
rustCall(func(_uniffiStatus *C.RustCallStatus) bool {
C.uniffi_ferret_fn_method_bufferiomanager_set_error(
_pointer, FfiConverterStringINSTANCE.Lower(message), _uniffiStatus)
return false
})
}
func (_self *BufferIoManager) SetTimeout(timeoutMs int64) {
_pointer := _self.ffiObject.incrementPointer("*BufferIoManager")
defer _self.ffiObject.decrementPointer()
rustCall(func(_uniffiStatus *C.RustCallStatus) bool {
C.uniffi_ferret_fn_method_bufferiomanager_set_timeout(
_pointer, FfiConverterInt64INSTANCE.Lower(timeoutMs), _uniffiStatus)
return false
})
}
func (object *BufferIoManager) Destroy() {
runtime.SetFinalizer(object, nil)
object.ffiObject.destroy()
}
type FfiConverterBufferIoManager struct{}
var FfiConverterBufferIoManagerINSTANCE = FfiConverterBufferIoManager{}
func (c FfiConverterBufferIoManager) Lift(pointer unsafe.Pointer) *BufferIoManager {
result := &BufferIoManager{
newFfiObject(
pointer,
func(pointer unsafe.Pointer, status *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_clone_bufferiomanager(pointer, status)
},
func(pointer unsafe.Pointer, status *C.RustCallStatus) {
C.uniffi_ferret_fn_free_bufferiomanager(pointer, status)
},
),
}
runtime.SetFinalizer(result, (*BufferIoManager).Destroy)
return result
}
func (c FfiConverterBufferIoManager) Read(reader io.Reader) *BufferIoManager {
return c.Lift(unsafe.Pointer(uintptr(readUint64(reader))))
}
func (c FfiConverterBufferIoManager) Lower(value *BufferIoManager) unsafe.Pointer {
// TODO: this is bad - all synchronization from ObjectRuntime.go is discarded here,
// because the pointer will be decremented immediately after this function returns,
// and someone will be left holding onto a non-locked pointer.
pointer := value.ffiObject.incrementPointer("*BufferIoManager")
defer value.ffiObject.decrementPointer()
return pointer
}
func (c FfiConverterBufferIoManager) Write(writer io.Writer, value *BufferIoManager) {
writeUint64(writer, uint64(uintptr(c.Lower(value))))
}
type FfiDestroyerBufferIoManager struct{}
func (_ FfiDestroyerBufferIoManager) Destroy(value *BufferIoManager) {
value.Destroy()
}
type FerretCotBufferManagerInterface interface {
AssembleState() []uint8
DisassembleState(data []uint8) bool
GetBlockData(blockChoice uint8, index uint64) []uint8
IsSetup() bool
RecvCot() bool
RecvRot() bool
SendCot() bool
SendRot() bool
SetBlockData(blockChoice uint8, index uint64, data []uint8)
Setup() bool
StateSize() int64
}
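AssembleState and DisassembleState allow the extension state to be checkpointed between protocol rounds. A hedged sketch of a save/restore round trip follows; the helper name, file path, and the reading of StateSize as the serialized length are assumptions for illustration only (assumed imports: errors, os):

func checkpointCot(mgr *FerretCotBufferManager) error {
	if !mgr.IsSetup() && !mgr.Setup() {
		return errors.New("ferret COT setup failed")
	}
	// StateSize presumably reports the serialized length; AssembleState returns the bytes.
	state := mgr.AssembleState()
	if err := os.WriteFile("cot-state.bin", state, 0o600); err != nil {
		return err
	}
	saved, err := os.ReadFile("cot-state.bin")
	if err != nil {
		return err
	}
	if !mgr.DisassembleState(saved) {
		return errors.New("failed to restore ferret COT state")
	}
	return nil
}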
type FerretCotBufferManager struct {
ffiObject FfiObject
}
func (_self *FerretCotBufferManager) AssembleState() []uint8 {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_ferret_fn_method_ferretcotbuffermanager_assemble_state(
_pointer, _uniffiStatus),
}
}))
}
func (_self *FerretCotBufferManager) DisassembleState(data []uint8) bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_disassemble_state(
_pointer, FfiConverterSequenceUint8INSTANCE.Lower(data), _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) GetBlockData(blockChoice uint8, index uint64) []uint8 {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return GoRustBuffer{
inner: C.uniffi_ferret_fn_method_ferretcotbuffermanager_get_block_data(
_pointer, FfiConverterUint8INSTANCE.Lower(blockChoice), FfiConverterUint64INSTANCE.Lower(index), _uniffiStatus),
}
}))
}
func (_self *FerretCotBufferManager) IsSetup() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_is_setup(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) RecvCot() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_recv_cot(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) RecvRot() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_recv_rot(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) SendCot() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_send_cot(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) SendRot() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_send_rot(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) SetBlockData(blockChoice uint8, index uint64, data []uint8) {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
rustCall(func(_uniffiStatus *C.RustCallStatus) bool {
C.uniffi_ferret_fn_method_ferretcotbuffermanager_set_block_data(
_pointer, FfiConverterUint8INSTANCE.Lower(blockChoice), FfiConverterUint64INSTANCE.Lower(index), FfiConverterSequenceUint8INSTANCE.Lower(data), _uniffiStatus)
return false
})
}
func (_self *FerretCotBufferManager) Setup() bool {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_setup(
_pointer, _uniffiStatus)
}))
}
func (_self *FerretCotBufferManager) StateSize() int64 {
_pointer := _self.ffiObject.incrementPointer("*FerretCotBufferManager")
defer _self.ffiObject.decrementPointer()
return FfiConverterInt64INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int64_t {
return C.uniffi_ferret_fn_method_ferretcotbuffermanager_state_size(
_pointer, _uniffiStatus)
}))
}
func (object *FerretCotBufferManager) Destroy() {
runtime.SetFinalizer(object, nil)
object.ffiObject.destroy()
}
type FfiConverterFerretCotBufferManager struct{}
var FfiConverterFerretCotBufferManagerINSTANCE = FfiConverterFerretCotBufferManager{}
func (c FfiConverterFerretCotBufferManager) Lift(pointer unsafe.Pointer) *FerretCotBufferManager {
result := &FerretCotBufferManager{
newFfiObject(
pointer,
func(pointer unsafe.Pointer, status *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_clone_ferretcotbuffermanager(pointer, status)
},
func(pointer unsafe.Pointer, status *C.RustCallStatus) {
C.uniffi_ferret_fn_free_ferretcotbuffermanager(pointer, status)
},
),
}
runtime.SetFinalizer(result, (*FerretCotBufferManager).Destroy)
return result
}
func (c FfiConverterFerretCotBufferManager) Read(reader io.Reader) *FerretCotBufferManager {
return c.Lift(unsafe.Pointer(uintptr(readUint64(reader))))
}
func (c FfiConverterFerretCotBufferManager) Lower(value *FerretCotBufferManager) unsafe.Pointer {
// TODO: this is bad - all synchronization from ObjectRuntime.go is discarded here,
// because the pointer will be decremented immediately after this function returns,
// and someone will be left holding onto a non-locked pointer.
pointer := value.ffiObject.incrementPointer("*FerretCotBufferManager")
defer value.ffiObject.decrementPointer()
return pointer
}
func (c FfiConverterFerretCotBufferManager) Write(writer io.Writer, value *FerretCotBufferManager) {
writeUint64(writer, uint64(uintptr(c.Lower(value))))
}
type FfiDestroyerFerretCotBufferManager struct{}
func (_ FfiDestroyerFerretCotBufferManager) Destroy(value *FerretCotBufferManager) {
value.Destroy()
}
type FerretCotManagerInterface interface {
GetBlockData(blockChoice uint8, index uint64) []uint8
RecvCot()
@ -935,6 +1437,18 @@ func (FfiDestroyerSequenceBool) Destroy(sequence []bool) {
}
}
func CreateBufferIoManager(initialCap int64) *BufferIoManager {
return FfiConverterBufferIoManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_buffer_io_manager(FfiConverterInt64INSTANCE.Lower(initialCap), _uniffiStatus)
}))
}
func CreateFerretCotBufferManager(party int32, threads int32, length uint64, choices []bool, bufferio *BufferIoManager, malicious bool) *FerretCotBufferManager {
return FfiConverterFerretCotBufferManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_ferret_cot_buffer_manager(FfiConverterInt32INSTANCE.Lower(party), FfiConverterInt32INSTANCE.Lower(threads), FfiConverterUint64INSTANCE.Lower(length), FfiConverterSequenceBoolINSTANCE.Lower(choices), FfiConverterBufferIoManagerINSTANCE.Lower(bufferio), FfiConverterBoolINSTANCE.Lower(malicious), _uniffiStatus)
}))
}
func CreateFerretCotManager(party int32, threads int32, length uint64, choices []bool, netio *NetIoManager, malicious bool) *FerretCotManager {
return FfiConverterFerretCotManagerINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) unsafe.Pointer {
return C.uniffi_ferret_fn_func_create_ferret_cot_manager(FfiConverterInt32INSTANCE.Lower(party), FfiConverterInt32INSTANCE.Lower(threads), FfiConverterUint64INSTANCE.Lower(length), FfiConverterSequenceBoolINSTANCE.Lower(choices), FfiConverterNetIoManagerINSTANCE.Lower(netio), FfiConverterBoolINSTANCE.Lower(malicious), _uniffiStatus)


@ -377,6 +377,116 @@ static void call_UniffiForeignFutureCompleteVoid(
}
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_BUFFERIOMANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_BUFFERIOMANAGER
void* uniffi_ferret_fn_clone_bufferiomanager(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FREE_BUFFERIOMANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FREE_BUFFERIOMANAGER
void uniffi_ferret_fn_free_bufferiomanager(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_CLEAR
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_CLEAR
void uniffi_ferret_fn_method_bufferiomanager_clear(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_DRAIN_SEND
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_DRAIN_SEND
RustBuffer uniffi_ferret_fn_method_bufferiomanager_drain_send(void* ptr, uint64_t max_len, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_FILL_RECV
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_FILL_RECV
int8_t uniffi_ferret_fn_method_bufferiomanager_fill_recv(void* ptr, RustBuffer data, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_RECV_AVAILABLE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_RECV_AVAILABLE
uint64_t uniffi_ferret_fn_method_bufferiomanager_recv_available(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SEND_SIZE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SEND_SIZE
uint64_t uniffi_ferret_fn_method_bufferiomanager_send_size(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SET_ERROR
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SET_ERROR
void uniffi_ferret_fn_method_bufferiomanager_set_error(void* ptr, RustBuffer message, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SET_TIMEOUT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_BUFFERIOMANAGER_SET_TIMEOUT
void uniffi_ferret_fn_method_bufferiomanager_set_timeout(void* ptr, int64_t timeout_ms, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_FERRETCOTBUFFERMANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_FERRETCOTBUFFERMANAGER
void* uniffi_ferret_fn_clone_ferretcotbuffermanager(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FREE_FERRETCOTBUFFERMANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FREE_FERRETCOTBUFFERMANAGER
void uniffi_ferret_fn_free_ferretcotbuffermanager(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_ASSEMBLE_STATE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_ASSEMBLE_STATE
RustBuffer uniffi_ferret_fn_method_ferretcotbuffermanager_assemble_state(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_DISASSEMBLE_STATE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_DISASSEMBLE_STATE
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_disassemble_state(void* ptr, RustBuffer data, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_GET_BLOCK_DATA
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_GET_BLOCK_DATA
RustBuffer uniffi_ferret_fn_method_ferretcotbuffermanager_get_block_data(void* ptr, uint8_t block_choice, uint64_t index, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_IS_SETUP
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_IS_SETUP
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_is_setup(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_RECV_COT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_RECV_COT
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_recv_cot(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_RECV_ROT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_RECV_ROT
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_recv_rot(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SEND_COT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SEND_COT
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_send_cot(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SEND_ROT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SEND_ROT
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_send_rot(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SET_BLOCK_DATA
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SET_BLOCK_DATA
void uniffi_ferret_fn_method_ferretcotbuffermanager_set_block_data(void* ptr, uint8_t block_choice, uint64_t index, RustBuffer data, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SETUP
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_SETUP
int8_t uniffi_ferret_fn_method_ferretcotbuffermanager_setup(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_STATE_SIZE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_METHOD_FERRETCOTBUFFERMANAGER_STATE_SIZE
int64_t uniffi_ferret_fn_method_ferretcotbuffermanager_state_size(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_FERRETCOTMANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_CLONE_FERRETCOTMANAGER
@ -428,6 +538,16 @@ void* uniffi_ferret_fn_clone_netiomanager(void* ptr, RustCallStatus *out_status
void uniffi_ferret_fn_free_netiomanager(void* ptr, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_BUFFER_IO_MANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_BUFFER_IO_MANAGER
void* uniffi_ferret_fn_func_create_buffer_io_manager(int64_t initial_cap, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_FERRET_COT_BUFFER_MANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_FERRET_COT_BUFFER_MANAGER
void* uniffi_ferret_fn_func_create_ferret_cot_buffer_manager(int32_t party, int32_t threads, uint64_t length, RustBuffer choices, void* bufferio, int8_t malicious, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_FERRET_COT_MANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_FN_FUNC_CREATE_FERRET_COT_MANAGER
void* uniffi_ferret_fn_func_create_ferret_cot_manager(int32_t party, int32_t threads, uint64_t length, RustBuffer choices, void* netio, int8_t malicious, RustCallStatus *out_status
@ -716,6 +836,18 @@ void ffi_ferret_rust_future_free_void(uint64_t handle
#ifndef UNIFFI_FFIDEF_FFI_FERRET_RUST_FUTURE_COMPLETE_VOID
#define UNIFFI_FFIDEF_FFI_FERRET_RUST_FUTURE_COMPLETE_VOID
void ffi_ferret_rust_future_complete_void(uint64_t handle, RustCallStatus *out_status
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_BUFFER_IO_MANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_BUFFER_IO_MANAGER
uint16_t uniffi_ferret_checksum_func_create_buffer_io_manager(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_FERRET_COT_BUFFER_MANAGER
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_FERRET_COT_BUFFER_MANAGER
uint16_t uniffi_ferret_checksum_func_create_ferret_cot_buffer_manager(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_FERRET_COT_MANAGER
@ -728,6 +860,114 @@ uint16_t uniffi_ferret_checksum_func_create_ferret_cot_manager(void
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_FUNC_CREATE_NETIO_MANAGER
uint16_t uniffi_ferret_checksum_func_create_netio_manager(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_CLEAR
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_CLEAR
uint16_t uniffi_ferret_checksum_method_bufferiomanager_clear(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_DRAIN_SEND
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_DRAIN_SEND
uint16_t uniffi_ferret_checksum_method_bufferiomanager_drain_send(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_FILL_RECV
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_FILL_RECV
uint16_t uniffi_ferret_checksum_method_bufferiomanager_fill_recv(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_RECV_AVAILABLE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_RECV_AVAILABLE
uint16_t uniffi_ferret_checksum_method_bufferiomanager_recv_available(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SEND_SIZE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SEND_SIZE
uint16_t uniffi_ferret_checksum_method_bufferiomanager_send_size(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SET_ERROR
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SET_ERROR
uint16_t uniffi_ferret_checksum_method_bufferiomanager_set_error(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SET_TIMEOUT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_BUFFERIOMANAGER_SET_TIMEOUT
uint16_t uniffi_ferret_checksum_method_bufferiomanager_set_timeout(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_ASSEMBLE_STATE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_ASSEMBLE_STATE
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_assemble_state(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_DISASSEMBLE_STATE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_DISASSEMBLE_STATE
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_disassemble_state(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_GET_BLOCK_DATA
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_GET_BLOCK_DATA
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_get_block_data(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_IS_SETUP
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_IS_SETUP
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_is_setup(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_RECV_COT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_RECV_COT
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_cot(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_RECV_ROT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_RECV_ROT
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_recv_rot(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SEND_COT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SEND_COT
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_send_cot(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SEND_ROT
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SEND_ROT
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_send_rot(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SET_BLOCK_DATA
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SET_BLOCK_DATA
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_set_block_data(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SETUP
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_SETUP
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_setup(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_STATE_SIZE
#define UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTBUFFERMANAGER_STATE_SIZE
uint16_t uniffi_ferret_checksum_method_ferretcotbuffermanager_state_size(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_FERRET_CHECKSUM_METHOD_FERRETCOTMANAGER_GET_BLOCK_DATA


@ -100,6 +100,14 @@ func (hg *HypergraphCRDT) publishSnapshot(root []byte) {
hg.snapshotMgr.publish(root)
}
// PublishSnapshot announces a new snapshot generation with the given commit root.
// This should be called after Commit() to make the new state available for sync.
// Clients can request sync against this root using the expectedRoot parameter.
// The snapshot manager retains a limited number of historical generations.
func (hg *HypergraphCRDT) PublishSnapshot(root []byte) {
hg.publishSnapshot(root)
}
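A hedged sketch of the intended call order; root stands for the commit root produced by the preceding Commit call (not shown), stream for an already-open PerformSync client stream, and peer for the syncing node's own HypergraphCRDT:

// Server/master side: announce the freshly committed state.
hg.PublishSnapshot(root)

// Syncing peer side: pin the session to that exact generation.
newRoot, err := peer.SyncFrom(stream, shardKey, phaseSet, root)
if err == nil && bytes.Equal(newRoot, root) {
	// the peer converged on the published state
}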
func (hg *HypergraphCRDT) cloneSetWithStore(
set hypergraph.IdSet,
store tries.TreeBackingStore,
@ -114,6 +122,14 @@ func (hg *HypergraphCRDT) cloneSetWithStore(
return set
}
// SetSelfPeerID sets the self peer ID on the sync controller. Sessions from
// this peer ID are allowed unlimited concurrency (for workers syncing to master).
func (hg *HypergraphCRDT) SetSelfPeerID(peerID string) {
if hg.syncController != nil {
hg.syncController.SetSelfPeerID(peerID)
}
}
func (hg *HypergraphCRDT) SetShutdownContext(ctx context.Context) {
hg.shutdownCtx = ctx
go func() {
@ -155,15 +171,27 @@ func (hg *HypergraphCRDT) snapshotSet(
hg.setsMu.RUnlock()
if set == nil {
// Try to load root from snapshot store since set doesn't exist in memory
var root tries.LazyVectorCommitmentNode
if targetStore != nil {
root, _ = targetStore.GetNodeByPath(
string(atomType),
string(phaseType),
shardKey,
[]int{}, // empty path = root
)
}
set = NewIdSet(
atomType,
phaseType,
shardKey,
hg.store,
targetStore, // Use target store directly since set is new
hg.prover,
nil,
root,
hg.getCoveredPrefix(),
)
// Return directly - no need to clone since we already used targetStore
return set
}
return hg.cloneSetWithStore(set, targetStore)
@ -366,12 +394,12 @@ func (hg *HypergraphCRDT) GetSize(
p, _ := vrs.GetTree().GetByPath(path)
if p != nil {
sum = sum.Add(sum, o.GetSize())
sum = sum.Add(sum, p.GetSize())
}
q, _ := hrs.GetTree().GetByPath(path)
if q != nil {
sum = sum.Add(sum, o.GetSize())
sum = sum.Add(sum, q.GetSize())
}
return sum


@ -52,7 +52,7 @@ func (hg *HypergraphCRDT) Commit(
if r, ok := commits[shardKey]; ok && len(r[0]) != 64 {
continue
}
root := vertexAdds.GetTree().Commit(false)
root := vertexAdds.GetTree().Commit(txn, false)
ensureSet(shardKey)
commits[shardKey][0] = root
@ -77,7 +77,7 @@ func (hg *HypergraphCRDT) Commit(
if r, ok := commits[shardKey]; ok && len(r[1]) != 64 {
continue
}
root := vertexRemoves.GetTree().Commit(false)
root := vertexRemoves.GetTree().Commit(txn, false)
ensureSet(shardKey)
commits[shardKey][1] = root
@ -104,7 +104,7 @@ func (hg *HypergraphCRDT) Commit(
if r, ok := commits[shardKey]; ok && len(r[2]) != 64 {
continue
}
root := hyperedgeAdds.GetTree().Commit(false)
root := hyperedgeAdds.GetTree().Commit(txn, false)
ensureSet(shardKey)
commits[shardKey][2] = root
@ -131,7 +131,7 @@ func (hg *HypergraphCRDT) Commit(
if r, ok := commits[shardKey]; ok && len(r[3]) != 64 {
continue
}
root := hyperedgeRemoves.GetTree().Commit(false)
root := hyperedgeRemoves.GetTree().Commit(txn, false)
ensureSet(shardKey)
commits[shardKey][3] = root
@ -306,9 +306,9 @@ func (hg *HypergraphCRDT) CommitShard(
hg.getCoveredPrefix(),
)
vertexAddTree := vertexAddSet.GetTree()
vertexAddTree.Commit(false)
vertexAddTree.Commit(nil, false)
vertexRemoveTree := vertexRemoveSet.GetTree()
vertexRemoveTree.Commit(false)
vertexRemoveTree.Commit(nil, false)
path := tries.GetFullPath(shardAddress[:32])
for _, p := range shardAddress[32:] {
@ -333,9 +333,9 @@ func (hg *HypergraphCRDT) CommitShard(
hg.getCoveredPrefix(),
)
hyperedgeAddTree := hyperedgeAddSet.GetTree()
hyperedgeAddTree.Commit(false)
hyperedgeAddTree.Commit(nil, false)
hyperedgeRemoveTree := hyperedgeRemoveSet.GetTree()
hyperedgeRemoveTree.Commit(false)
hyperedgeRemoveTree.Commit(nil, false)
hyperedgeAddNode, err := vertexAddTree.GetByPath(path)
if err != nil && !strings.Contains(err.Error(), "not found") {


@ -1,6 +1,7 @@
package hypergraph
import (
"bytes"
"encoding/hex"
"fmt"
"sync"
@ -11,6 +12,11 @@ import (
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
// maxSnapshotGenerations is the maximum number of historical snapshot
// generations to retain. When a new root is published, older generations
// beyond this limit are released.
const maxSnapshotGenerations = 10
type snapshotHandle struct {
store tries.TreeBackingStore
release func()
@ -152,12 +158,21 @@ func (h *snapshotHandle) isLeafMiss(key []byte) bool {
return miss
}
// snapshotGeneration represents a set of shard snapshots for a specific
// commit root.
type snapshotGeneration struct {
root []byte
handles map[string]*snapshotHandle // keyed by shard key
dbSnapshot tries.DBSnapshot // point-in-time DB snapshot taken at publish
}
type snapshotManager struct {
logger *zap.Logger
store tries.TreeBackingStore
mu sync.Mutex
root []byte
handles map[string]*snapshotHandle
logger *zap.Logger
store tries.TreeBackingStore
mu sync.Mutex
// generations holds snapshot generations ordered from newest to oldest.
// generations[0] is the current/latest generation.
generations []*snapshotGeneration
}
func newSnapshotManager(
@ -165,9 +180,9 @@ func newSnapshotManager(
store tries.TreeBackingStore,
) *snapshotManager {
return &snapshotManager{
logger: logger,
store: store,
handles: make(map[string]*snapshotHandle),
logger: logger,
store: store,
generations: make([]*snapshotGeneration, 0, maxSnapshotGenerations),
}
}
@ -175,39 +190,171 @@ func (m *snapshotManager) publish(root []byte) {
m.mu.Lock()
defer m.mu.Unlock()
// Remove all handles from the map so new syncs get new handles.
// Handles with active refs will be released when their last user calls release().
// Handles with no active refs (only the initial ref from creation) are released now.
for key, handle := range m.handles {
delete(m.handles, key)
if handle != nil {
// releaseRef decrements the ref count. If this was the last ref
// (i.e., no active sync sessions), the underlying DB is released.
// If there are active sync sessions, they will release it when done.
handle.releaseRef(m.logger)
}
}
m.root = nil
if len(root) != 0 {
m.root = append([]byte{}, root...)
}
rootHex := ""
if len(root) != 0 {
rootHex = hex.EncodeToString(root)
}
m.logger.Debug("reset snapshot state", zap.String("root", rootHex))
// Check if this root already matches the current generation
if len(m.generations) > 0 && bytes.Equal(m.generations[0].root, root) {
m.logger.Debug(
"publish called with current root, no change",
zap.String("root", rootHex),
)
return
}
// Create a new generation for this root
newGen := &snapshotGeneration{
handles: make(map[string]*snapshotHandle),
}
if len(root) != 0 {
newGen.root = append([]byte{}, root...)
}
// Take a point-in-time DB snapshot if the store supports it.
// This ensures all shard snapshots for this generation reflect
// the exact state at publish time, avoiding race conditions.
if m.store != nil {
dbSnap, err := m.store.NewDBSnapshot()
if err != nil {
m.logger.Warn(
"failed to create DB snapshot for generation",
zap.String("root", rootHex),
zap.Error(err),
)
} else {
newGen.dbSnapshot = dbSnap
}
}
// Prepend the new generation (newest first)
m.generations = append([]*snapshotGeneration{newGen}, m.generations...)
// Release generations beyond the limit
for len(m.generations) > maxSnapshotGenerations {
oldGen := m.generations[len(m.generations)-1]
m.generations = m.generations[:len(m.generations)-1]
// Release all handles in the old generation
for key, handle := range oldGen.handles {
delete(oldGen.handles, key)
if handle != nil {
handle.releaseRef(m.logger)
}
}
// Close the DB snapshot if present
if oldGen.dbSnapshot != nil {
if err := oldGen.dbSnapshot.Close(); err != nil {
m.logger.Warn(
"failed to close DB snapshot",
zap.Error(err),
)
}
}
oldRootHex := ""
if len(oldGen.root) != 0 {
oldRootHex = hex.EncodeToString(oldGen.root)
}
m.logger.Debug(
"released old snapshot generation",
zap.String("root", oldRootHex),
)
}
m.logger.Debug(
"published new snapshot generation",
zap.String("root", rootHex),
zap.Int("total_generations", len(m.generations)),
)
}
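A small illustration of the retention behaviour described above, written as it might appear in a package test; passing a nil store simply skips the DB-snapshot step, and the root values are arbitrary:

m := newSnapshotManager(zap.NewNop(), nil)
for i := 0; i < maxSnapshotGenerations+5; i++ {
	m.publish(bytes.Repeat([]byte{byte(i + 1)}, 64))
}
// Only the newest maxSnapshotGenerations roots are retained;
// m.generations[0] always holds the most recently published root.
if len(m.generations) != maxSnapshotGenerations {
	t.Fatalf("expected %d generations, got %d", maxSnapshotGenerations, len(m.generations))
}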
// acquire returns a snapshot handle for the given shard key. If expectedRoot
// is provided and a matching generation has an existing snapshot for this shard,
// that snapshot is returned. Otherwise, a new snapshot is created from the
// generation's DB snapshot (if available) to ensure consistency.
//
// With DB snapshots: Historical generations can create new shard snapshots because
// the DB snapshot captures the exact state at publish time.
// Without DB snapshots (fallback): Only the latest generation can create snapshots.
func (m *snapshotManager) acquire(
shardKey tries.ShardKey,
expectedRoot []byte,
) *snapshotHandle {
key := shardKeyString(shardKey)
m.mu.Lock()
defer m.mu.Unlock()
if handle, ok := m.handles[key]; ok {
if len(m.generations) == 0 {
m.logger.Warn("no snapshot generations available")
return nil
}
var targetGen *snapshotGeneration
// If expectedRoot is provided, look for the matching generation
if len(expectedRoot) > 0 {
for _, gen := range m.generations {
if bytes.Equal(gen.root, expectedRoot) {
// Found matching generation, check if it has a snapshot for this shard
if handle, ok := gen.handles[key]; ok {
m.logger.Debug(
"found existing snapshot for expected root",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
handle.acquire()
return handle
}
// Generation exists but no snapshot for this shard yet.
// If we have a DB snapshot, we can create from it even for older generations.
if gen.dbSnapshot != nil {
targetGen = gen
m.logger.Debug(
"creating snapshot for expected root from DB snapshot",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
break
}
// No DB snapshot - only allow if this is the latest generation
if gen != m.generations[0] {
m.logger.Warn(
"generation matches expected root but has no DB snapshot and is not latest",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
return nil
}
targetGen = gen
m.logger.Debug(
"creating snapshot for expected root (latest generation, no DB snapshot)",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
break
}
}
// If we didn't find a matching generation at all, reject
if targetGen == nil {
if m.logger != nil {
latestRoot := ""
if len(m.generations) > 0 {
latestRoot = hex.EncodeToString(m.generations[0].root)
}
m.logger.Warn(
"no snapshot generation matches expected root, rejecting sync request",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
zap.String("latest_root", latestRoot),
)
}
return nil
}
} else {
// No expected root - use the latest generation
targetGen = m.generations[0]
}
// Check if we already have a handle for this shard in the target generation
if handle, ok := targetGen.handles[key]; ok {
handle.acquire()
return handle
}
@ -216,7 +363,19 @@ func (m *snapshotManager) acquire(
return nil
}
storeSnapshot, release, err := m.store.NewShardSnapshot(shardKey)
// Create the shard snapshot, preferring DB snapshot if available
var storeSnapshot tries.TreeBackingStore
var release func()
var err error
if targetGen.dbSnapshot != nil {
storeSnapshot, release, err = m.store.NewShardSnapshotFromDBSnapshot(
shardKey,
targetGen.dbSnapshot,
)
} else {
storeSnapshot, release, err = m.store.NewShardSnapshot(shardKey)
}
if err != nil {
m.logger.Warn(
"failed to build shard snapshot",
@ -226,16 +385,27 @@ func (m *snapshotManager) acquire(
return nil
}
handle := newSnapshotHandle(key, storeSnapshot, release, m.root)
handle := newSnapshotHandle(key, storeSnapshot, release, targetGen.root)
// Acquire a ref for the caller. The handle is created with refs=1 (the owner ref
// held by the snapshot manager), and this adds another ref for the sync session.
// This ensures publish() can release the owner ref without closing the DB while
// a sync is still using it.
handle.acquire()
m.handles[key] = handle
targetGen.handles[key] = handle
return handle
}
// currentRoot returns the commit root of the latest snapshot generation.
func (m *snapshotManager) currentRoot() []byte {
m.mu.Lock()
defer m.mu.Unlock()
if len(m.generations) == 0 {
return nil
}
return append([]byte{}, m.generations[0].root...)
}
func (m *snapshotManager) release(handle *snapshotHandle) {
if handle == nil {
return
@ -245,8 +415,13 @@ func (m *snapshotManager) release(handle *snapshotHandle) {
}
m.mu.Lock()
defer m.mu.Unlock()
if current, ok := m.handles[handle.key]; ok && current == handle {
delete(m.handles, handle.key)
// Search all generations for this handle and remove it
for _, gen := range m.generations {
if current, ok := gen.handles[handle.key]; ok && current == handle {
delete(gen.handles, handle.key)
return
}
}
}

File diff suppressed because it is too large


@ -0,0 +1,995 @@
package hypergraph
import (
"bytes"
"context"
"encoding/hex"
"io"
"slices"
"strings"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
// syncSession holds the state for a PerformSync session.
type syncSession struct {
shardKey tries.ShardKey
phaseSet protobufs.HypergraphPhaseSet
snapshot *snapshotHandle
idSet hypergraph.IdSet
store tries.TreeBackingStore
}
// isGlobalProverShard returns true if this is the global prover registry shard
// (L1={0,0,0}, L2=0xff*32). Used to enable detailed logging for prover sync
// without adding noise from other shard syncs.
func isGlobalProverShard(shardKey tries.ShardKey) bool {
if shardKey.L1 != [3]byte{0, 0, 0} {
return false
}
for _, b := range shardKey.L2 {
if b != 0xff {
return false
}
}
return true
}
// isGlobalProverShardBytes performs the same check on the concatenated
// 35-byte shard key slice.
func isGlobalProverShardBytes(shardKeyBytes []byte) bool {
if len(shardKeyBytes) != 35 {
return false
}
for i := 0; i < 3; i++ {
if shardKeyBytes[i] != 0x00 {
return false
}
}
for i := 3; i < 35; i++ {
if shardKeyBytes[i] != 0xff {
return false
}
}
return true
}
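For reference, both helpers accept the same 35-byte key (three zero bytes for L1 followed by thirty-two 0xff bytes for L2); a quick illustrative check:

sk := tries.ShardKey{L1: [3]byte{0, 0, 0}}
for i := range sk.L2 {
	sk.L2[i] = 0xff
}
keyBytes := slices.Concat(sk.L1[:], sk.L2[:])
_ = isGlobalProverShard(sk) && isGlobalProverShardBytes(keyBytes) // both report true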
// PerformSync implements the server side of the client-driven sync protocol.
// The client sends GetBranch and GetLeaves requests, and the server responds
// with the requested data. This is simpler than HyperStream because there's
// no need for both sides to walk in lockstep.
//
// The server uses a snapshot to ensure consistent reads throughout the session.
func (hg *HypergraphCRDT) PerformSync(
stream protobufs.HypergraphComparisonService_PerformSyncServer,
) error {
ctx := stream.Context()
logger := hg.logger.With(zap.String("method", "PerformSync"))
sessionStart := time.Now()
// Session state - initialized on first request
var session *syncSession
defer func() {
if session != nil {
logger.Info("sync session closed",
zap.Duration("duration", time.Since(sessionStart)),
)
if session.snapshot != nil {
hg.snapshotMgr.release(session.snapshot)
}
}
}()
// Process requests until stream closes
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
req, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return errors.Wrap(err, "receive request")
}
var resp *protobufs.HypergraphSyncResponse
switch r := req.Request.(type) {
case *protobufs.HypergraphSyncQuery_GetBranch:
// Initialize session on first request
if session == nil {
session, err = hg.initSyncSession(
r.GetBranch.ShardKey,
r.GetBranch.PhaseSet,
r.GetBranch.ExpectedRoot,
logger,
)
if err != nil {
return errors.Wrap(err, "init sync session")
}
}
resp, err = hg.handleGetBranch(ctx, r.GetBranch, session, logger)
case *protobufs.HypergraphSyncQuery_GetLeaves:
// Initialize session on first request
if session == nil {
session, err = hg.initSyncSession(
r.GetLeaves.ShardKey,
r.GetLeaves.PhaseSet,
r.GetLeaves.ExpectedRoot,
logger,
)
if err != nil {
return errors.Wrap(err, "init sync session")
}
}
resp, err = hg.handleGetLeaves(ctx, r.GetLeaves, session, logger)
default:
resp = &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Error{
Error: &protobufs.HypergraphSyncError{
Code: protobufs.HypergraphSyncErrorCode_HYPERGRAPH_SYNC_ERROR_UNKNOWN,
Message: "unknown request type",
},
},
}
}
if err != nil {
logger.Error("error handling request", zap.Error(err))
resp = &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Error{
Error: &protobufs.HypergraphSyncError{
Code: protobufs.HypergraphSyncErrorCode_HYPERGRAPH_SYNC_ERROR_INTERNAL,
Message: err.Error(),
},
},
}
}
if err := stream.Send(resp); err != nil {
return errors.Wrap(err, "send response")
}
}
}
// initSyncSession initializes a sync session with a snapshot for consistent reads.
func (hg *HypergraphCRDT) initSyncSession(
shardKeyBytes []byte,
phaseSet protobufs.HypergraphPhaseSet,
expectedRoot []byte,
logger *zap.Logger,
) (*syncSession, error) {
if len(shardKeyBytes) != 35 {
return nil, errors.New("shard key must be 35 bytes")
}
shardKey := tries.ShardKey{
L1: [3]byte(shardKeyBytes[:3]),
L2: [32]byte(shardKeyBytes[3:]),
}
// Acquire a snapshot for consistent reads throughout the session.
// If expectedRoot is provided, we try to find a snapshot matching that root.
snapshot := hg.snapshotMgr.acquire(shardKey, expectedRoot)
if snapshot == nil {
return nil, errors.New("failed to acquire snapshot")
}
snapshotStore := snapshot.Store()
idSet := hg.snapshotPhaseSet(shardKey, phaseSet, snapshotStore)
if idSet == nil {
hg.snapshotMgr.release(snapshot)
return nil, errors.New("unsupported phase set")
}
logger.Info("sync session started",
zap.String("shard", hex.EncodeToString(shardKeyBytes)),
zap.String("phase", phaseSet.String()),
)
return &syncSession{
shardKey: shardKey,
phaseSet: phaseSet,
snapshot: snapshot,
idSet: idSet,
store: snapshotStore,
}, nil
}
func (hg *HypergraphCRDT) handleGetBranch(
ctx context.Context,
req *protobufs.HypergraphSyncGetBranchRequest,
session *syncSession,
logger *zap.Logger,
) (*protobufs.HypergraphSyncResponse, error) {
tree := session.idSet.GetTree()
if tree == nil || tree.Root == nil {
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Branch{
Branch: &protobufs.HypergraphSyncBranchResponse{
FullPath: req.Path,
Commitment: nil,
Children: nil,
IsLeaf: true,
LeafCount: 0,
},
},
}, nil
}
path := toIntSlice(req.Path)
node := getNodeAtPath(
logger,
tree.SetType,
tree.PhaseType,
tree.ShardKey,
tree.Root,
toInt32Slice(path),
0,
)
if node == nil {
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Error{
Error: &protobufs.HypergraphSyncError{
Code: protobufs.HypergraphSyncErrorCode_HYPERGRAPH_SYNC_ERROR_NODE_NOT_FOUND,
Message: "node not found at path",
Path: req.Path,
},
},
}, nil
}
resp := &protobufs.HypergraphSyncBranchResponse{}
// Ensure commitment is computed first
node = ensureCommittedNode(logger, tree, path, node)
switch n := node.(type) {
case *tries.LazyVectorCommitmentBranchNode:
resp.FullPath = toInt32Slice(n.FullPrefix)
resp.Commitment = n.Commitment
resp.IsLeaf = false
resp.LeafCount = uint64(n.LeafCount)
// Collect children
for i := 0; i < 64; i++ {
child := n.Children[i]
if child == nil {
var err error
child, err = n.Store.GetNodeByPath(
tree.SetType,
tree.PhaseType,
tree.ShardKey,
slices.Concat(n.FullPrefix, []int{i}),
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
continue
}
}
if child != nil {
childPath := slices.Concat(n.FullPrefix, []int{i})
child = ensureCommittedNode(logger, tree, childPath, child)
var childCommit []byte
switch c := child.(type) {
case *tries.LazyVectorCommitmentBranchNode:
childCommit = c.Commitment
case *tries.LazyVectorCommitmentLeafNode:
childCommit = c.Commitment
}
if len(childCommit) > 0 {
resp.Children = append(resp.Children, &protobufs.HypergraphSyncChildInfo{
Index: int32(i),
Commitment: childCommit,
})
}
}
}
case *tries.LazyVectorCommitmentLeafNode:
resp.FullPath = req.Path // Leaves don't have FullPrefix, use requested path
resp.Commitment = n.Commitment
resp.IsLeaf = true
resp.LeafCount = 1
}
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Branch{
Branch: resp,
},
}, nil
}
func (hg *HypergraphCRDT) handleGetLeaves(
ctx context.Context,
req *protobufs.HypergraphSyncGetLeavesRequest,
session *syncSession,
logger *zap.Logger,
) (*protobufs.HypergraphSyncResponse, error) {
tree := session.idSet.GetTree()
if tree == nil || tree.Root == nil {
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Leaves{
Leaves: &protobufs.HypergraphSyncLeavesResponse{
Path: req.Path,
Leaves: nil,
TotalLeaves: 0,
},
},
}, nil
}
path := toIntSlice(req.Path)
node := getNodeAtPath(
logger,
tree.SetType,
tree.PhaseType,
tree.ShardKey,
tree.Root,
toInt32Slice(path),
0,
)
if node == nil {
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Error{
Error: &protobufs.HypergraphSyncError{
Code: protobufs.HypergraphSyncErrorCode_HYPERGRAPH_SYNC_ERROR_NODE_NOT_FOUND,
Message: "node not found at path",
Path: req.Path,
},
},
}, nil
}
// Get all leaves under this node
allLeaves := tries.GetAllLeaves(
tree.SetType,
tree.PhaseType,
tree.ShardKey,
node,
)
// Apply pagination
maxLeaves := int(req.MaxLeaves)
if maxLeaves == 0 {
maxLeaves = 1000 // Default batch size
}
startIdx := 0
if len(req.ContinuationToken) > 0 {
// Simple continuation: token is the start index as hex
idx, err := parseContToken(req.ContinuationToken)
if err == nil {
startIdx = idx
}
}
var leaves []*protobufs.LeafData
var totalNonNil uint64
for _, leaf := range allLeaves {
if leaf == nil {
continue
}
totalNonNil++
if int(totalNonNil) <= startIdx {
continue
}
if len(leaves) >= maxLeaves {
break
}
leafData := &protobufs.LeafData{
Key: leaf.Key,
Value: leaf.Value,
HashTarget: leaf.HashTarget,
Size: leaf.Size.FillBytes(make([]byte, 32)),
}
// Load underlying vertex tree if available (use snapshot store for consistency)
vtree, err := session.store.LoadVertexTree(leaf.Key)
if err == nil && vtree != nil {
data, err := tries.SerializeNonLazyTree(vtree)
if err == nil {
leafData.UnderlyingData = data
}
}
leaves = append(leaves, leafData)
}
resp := &protobufs.HypergraphSyncLeavesResponse{
Path: req.Path,
Leaves: leaves,
TotalLeaves: totalNonNil,
}
// Set continuation token if more leaves remain
if startIdx+len(leaves) < int(totalNonNil) {
resp.ContinuationToken = makeContToken(startIdx + len(leaves))
}
return &protobufs.HypergraphSyncResponse{
Response: &protobufs.HypergraphSyncResponse_Leaves{
Leaves: resp,
},
}, nil
}
func (hg *HypergraphCRDT) getPhaseSet(
shardKey tries.ShardKey,
phaseSet protobufs.HypergraphPhaseSet,
) hypergraph.IdSet {
switch phaseSet {
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS:
return hg.getVertexAddsSet(shardKey)
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_REMOVES:
return hg.getVertexRemovesSet(shardKey)
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_ADDS:
return hg.getHyperedgeAddsSet(shardKey)
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES:
return hg.getHyperedgeRemovesSet(shardKey)
default:
return nil
}
}
func parseContToken(token []byte) (int, error) {
if len(token) == 0 {
return 0, nil
}
// Token is hex-encoded 4 bytes (big-endian int32)
decoded, err := hex.DecodeString(string(token))
if err != nil {
return 0, err
}
if len(decoded) != 4 {
return 0, errors.New("invalid continuation token length")
}
idx := int(decoded[0])<<24 | int(decoded[1])<<16 | int(decoded[2])<<8 | int(decoded[3])
return idx, nil
}
func makeContToken(idx int) []byte {
return []byte(hex.EncodeToString([]byte{byte(idx >> 24), byte(idx >> 16), byte(idx >> 8), byte(idx)}))
}
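The continuation token is just the start index, hex-encoded as a big-endian uint32; a quick round trip for illustration:

tok := makeContToken(4096) // eight ASCII hex characters: "00001000"
idx, err := parseContToken(tok)
// idx == 4096, err == nil; an empty token parses as index 0
_ = idx
_ = err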
// SyncFrom performs a client-driven sync from the given server stream.
// It navigates to the covered prefix (if any), then recursively syncs
// differing subtrees. If expectedRoot is provided, the server will attempt
// to sync from a snapshot matching that root commitment.
// Returns the new root commitment after sync completes.
func (hg *HypergraphCRDT) SyncFrom(
stream protobufs.HypergraphComparisonService_PerformSyncClient,
shardKey tries.ShardKey,
phaseSet protobufs.HypergraphPhaseSet,
expectedRoot []byte,
) ([]byte, error) {
hg.mu.Lock()
defer hg.mu.Unlock()
isGlobalProver := isGlobalProverShard(shardKey)
logger := hg.logger.With(
zap.String("method", "SyncFrom"),
zap.String("shard", hex.EncodeToString(slices.Concat(shardKey.L1[:], shardKey.L2[:]))),
)
if len(expectedRoot) > 0 {
logger = logger.With(zap.String("expectedRoot", hex.EncodeToString(expectedRoot)))
}
syncStart := time.Now()
defer func() {
logger.Debug("SyncFrom completed", zap.Duration("duration", time.Since(syncStart)))
}()
set := hg.getPhaseSet(shardKey, phaseSet)
if set == nil {
return nil, errors.New("unsupported phase set")
}
// For global prover sync, capture pre-sync state to detect changes
var preSyncRoot []byte
if isGlobalProver {
preSyncRoot = set.GetTree().Commit(nil, false)
}
shardKeyBytes := slices.Concat(shardKey.L1[:], shardKey.L2[:])
coveredPrefix := hg.getCoveredPrefix()
// Step 1: Navigate to sync point
syncPoint, err := hg.navigateToSyncPoint(stream, shardKeyBytes, phaseSet, coveredPrefix, expectedRoot, logger)
if err != nil {
return nil, errors.Wrap(err, "navigate to sync point")
}
if syncPoint == nil || len(syncPoint.Commitment) == 0 {
logger.Debug("server has no data at sync point")
// Return current root even if no data was synced
root := set.GetTree().Commit(nil, false)
return root, nil
}
// Step 2: Sync the subtree
err = hg.syncSubtree(stream, shardKeyBytes, phaseSet, expectedRoot, syncPoint, set, logger)
if err != nil {
return nil, errors.Wrap(err, "sync subtree")
}
// Step 3: Recompute commitment so future syncs see updated state
root := set.GetTree().Commit(nil, false)
// For global prover, only log if sync didn't converge (the interesting case)
if isGlobalProver && !bytes.Equal(root, expectedRoot) {
logger.Warn(
"global prover sync did not converge",
zap.String("phase", phaseSet.String()),
zap.String("pre_sync_root", hex.EncodeToString(preSyncRoot)),
zap.String("post_sync_root", hex.EncodeToString(root)),
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
zap.Bool("root_changed", !bytes.Equal(preSyncRoot, root)),
)
}
return root, nil
}
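
// Illustrative only (not part of the original file): a sketch of how a caller
// might drive SyncFrom for every phase set of one shard over a single
// established sync stream. The stream is assumed to come from the generated
// PerformSync client elsewhere; expectedRoots is a hypothetical map of known
// root commitments per phase set (nil entries simply skip snapshot matching).
func (hg *HypergraphCRDT) syncAllPhaseSetsExample(
	stream protobufs.HypergraphComparisonService_PerformSyncClient,
	shardKey tries.ShardKey,
	expectedRoots map[protobufs.HypergraphPhaseSet][]byte,
) error {
	for _, phase := range []protobufs.HypergraphPhaseSet{
		protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
		protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_REMOVES,
		protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_ADDS,
		protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES,
	} {
		// Each call returns the post-sync root commitment for that phase set.
		if _, err := hg.SyncFrom(stream, shardKey, phase, expectedRoots[phase]); err != nil {
			return errors.Wrap(err, "sync phase set")
		}
	}
	return nil
}
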
func (hg *HypergraphCRDT) navigateToSyncPoint(
stream protobufs.HypergraphComparisonService_PerformSyncClient,
shardKey []byte,
phaseSet protobufs.HypergraphPhaseSet,
coveredPrefix []int,
expectedRoot []byte,
logger *zap.Logger,
) (*protobufs.HypergraphSyncBranchResponse, error) {
path := []int32{}
for {
// Query server for branch at current path
err := stream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKey,
PhaseSet: phaseSet,
Path: path,
ExpectedRoot: expectedRoot,
},
},
})
if err != nil {
return nil, errors.Wrap(err, "send GetBranch request")
}
resp, err := stream.Recv()
if err != nil {
return nil, errors.Wrap(err, "receive GetBranch response")
}
if errResp := resp.GetError(); errResp != nil {
if errResp.Code == protobufs.HypergraphSyncErrorCode_HYPERGRAPH_SYNC_ERROR_NODE_NOT_FOUND {
// Server doesn't have this path - nothing to sync
return nil, nil
}
return nil, errors.Errorf("server error: %s", errResp.Message)
}
branch := resp.GetBranch()
if branch == nil {
return nil, errors.New("unexpected response type")
}
logger.Debug("navigating",
zap.String("path", hex.EncodeToString(packPath(path))),
zap.String("fullPath", hex.EncodeToString(packPath(branch.FullPath))),
zap.Int("coveredPrefixLen", len(coveredPrefix)),
)
// If no covered prefix, root is the sync point
if len(coveredPrefix) == 0 {
return branch, nil
}
// Check if server's full path reaches or passes our covered prefix
serverPath := toIntSlice(branch.FullPath)
if isPrefixOrEqual(coveredPrefix, serverPath) {
return branch, nil
}
// Need to navigate deeper - find next child to descend into
if len(serverPath) >= len(coveredPrefix) {
// Server path is at least as long as our prefix but diverges from it
// This means server has data outside our coverage
return branch, nil
}
// Server path is shorter - we need to go deeper
nextNibble := coveredPrefix[len(serverPath)]
// Check if server has a child at this index
found := false
for _, child := range branch.Children {
if int(child.Index) == nextNibble {
found = true
break
}
}
if !found {
// Server doesn't have the path we need
logger.Debug("server missing path to covered prefix",
zap.Int("nextNibble", nextNibble),
)
return nil, nil
}
// Descend to next level
path = append(branch.FullPath, int32(nextNibble))
}
}

func (hg *HypergraphCRDT) syncSubtree(
stream protobufs.HypergraphComparisonService_PerformSyncClient,
shardKey []byte,
phaseSet protobufs.HypergraphPhaseSet,
expectedRoot []byte,
serverBranch *protobufs.HypergraphSyncBranchResponse,
localSet hypergraph.IdSet,
logger *zap.Logger,
) error {
tree := localSet.GetTree()
// Get local node at same path
var localCommitment []byte
var localNode tries.LazyVectorCommitmentNode
if tree != nil && tree.Root != nil {
path := toIntSlice(serverBranch.FullPath)
localNode = getNodeAtPath(
logger,
tree.SetType,
tree.PhaseType,
tree.ShardKey,
tree.Root,
serverBranch.FullPath,
0,
)
if localNode != nil {
localNode = ensureCommittedNode(logger, tree, path, localNode)
switch n := localNode.(type) {
case *tries.LazyVectorCommitmentBranchNode:
localCommitment = n.Commitment
case *tries.LazyVectorCommitmentLeafNode:
localCommitment = n.Commitment
}
}
}
// If commitments match, subtrees are identical
if bytes.Equal(localCommitment, serverBranch.Commitment) {
return nil
}
// Log divergence for global prover sync
isGlobalProver := isGlobalProverShardBytes(shardKey)
var localNodeType string
var localFullPrefix []int
switch n := localNode.(type) {
case *tries.LazyVectorCommitmentBranchNode:
localNodeType = "branch"
localFullPrefix = n.FullPrefix
case *tries.LazyVectorCommitmentLeafNode:
localNodeType = "leaf"
case nil:
localNodeType = "nil"
default:
localNodeType = "unknown"
}
// Check for path prefix mismatch
serverFullPath := toIntSlice(serverBranch.FullPath)
pathMismatch := !slices.Equal(localFullPrefix, serverFullPath)
if isGlobalProver {
logger.Info("global prover sync: commitment divergence",
zap.String("phase", phaseSet.String()),
zap.String("server_path", hex.EncodeToString(packPath(serverBranch.FullPath))),
zap.String("local_path", hex.EncodeToString(packPath(toInt32Slice(localFullPrefix)))),
zap.Bool("path_mismatch", pathMismatch),
zap.Int("path_depth", len(serverBranch.FullPath)),
zap.String("local_commitment", hex.EncodeToString(localCommitment)),
zap.String("server_commitment", hex.EncodeToString(serverBranch.Commitment)),
zap.Bool("local_has_data", localNode != nil),
zap.String("local_node_type", localNodeType),
zap.Int("server_children", len(serverBranch.Children)),
zap.Bool("server_is_leaf", serverBranch.IsLeaf),
)
}
// If server node is a leaf or has no children, fetch all leaves
if serverBranch.IsLeaf || len(serverBranch.Children) == 0 {
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
// If we have NO local data at this path, fetch all leaves directly.
// This avoids N round trips for N children when we need all of them anyway.
if localNode == nil {
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
// Structural mismatch: local is a leaf but server is a branch with children.
// We can't compare children because local has none - fetch all server leaves.
if _, isLeaf := localNode.(*tries.LazyVectorCommitmentLeafNode); isLeaf {
if isGlobalProver {
logger.Info("global prover sync: structural mismatch - local leaf vs server branch, fetching leaves",
zap.Int("path_depth", len(serverBranch.FullPath)),
zap.Int("server_children", len(serverBranch.Children)),
)
}
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
// Compare children and recurse
localChildren := make(map[int32][]byte)
if tree != nil && tree.Root != nil {
path := toIntSlice(serverBranch.FullPath)
if branch, ok := localNode.(*tries.LazyVectorCommitmentBranchNode); ok {
for i := 0; i < 64; i++ {
child := branch.Children[i]
if child == nil {
child, _ = branch.Store.GetNodeByPath(
tree.SetType,
tree.PhaseType,
tree.ShardKey,
slices.Concat(path, []int{i}),
)
}
if child != nil {
childPath := slices.Concat(path, []int{i})
child = ensureCommittedNode(logger, tree, childPath, child)
switch c := child.(type) {
case *tries.LazyVectorCommitmentBranchNode:
localChildren[int32(i)] = c.Commitment
case *tries.LazyVectorCommitmentLeafNode:
localChildren[int32(i)] = c.Commitment
}
}
}
}
}
if isGlobalProver {
logger.Info("global prover sync: comparing children",
zap.Int("path_depth", len(serverBranch.FullPath)),
zap.Int("local_children_count", len(localChildren)),
zap.Int("server_children_count", len(serverBranch.Children)),
)
}
childrenMatched := 0
childrenToSync := 0
for _, serverChild := range serverBranch.Children {
localChildCommit := localChildren[serverChild.Index]
// Both nil/empty means we have no data on either side - skip
// But if server has a commitment and we don't (or vice versa), we need to sync
localEmpty := len(localChildCommit) == 0
serverEmpty := len(serverChild.Commitment) == 0
if localEmpty && serverEmpty {
// Neither side has data, skip
childrenMatched++
continue
}
if bytes.Equal(localChildCommit, serverChild.Commitment) {
// Child matches, skip
childrenMatched++
continue
}
childrenToSync++
// Need to sync this child
childPath := append(slices.Clone(serverBranch.FullPath), serverChild.Index)
// Query for child branch
err := stream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKey,
PhaseSet: phaseSet,
Path: childPath,
ExpectedRoot: expectedRoot,
},
},
})
if err != nil {
return errors.Wrap(err, "send GetBranch for child")
}
resp, err := stream.Recv()
if err != nil {
return errors.Wrap(err, "receive GetBranch response for child")
}
if errResp := resp.GetError(); errResp != nil {
logger.Warn("error getting child branch",
zap.String("error", errResp.Message),
zap.String("path", hex.EncodeToString(packPath(childPath))),
)
continue
}
childBranch := resp.GetBranch()
if childBranch == nil {
continue
}
// Recurse
if err := hg.syncSubtree(stream, shardKey, phaseSet, expectedRoot, childBranch, localSet, logger); err != nil {
return err
}
}
if isGlobalProver {
logger.Info("global prover sync: children comparison complete",
zap.Int("path_depth", len(serverBranch.FullPath)),
zap.Int("matched", childrenMatched),
zap.Int("synced", childrenToSync),
)
}
// If parent diverged but ALL children matched, we have an inconsistent state.
// The parent commitment should be deterministic from children, so this indicates
// corruption or staleness. Force fetch all leaves to resolve.
if childrenToSync == 0 && len(serverBranch.Children) > 0 {
if isGlobalProver {
logger.Warn("global prover sync: parent diverged but all children matched - forcing leaf fetch",
zap.Int("path_depth", len(serverBranch.FullPath)),
zap.Int("children_count", len(serverBranch.Children)),
)
}
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
return nil
}

func (hg *HypergraphCRDT) fetchAndIntegrateLeaves(
stream protobufs.HypergraphComparisonService_PerformSyncClient,
shardKey []byte,
phaseSet protobufs.HypergraphPhaseSet,
expectedRoot []byte,
path []int32,
localSet hypergraph.IdSet,
logger *zap.Logger,
) error {
isGlobalProver := isGlobalProverShardBytes(shardKey)
if isGlobalProver {
logger.Info("global prover sync: fetching leaves",
zap.String("path", hex.EncodeToString(packPath(path))),
zap.Int("path_depth", len(path)),
)
} else {
logger.Debug("fetching leaves",
zap.String("path", hex.EncodeToString(packPath(path))),
)
}
var continuationToken []byte
totalFetched := 0
for {
err := stream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetLeaves{
GetLeaves: &protobufs.HypergraphSyncGetLeavesRequest{
ShardKey: shardKey,
PhaseSet: phaseSet,
Path: path,
MaxLeaves: 1000,
ContinuationToken: continuationToken,
ExpectedRoot: expectedRoot,
},
},
})
if err != nil {
return errors.Wrap(err, "send GetLeaves request")
}
resp, err := stream.Recv()
if err != nil {
return errors.Wrap(err, "receive GetLeaves response")
}
if errResp := resp.GetError(); errResp != nil {
return errors.Errorf("server error: %s", errResp.Message)
}
leavesResp := resp.GetLeaves()
if leavesResp == nil {
return errors.New("unexpected response type")
}
// Integrate leaves into local tree
txn, err := hg.store.NewTransaction(false)
if err != nil {
return errors.Wrap(err, "create transaction")
}
for _, leaf := range leavesResp.Leaves {
atom := AtomFromBytes(leaf.Value)
// Persist underlying tree if present
if len(leaf.UnderlyingData) > 0 {
vtree, err := tries.DeserializeNonLazyTree(leaf.UnderlyingData)
if err == nil {
if err := hg.store.SaveVertexTree(txn, leaf.Key, vtree); err != nil {
logger.Warn("failed to save vertex tree", zap.Error(err))
}
}
}
if err := localSet.Add(txn, atom); err != nil {
txn.Abort()
return errors.Wrap(err, "add leaf to local set")
}
}
if err := txn.Commit(); err != nil {
return errors.Wrap(err, "commit transaction")
}
totalFetched += len(leavesResp.Leaves)
logger.Debug("fetched leaves batch",
zap.String("path", hex.EncodeToString(packPath(path))),
zap.Int("count", len(leavesResp.Leaves)),
zap.Int("totalFetched", totalFetched),
zap.Uint64("totalAvailable", leavesResp.TotalLeaves),
)
// Check if more leaves remain
if len(leavesResp.ContinuationToken) == 0 {
break
}
continuationToken = leavesResp.ContinuationToken
}
if isGlobalProver {
logger.Info("global prover sync: leaves integrated",
zap.String("path", hex.EncodeToString(packPath(path))),
zap.Int("total_fetched", totalFetched),
)
}
return nil
}

func isPrefixOrEqual(prefix, path []int) bool {
if len(prefix) > len(path) {
return false
}
for i, v := range prefix {
if path[i] != v {
return false
}
}
return true
}
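
// Illustrative only (not part of the original file): how isPrefixOrEqual gates
// the navigation above. With a covered prefix of [3, 7], a server path of [3]
// is not deep enough yet (descend into child 7 next), [3, 7] and [3, 7, 1]
// both reach or pass the prefix, and [3, 6] can never match.
func exampleCoveredPrefixCheck() {
	covered := []int{3, 7}
	_ = isPrefixOrEqual(covered, []int{3})       // false: keep descending
	_ = isPrefixOrEqual(covered, []int{3, 7})    // true: sync point reached
	_ = isPrefixOrEqual(covered, []int{3, 7, 1}) // true: already past the prefix
	_ = isPrefixOrEqual(covered, []int{3, 6})    // false: outside our coverage
}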


@@ -8,7 +8,9 @@ package frost
import (
"crypto/sha512"
"math/big"

"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
@@ -25,3 +27,42 @@ func (ed Ed25519ChallengeDeriver) DeriveChallenge(msg []byte, pubKey curves.Poin
_, _ = h.Write(msg)
return new(curves.ScalarEd25519).SetBytesWide(h.Sum(nil))
}

// Ed448ChallengeDeriver implements ChallengeDerive for Ed448 curves
// Ed448 uses SHAKE256 for hashing per RFC 8032
type Ed448ChallengeDeriver struct{}

func (ed Ed448ChallengeDeriver) DeriveChallenge(msg []byte, pubKey curves.Point, r curves.Point) (curves.Scalar, error) {
// Ed448 challenge derivation per RFC 8032:
// SHAKE256(dom4(0, "") || R || A || M, 114) reduced mod L
//
// dom4(phflag, context) = "SigEd448" || octet(phflag) || octet(len(context)) || context
// For pure Ed448 (no prehash, empty context): dom4(0, "") = "SigEd448" || 0x00 || 0x00
h := sha3.NewShake256()
// Write dom4 prefix for Ed448
_, _ = h.Write([]byte("SigEd448"))
_, _ = h.Write([]byte{0x00}) // phflag = 0 (not prehashed)
_, _ = h.Write([]byte{0x00}) // context length = 0
// Write R || A || M
_, _ = h.Write(r.ToAffineCompressed())
_, _ = h.Write(pubKey.ToAffineCompressed())
_, _ = h.Write(msg)
// Read 114 bytes (2 * 57 = 114, matching circl's hashSize)
raw := [114]byte{}
_, _ = h.Read(raw[:])
// Convert little-endian bytes to big.Int for proper modular reduction
// The hash output is in little-endian format
reversed := make([]byte, 114)
for i := 0; i < 114; i++ {
reversed[113-i] = raw[i]
}
hashInt := new(big.Int).SetBytes(reversed)
// SetBigInt performs proper modular reduction by the group order
return new(curves.ScalarEd448).SetBigInt(hashInt)
}
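
// Illustrative only (not part of the original change): how a FROST signing step
// might invoke the deriver above. pubKey and commitmentR are assumed to already
// be Ed448 points produced elsewhere in the protocol; the deriver is stateless.
func exampleDeriveEd448Challenge(
	msg []byte,
	pubKey curves.Point,
	commitmentR curves.Point,
) (curves.Scalar, error) {
	// c = SHAKE256("SigEd448" || 0x00 || 0x00 || R || A || M, 114) mod L
	return Ed448ChallengeDeriver{}.DeriveChallenge(msg, pubKey, commitmentR)
}
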


@@ -203,7 +203,7 @@ var appConsensusSet = wire.NewSet(
app.NewAppConsensusEngineFactory,
)
func NewDHTNode(*zap.Logger, *config.Config, uint) (*DHTNode, error) {
func NewDHTNode(*zap.Logger, *config.Config, uint, p2p.ConfigDir) (*DHTNode, error) {
panic(wire.Build(
pubSubSet,
newDHTNode,
@@ -228,6 +228,7 @@ func NewDataWorkerNodeWithProxyPubsub(
coreId uint,
rpcMultiaddr string,
parentProcess int,
configDir p2p.ConfigDir,
) (*DataWorkerNode, error) {
panic(wire.Build(
verencSet,
@@ -251,6 +252,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(
coreId uint,
rpcMultiaddr string,
parentProcess int,
configDir p2p.ConfigDir,
) (*DataWorkerNode, error) {
panic(wire.Build(
verencSet,
@@ -274,6 +276,7 @@ func NewDataWorkerNode(
coreId uint,
rpcMultiaddr string,
parentProcess int,
configDir p2p.ConfigDir,
) (*DataWorkerNode, error) {
if config.Engine.EnableMasterProxy {
return NewDataWorkerNodeWithProxyPubsub(
@@ -282,6 +285,7 @@ func NewDataWorkerNode(
coreId,
rpcMultiaddr,
parentProcess,
configDir,
)
} else {
return NewDataWorkerNodeWithoutProxyPubsub(
@@ -290,6 +294,7 @@
coreId,
rpcMultiaddr,
parentProcess,
configDir,
)
}
}
@@ -385,6 +390,7 @@ func NewMasterNode(
logger *zap.Logger,
config *config.Config,
coreId uint,
configDir p2p.ConfigDir,
) (*MasterNode, error) {
panic(wire.Build(
verencSet,


@@ -46,10 +46,10 @@ import (
// Injectors from wire.go:
func NewDHTNode(logger *zap.Logger, configConfig *config.Config, uint2 uint) (*DHTNode, error) {
func NewDHTNode(logger *zap.Logger, configConfig *config.Config, uint2 uint, configDir p2p.ConfigDir) (*DHTNode, error) {
p2PConfig := configConfig.P2P
engineConfig := configConfig.Engine
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, uint2)
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, uint2, configDir)
dhtNode, err := newDHTNode(blossomSub)
if err != nil {
return nil, err
@@ -66,15 +66,13 @@ func NewDBConsole(configConfig *config.Config) (*DBConsole, error) {
}
func NewClockStore(logger *zap.Logger, configConfig *config.Config, uint2 uint) (store.ClockStore, error) {
dbConfig := configConfig.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, uint2)
pebbleDB := store2.NewPebbleDB(logger, configConfig, uint2)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
return pebbleClockStore, nil
}
func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int) (*DataWorkerNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int, configDir p2p.ConfigDir) (*DataWorkerNode, error) {
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@@ -88,6 +86,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
if err != nil {
return nil, err
}
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)
@@ -132,9 +131,8 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
return dataWorkerNode, nil
}
func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int) (*DataWorkerNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int, configDir p2p.ConfigDir) (*DataWorkerNode, error) {
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@@ -148,6 +146,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
if err != nil {
return nil, err
}
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)
@@ -161,7 +160,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
}
p2PConfig := config2.P2P
engineConfig := config2.Engine
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId)
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId, configDir)
pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger)
pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger)
pebbleConsensusStore := store2.NewPebbleConsensusStore(pebbleDB, logger)
@@ -189,9 +188,8 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
return dataWorkerNode, nil
}
func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint) (*MasterNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint, configDir p2p.ConfigDir) (*MasterNode, error) {
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@@ -200,8 +198,9 @@ func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint) (*Ma
fileKeyManager := keys.NewFileKeyManager(config2, bls48581KeyConstructor, decaf448KeyConstructor, logger)
p2PConfig := config2.P2P
engineConfig := config2.Engine
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId)
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId, configDir)
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)
@@ -301,10 +300,10 @@ var engineSet = wire.NewSet(vdf.NewCachedWesolowskiFrameProver, bls48581.NewKZGI
),
)
func provideHypergraph(store3 *store2.PebbleHypergraphStore, config *config.Config,
func provideHypergraph(store3 *store2.PebbleHypergraphStore, config2 *config.Config,
) (hypergraph.Hypergraph, error) {
workers := 1
if config.Engine.ArchiveMode {
if config2.Engine.ArchiveMode {
workers = 100
}
return store3.LoadHypergraph(&tests.Nopthenticator{}, workers)
@@ -343,18 +342,21 @@ func NewDataWorkerNode(
coreId uint,
rpcMultiaddr string,
parentProcess int,
configDir p2p.ConfigDir,
) (*DataWorkerNode, error) {
if config2.Engine.EnableMasterProxy {
return NewDataWorkerNodeWithProxyPubsub(
logger, config2, coreId,
rpcMultiaddr,
parentProcess,
configDir,
)
} else {
return NewDataWorkerNodeWithoutProxyPubsub(
logger, config2, coreId,
rpcMultiaddr,
parentProcess,
configDir,
)
}
}
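
// Illustrative only (not part of this change): with p2p.ConfigDir threaded
// through the injectors, callers of the generated constructors now pass the
// config directory alongside the existing arguments, e.g.
//
//	node, err := NewDataWorkerNode(
//		logger, cfg, coreId, rpcMultiaddr, parentPid,
//		p2p.ConfigDir(dir), // assumes ConfigDir is a string-based type; dir is a
//		                    // hypothetical path not shown in this diff
//	)
//
// NewMasterNode and NewDHTNode gain the same trailing parameter so that
// NewBlossomSub can receive the directory.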

Some files were not shown because too many files have changed in this diff.