ceremonyclient/node/protobufs/data.proto
Cassandra Heart b0cf294c99
V2.0.2.3 (#321)
* roll up v2.0.1-b2 to develop

* b2-fixed

* adjust return data of fast sync so it doesn't return the earliest frame

* -b3

* fix: announce peer based on leading frame, not initial frame; fix: looping bug

* fix: last batch fails due to underflow; qol: make logging chattier

* -b4

* resolve frame cache issue

* fix: mint loop + re-migrate

* fix: register execution panic

* fix: mint loop, other side

* fix: handle unexpected return of nil status

* final -b4

* handle subtle change to migration

* qol: add heuristic to handle corruption scenario

* bump genesis

* qol: use separate channel for worker

* final parameterization, parallelize streams

* Add direct peers to blossomsub (#309)

Co-authored-by: Tyler Sturos <tyler.john@qcommander.sh>

* chore(docker): add ca-certificates to fix x509 error. (#307)

* Update qcommander.sh bootstrap (#304)

* chore(docker): add ca-certificates to fix x509 error.

---------

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>

* deprecate signers 10, 11, 14, 17

* adjust signatory check size to match rotated out signers

* qol: sync by rebroadcast

* upgrade version

* more small adjustments

* wait a little longer

* fix: don't use iterator for frame directly until iterator is fixed

* change iterator, genesis for testnet

* adjust to previous sync handling

* adjust: don't grab the very latest while it's already being broadcasted

* ok, ready for testnet

* handle rebroadcast quirks

* more adjustments from testing

* faster

* temporarily bulk process on frame candidates

* resolve separate frames

* don't loop

* make worker reset resume to check where it should continue

* move window

* reduce signature count now that supermajority signed last

* resolve bottlenecks

* remove GOMAXPROCS limit for now

* revisions for v2.0.2.1

* bump version

* bulk import

* reintroduce sync

* small adjustments to make life better

* check bitmask for peers and keep alive

* adjust reconnect

* ensure peer doesn't fall off address list

* adjust blossomsub to background discovery

* bump version

* remove dev check

* remove debug log line

* further adjustments

* a little more logic around connection management

* v2.0.2.3

* Fix peer discovery (#319)

* Fix peer discovery

* Make peer discovery connections parallel

* Monitor peers via pings (#317)

* Support QUILIBRIUM_SIGNATURE_CHECK in client (#314)

* Ensure direct peers are not pruned by resource limits (#315)

* Support pprof profiling via HTTP (#313)

* Fix CPU profiling

* Add pprof server support

* Additional peering connection improvements (#320)

* Lookup peers if not enough external peers are available

* Make bootstrap peer discovery sensitive to a lack of bootstrappers

---------

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Tyler Sturos <tyler.john@qcommander.sh>
Co-authored-by: linquanisaac <33619994+linquanisaac@users.noreply.github.com>
Co-authored-by: petricadaipegsp <155911522+petricadaipegsp@users.noreply.github.com>
2024-10-31 16:43:49 -05:00

118 lines
3.0 KiB
Protocol Buffer

syntax = "proto3";
package quilibrium.node.data.pb;
option go_package = "source.quilibrium.com/quilibrium/monorepo/node/protobufs";
import "channel.proto";
import "clock.proto";
import "keys.proto";
import "node.proto";
// Gossip announcement carrying a batch of known data peers.
message DataPeerListAnnounce {
  // The peers being announced.
  repeated DataPeer peer_list = 1;
}
// A single peer entry within a DataPeerListAnnounce.
message DataPeer {
  // Raw peer identifier bytes. NOTE(review): presumably a libp2p peer ID —
  // confirm encoding against the node implementation.
  bytes peer_id = 1;

  // Dialable multiaddress string for reaching the peer.
  string multiaddr = 2;

  // Highest frame number the peer claims to hold.
  uint64 max_frame = 3;

  // Announcement timestamp. NOTE(review): units (seconds vs. milliseconds
  // since epoch) are not established by this file — verify in the producer.
  int64 timestamp = 4;

  // Peer software version, as raw bytes.
  bytes version = 5;

  // Signature over the announcement payload.
  bytes signature = 6;

  // Public key corresponding to signature.
  bytes public_key = 7;

  // Serialized total-distance metric for the peer's frames.
  bytes total_distance = 8;
}
// A compressed batch of clock frames plus the inclusion data needed to
// reconstruct them, covering an inclusive frame-number range.
message DataCompressedSync {
  // First frame number covered by this batch.
  uint64 from_frame_number = 1;

  // Last frame number covered by this batch.
  uint64 to_frame_number = 2;

  // Clock frames with bulk payload data stripped (truncated); the payloads
  // are carried separately via proofs/segments.
  repeated quilibrium.node.clock.pb.ClockFrame truncated_clock_frames = 3;

  // Inclusion proofs keyed by frame commitment.
  repeated InclusionProofsMap proofs = 4;

  // Raw data segments keyed by hash, referenced from the proofs.
  repeated InclusionSegmentsMap segments = 5;
}
// Challenge-response authentication for a sync request.
message SyncRequestAuthentication {
  // Identifier of the authenticating peer.
  bytes peer_id = 1;

  // Challenge bytes to be signed by the peer.
  bytes challenge = 2;

  // Ed448 signature produced over the challenge.
  quilibrium.node.keys.pb.Ed448Signature response = 3;
}
// Client-to-server message for the bidirectional compressed-sync
// negotiation stream; exactly one variant is set per message.
message DataCompressedSyncRequestMessage {
  oneof sync_message {
    // Preflight exchange before requesting frames.
    quilibrium.node.clock.pb.ClockFramesPreflight preflight = 1;

    // Request for a range of clock frames.
    quilibrium.node.clock.pb.ClockFramesRequest request = 2;

    // Challenge-response authentication payload.
    SyncRequestAuthentication authentication = 3;
  }
}
// Server-to-client message for the bidirectional compressed-sync
// negotiation stream; exactly one variant is set per message.
message DataCompressedSyncResponseMessage {
  oneof sync_message {
    // Preflight exchange before sending frames.
    quilibrium.node.clock.pb.ClockFramesPreflight preflight = 1;

    // A compressed batch of frames answering the request.
    DataCompressedSync response = 2;
  }
}
// Inclusion proof for one frame commitment, with the commitments it covers.
message InclusionProofsMap {
  // Commitment of the frame this proof belongs to.
  bytes frame_commit = 1;

  // Serialized inclusion proof bytes.
  bytes proof = 2;

  // Commitments covered by this proof.
  repeated InclusionCommitmentsMap commitments = 3;
}
// A content-addressed data segment: raw bytes keyed by their hash.
message InclusionSegmentsMap {
  // Hash identifying the segment (referenced by
  // InclusionCommitmentsMap.segment_hashes).
  bytes hash = 1;

  // Raw segment payload.
  bytes data = 2;
}
// A commitment and the data segments that reconstruct its payload.
message InclusionCommitmentsMap {
  // Commitment bytes.
  bytes commitment = 1;

  // Type URL describing the committed payload's message type.
  string type_url = 2;

  // Hashes of the segments (see InclusionSegmentsMap) that, concatenated,
  // form the committed data.
  repeated bytes segment_hashes = 3;
}
// Request for a single data clock frame by frame number.
message GetDataFrameRequest {
  // Frame number to fetch.
  uint64 frame_number = 1;
}
// Response to GetDataFrameRequest: the frame plus an accompanying proof.
message DataFrameResponse {
  // The requested clock frame.
  quilibrium.node.clock.pb.ClockFrame clock_frame = 1;

  // Proof bytes associated with the frame.
  bytes proof = 2;
}
// Result of a pre-midnight mint operation or status query.
message PreMidnightMintResponse {
  // Address associated with the mint.
  bytes address = 1;

  // Current increment counter for the mint.
  uint32 increment = 2;
}
// Request for the pre-midnight mint status of a given owner.
message PreMidnightMintStatusRequest {
  // Owner whose mint status is queried.
  bytes owner = 1;
}
// A rebroadcast bundle of clock frames over an inclusive range.
message FrameRebroadcast {
  // First frame number in the rebroadcast range.
  uint64 from = 1;

  // Last frame number in the rebroadcast range.
  uint64 to = 2;

  // The frames being rebroadcast.
  repeated quilibrium.node.clock.pb.ClockFrame clock_frames = 3;

  // Random bytes accompanying the rebroadcast. NOTE(review): purpose
  // (e.g. dedup/jitter token) not established by this file — verify.
  bytes random = 4;
}
// Node-facing service for frame sync, channels, and pre-midnight minting.
service DataService {
  // Streams compressed sync batches for the requested frame range.
  rpc GetCompressedSyncFrames(quilibrium.node.clock.pb.ClockFramesRequest)
      returns (stream DataCompressedSync);

  // Bidirectional negotiated sync: preflight, authentication, then frames.
  rpc NegotiateCompressedSyncFrames(stream DataCompressedSyncRequestMessage)
      returns (stream DataCompressedSyncResponseMessage);

  // Bidirectional public channel of P2P envelopes.
  rpc GetPublicChannel(stream quilibrium.node.channel.pb.P2PChannelEnvelope)
      returns (stream quilibrium.node.channel.pb.P2PChannelEnvelope);

  // Fetches a single frame with its proof.
  rpc GetDataFrame(GetDataFrameRequest) returns (DataFrameResponse);

  // Processes a pre-midnight mint from a coin mint request.
  rpc HandlePreMidnightMint(quilibrium.node.node.pb.MintCoinRequest)
      returns (PreMidnightMintResponse);

  // Returns the current pre-midnight mint status for an owner.
  rpc GetPreMidnightMintStatus(PreMidnightMintStatusRequest)
      returns (PreMidnightMintResponse);
}
// Request to compute a challenge proof over a clock frame.
message ChallengeProofRequest {
  // Field number 2 is skipped (numbering jumps from 1 to 3), which
  // indicates a removed field. Reserve it so the number cannot be reused
  // with different semantics — reuse would silently reinterpret old wire
  // data. NOTE(review): if a name was associated with the removed field,
  // reserve that name as well.
  reserved 2;

  // Identifier of the peer the proof is computed for.
  bytes peer_id = 1;

  // Clock frame the challenge proof is computed against.
  quilibrium.node.clock.pb.ClockFrame clock_frame = 3;
}
// Result of a challenge proof computation.
message ChallengeProofResponse {
  // Raw proof output bytes.
  bytes output = 1;
}
// IPC-local service for delegating proof computation (e.g. to a worker).
service DataIPCService {
  // Computes a challenge proof for the given request.
  rpc CalculateChallengeProof(ChallengeProofRequest)
      returns (ChallengeProofResponse);
}