From c797d482f9e5452a55768f5b06c0f6d0c557c334 Mon Sep 17 00:00:00 2001 From: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com> Date: Tue, 11 Nov 2025 05:00:17 -0600 Subject: [PATCH] v2.1.0.5 (#457) * wip: conversion of hotstuff from flow into Q-oriented model * bulk of tests * remaining non-integration tests * add integration test, adjust log interface, small tweaks * further adjustments, restore full pacemaker shape * add component lifecycle management+supervisor * further refinements * resolve timeout hanging * mostly finalized state for consensus * bulk of engine swap out * lifecycle-ify most types * wiring nearly complete, missing needed hooks for proposals * plugged in, vetting message validation paths * global consensus, plugged in and verified * app shard now wired in too * do not decode empty keys.yml (#456) * remove obsolete engine.maxFrames config parameter (#454) * default to Info log level unless debug is enabled (#453) * respect config's "logging" section params, remove obsolete single-file logging (#452) * Trivial code cleanup aiming to reduce Go compiler warnings (#451) * simplify range traversal * simplify channel read for single select case * delete rand.Seed() deprecated in Go 1.20 and no-op as of Go 1.24 * simplify range traversal * simplify channel read for single select case * remove redundant type from array * simplify range traversal * simplify channel read for single select case * RC slate * finalize 2.1.0.5 * Update comments in StrictMonotonicCounter Fix comment formatting and clarify description. --------- Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com> --- config/config.go | 2 - config/engine.go | 1 - config/logger.go | 15 +- config/version.go | 4 +- consensus/.mockery.yaml | 18 + consensus/README.md | 300 +- consensus/consensus_committee.go | 154 + consensus/consensus_consumer.go | 453 +++ consensus/consensus_events.go | 84 + consensus/consensus_finalizer.go | 23 + consensus/consensus_forks.go | 106 + consensus/consensus_leader.go | 30 + consensus/consensus_liveness.go | 25 + consensus/consensus_pacemaker.go | 65 + consensus/consensus_producer.go | 25 + consensus/consensus_safety_rules.go | 73 + consensus/consensus_signature.go | 161 + consensus/consensus_signer.go | 39 + consensus/consensus_store.go | 18 + consensus/consensus_sync.go | 20 + consensus/consensus_timeout.go | 127 + consensus/consensus_tracing.go | 102 + consensus/consensus_validator.go | 32 + consensus/consensus_verifier.go | 45 + consensus/consensus_voting.go | 43 + consensus/consensus_weight.go | 10 + .../counters/strict_monotonic_counter.go | 50 + consensus/eventhandler/event_handler.go | 825 +++++ consensus/eventhandler/event_handler_test.go | 1103 ++++++ consensus/eventloop/event_loop.go | 382 +++ consensus/eventloop/event_loop_test.go | 262 ++ .../example/generic_consensus_example.go | 1102 ------ consensus/forest/leveled_forest.go | 394 +++ consensus/forest/vertex.go | 103 + consensus/forks/forks.go | 657 ++++ consensus/forks/forks_test.go | 950 ++++++ consensus/forks/state_builder_test.go | 165 + consensus/forks/state_container.go | 77 + consensus/go.mod | 42 +- consensus/go.sum | 47 +- consensus/helper/quorum_certificate.go | 122 + consensus/helper/state.go | 467 +++ consensus/helper/timeout_certificate.go | 171 + consensus/integration/assertion_test.go | 40 + consensus/integration/conditions_test.go | 19 + consensus/integration/connect_test.go | 114 + consensus/integration/defaults_test.go | 27 + consensus/integration/filters_test.go | 76 + 
consensus/integration/instance_test.go | 734 ++++ consensus/integration/integration_test.go | 153 + consensus/integration/liveness_test.go | 422 +++ consensus/integration/options_test.go | 109 + consensus/mocks/communicator_consumer.go | 48 + consensus/mocks/consensus_store.go | 123 + consensus/mocks/consumer.go | 126 + consensus/mocks/dynamic_committee.go | 249 ++ consensus/mocks/event_handler.go | 162 + consensus/mocks/event_loop.go | 67 + consensus/mocks/finalization_consumer.go | 37 + consensus/mocks/finalizer.go | 45 + consensus/mocks/follower_consumer.go | 47 + consensus/mocks/follower_loop.go | 32 + consensus/mocks/forks.go | 183 + consensus/mocks/leader_provider.go | 89 + consensus/mocks/liveness_provider.go | 77 + consensus/mocks/pacemaker.go | 205 ++ consensus/mocks/packer.go | 98 + consensus/mocks/participant_consumer.go | 91 + consensus/mocks/proposal_duration_provider.go | 47 + .../mocks/proposal_violation_consumer.go | 37 + consensus/mocks/read_only_consensus_store.go | 87 + consensus/mocks/replicas.go | 189 + consensus/mocks/safety_rules.go | 117 + consensus/mocks/signature_aggregator.go | 93 + consensus/mocks/signer.go | 87 + consensus/mocks/state_producer.go | 57 + consensus/mocks/state_signer_decoder.go | 57 + consensus/mocks/sync_provider.go | 61 + .../mocks/timeout_aggregation_consumer.go | 62 + .../timeout_aggregation_violation_consumer.go | 37 + consensus/mocks/timeout_aggregator.go | 57 + consensus/mocks/timeout_collector.go | 63 + consensus/mocks/timeout_collector_consumer.go | 52 + consensus/mocks/timeout_collector_factory.go | 59 + consensus/mocks/timeout_collectors.go | 71 + consensus/mocks/timeout_processor.go | 45 + consensus/mocks/timeout_processor_factory.go | 59 + .../mocks/timeout_signature_aggregator.go | 132 + consensus/mocks/trace_logger.go | 34 + consensus/mocks/validator.go | 111 + consensus/mocks/verifier.go | 81 + consensus/mocks/verifying_vote_processor.go | 85 + consensus/mocks/vote_aggregation_consumer.go | 52 + .../vote_aggregation_violation_consumer.go | 42 + consensus/mocks/vote_aggregator.go | 80 + consensus/mocks/vote_collector.go | 106 + consensus/mocks/vote_collector_consumer.go | 37 + consensus/mocks/vote_collectors.go | 131 + consensus/mocks/vote_processor.go | 65 + consensus/mocks/vote_processor_factory.go | 59 + consensus/mocks/voting_provider.go | 282 ++ consensus/mocks/weight_provider.go | 42 + .../mocks/weighted_signature_aggregator.go | 130 + consensus/mocks/workerpool.go | 34 + consensus/mocks/workers.go | 29 + consensus/models/aggregated_signature.go | 48 + consensus/models/consensus_state.go | 15 + consensus/models/control_flows.go | 24 + consensus/models/errors.go | 588 ++++ consensus/models/liveness_state.go | 14 + consensus/models/proposal.go | 45 + consensus/models/quorum_certificate.go | 20 + consensus/models/state.go | 101 + consensus/models/timeout_certificate.go | 19 + consensus/models/timeout_state.go | 65 + consensus/models/unique.go | 26 + consensus/notifications/log_consumer.go | 486 +++ consensus/notifications/noop_consumer.go | 177 + .../pubsub/communicator_distributor.go | 104 + consensus/notifications/pubsub/distributor.go | 127 + .../pubsub/finalization_distributor.go | 83 + .../pubsub/participant_distributor.go | 181 + .../pubsub/proposal_violation_distributor.go | 59 + .../timeout_aggregation_violation_consumer.go | 59 + .../pubsub/timeout_collector_distributor.go | 88 + .../vote_aggregation_violation_consumer.go | 75 + .../pubsub/vote_collector_distributor.go | 52 + consensus/pacemaker/pacemaker.go | 331 ++ 
consensus/pacemaker/pacemaker_test.go | 439 +++ consensus/pacemaker/proposal_timing.go | 36 + consensus/pacemaker/rank_tracker.go | 190 ++ consensus/pacemaker/rank_tracker_test.go | 253 ++ consensus/pacemaker/timeout/config.go | 124 + consensus/pacemaker/timeout/config_test.go | 83 + consensus/pacemaker/timeout/controller.go | 185 + .../pacemaker/timeout/controller_test.go | 157 + consensus/participant/participant.go | 174 + consensus/recovery/recover.go | 142 + consensus/safetyrules/safety_rules.go | 561 +++ consensus/safetyrules/safety_rules_test.go | 834 +++++ consensus/signature/packer.go | 74 + consensus/signature/state_signer_decoder.go | 135 + .../weighted_signature_aggregator.go | 227 ++ consensus/state_machine.go | 1364 -------- consensus/state_machine_test.go | 1055 ------ consensus/state_machine_viz.go | 360 -- .../stateproducer/safety_rules_wrapper.go | 128 + consensus/stateproducer/state_producer.go | 138 + .../timeoutaggregator/timeout_aggregator.go | 271 ++ .../timeout_aggregator_test.go | 136 + .../timeoutaggregator/timeout_collectors.go | 156 + .../timeout_collectors_test.go | 176 + consensus/timeoutcollector/aggregation.go | 227 ++ consensus/timeoutcollector/factory.go | 174 + consensus/timeoutcollector/timeout_cache.go | 122 + .../timeoutcollector/timeout_cache_test.go | 172 + .../timeoutcollector/timeout_collector.go | 152 + .../timeout_collector_test.go | 230 ++ .../timeoutcollector/timeout_processor.go | 418 +++ .../timeout_processor_test.go | 678 ++++ consensus/tracker/tracker.go | 175 + consensus/tracker/tracker_test.go | 154 + consensus/validator/validator.go | 566 +++ consensus/validator/validator_test.go | 933 +++++ consensus/verification/common.go | 128 + consensus/verification/signer.go | 120 + consensus/vote_aggregator.go | 40 + consensus/vote_collector.go | 146 + consensus/vote_collectors.go | 58 + consensus/voteaggregator/pending_status.go | 54 + consensus/voteaggregator/vote_aggregator.go | 474 +++ .../voteaggregator/vote_aggregator_test.go | 108 + consensus/voteaggregator/vote_collectors.go | 172 + .../voteaggregator/vote_collectors_test.go | 158 + consensus/votecollector/common.go | 68 + consensus/votecollector/factory.go | 181 + consensus/votecollector/factory_test.go | 118 + consensus/votecollector/statemachine.go | 417 +++ consensus/votecollector/statemachine_test.go | 286 ++ consensus/votecollector/testutil.go | 51 + consensus/votecollector/vote_cache.go | 149 + consensus/votecollector/vote_cache_test.go | 189 + consensus/votecollector/vote_processor.go | 241 ++ .../votecollector/vote_processor_test.go | 269 ++ go-libp2p-blossomsub/backoff.go | 1 - go-libp2p-blossomsub/bitmask_test.go | 2 +- go-libp2p-blossomsub/blossomsub_test.go | 31 +- .../subscription_filter_test.go | 16 +- go.mod | 11 - go.sum | 11 - lifecycle/.mockery.yaml | 18 + lifecycle/common.go | 215 ++ lifecycle/common_test.go | 338 ++ lifecycle/component.go | 373 ++ lifecycle/component_test.go | 655 ++++ lifecycle/errors.go | 13 + lifecycle/go.mod | 21 + lifecycle/go.sum | 19 + lifecycle/mocks/component.go | 85 + lifecycle/mocks/component_manager_builder.go | 67 + lifecycle/mocks/signaler_context.go | 124 + lifecycle/signaler.go | 112 + lifecycle/supervisor.go | 268 ++ lifecycle/supervisor_test.go | 449 +++ lifecycle/unittest/utils.go | 339 ++ node/app/node.go | 40 +- node/app/wire.go | 11 +- node/app/wire_gen.go | 37 +- .../consensus_signature_aggregator_wrapper.go | 115 + node/consensus/app/app_consensus_engine.go | 1928 ++++++++--- ...consensus_engine_chaos_integration_test.go | 28 +- 
.../app_consensus_engine_integration_test.go | 411 +-- .../app/consensus_dynamic_committee.go | 216 ++ .../app/consensus_leader_provider.go | 92 +- .../app/consensus_liveness_provider.go | 76 +- node/consensus/app/consensus_sync_provider.go | 363 +- .../app/consensus_transition_listener.go | 52 - node/consensus/app/consensus_types.go | 53 +- .../app/consensus_voting_provider.go | 753 +--- node/consensus/app/event_distributor.go | 33 +- node/consensus/app/factory.go | 6 + node/consensus/app/integration_helper_test.go | 3 + node/consensus/app/message_processors.go | 866 +++-- node/consensus/app/message_subscription.go | 24 +- node/consensus/app/message_validation.go | 106 +- node/consensus/app/metrics.go | 72 +- node/consensus/app/services.go | 218 +- .../consensus/events/app_event_distributor.go | 59 +- node/consensus/events/distributor_test.go | 187 +- .../events/global_event_distributor.go | 43 +- .../global/consensus_dynamic_committee.go | 213 ++ .../global/consensus_leader_provider.go | 137 +- .../global/consensus_liveness_provider.go | 63 - .../global/consensus_sync_provider.go | 312 +- .../global/consensus_transition_listener.go | 44 - node/consensus/global/consensus_types.go | 53 +- .../global/consensus_voting_provider.go | 558 +-- node/consensus/global/event_distributor.go | 44 +- node/consensus/global/factory.go | 36 +- node/consensus/global/genesis.go | 108 +- .../global/global_consensus_engine.go | 1640 +++++++-- ...lobal_consensus_engine_integration_test.go | 150 +- node/consensus/global/mainnet_genesis.json | 3 +- node/consensus/global/message_processors.go | 1053 +++--- node/consensus/global/message_subscription.go | 19 +- node/consensus/global/message_validation.go | 154 +- node/consensus/global/metrics.go | 119 +- node/consensus/global/services.go | 205 +- node/consensus/time/app_time_reel.go | 40 +- node/consensus/time/app_time_reel_test.go | 286 +- node/consensus/time/global_time_reel.go | 39 +- .../global_time_reel_equivocation_test.go | 46 +- node/consensus/time/global_time_reel_test.go | 261 +- .../time/simple_equivocation_test.go | 15 +- node/consensus/tracing/zap_tracer.go | 73 + node/consensus/voting/voting_aggregator.go | 271 ++ node/datarpc/data_worker_ipc_server.go | 12 + node/dbscan/main.go | 371 +- .../engines/compute_execution_engine.go | 109 +- .../engines/compute_execution_engine_test.go | 41 +- .../engines/global_execution_engine.go | 134 +- .../engines/global_execution_engine_test.go | 8 +- .../engines/hypergraph_execution_engine.go | 115 +- .../hypergraph_execution_engine_test.go | 8 +- .../engines/token_execution_engine.go | 110 +- .../engines/token_execution_engine_test.go | 8 +- node/execution/manager/execution_manager.go | 160 +- node/go.mod | 12 +- node/go.sum | 16 +- node/keys/file.go | 13 +- node/main.go | 22 +- node/p2p/blossomsub.go | 17 + node/p2p/onion/onion_integration_test.go | 46 +- node/p2p/peer_info_manager.go | 101 +- node/store/clock.go | 1392 +++++++- node/store/consensus.go | 353 ++ node/store/constants.go | 66 +- node/store/key.go | 14 +- node/store/pebble.go | 6 + protobufs/canonical_types.go | 8 +- protobufs/global.go | 1257 ++++++- protobufs/global.pb.go | 3039 +++++++++++------ protobufs/global.pb.gw.go | 170 + protobufs/global.proto | 226 +- protobufs/global_grpc.pb.go | 76 +- protobufs/global_test.go | 45 +- protobufs/go.mod | 22 +- protobufs/go.sum | 56 +- protobufs/keys.go | 44 +- types/consensus/distributor.go | 7 +- types/crypto/frame_prover.go | 1 + types/execution/execution_engine.go | 4 +- types/go.mod | 2 + 
types/mocks/clock_store.go | 312 ++ types/mocks/event_distributor.go | 18 +- types/mocks/frame_prover.go | 2 + types/mocks/peer_info_manager.go | 13 +- types/mocks/shard_execution.go | 15 +- types/p2p/peer_info_manager.go | 8 +- types/store/clock.go | 86 + types/tries/lazy_proof_tree.go | 4 + types/tries/proof_tree.go | 4 + utils/logging/file_logger.go | 32 +- vdf/wesolowski_frame_prover.go | 4 + 304 files changed, 45513 insertions(+), 10727 deletions(-) create mode 100644 consensus/.mockery.yaml create mode 100644 consensus/consensus_committee.go create mode 100644 consensus/consensus_consumer.go create mode 100644 consensus/consensus_events.go create mode 100644 consensus/consensus_finalizer.go create mode 100644 consensus/consensus_forks.go create mode 100644 consensus/consensus_leader.go create mode 100644 consensus/consensus_liveness.go create mode 100644 consensus/consensus_pacemaker.go create mode 100644 consensus/consensus_producer.go create mode 100644 consensus/consensus_safety_rules.go create mode 100644 consensus/consensus_signature.go create mode 100644 consensus/consensus_signer.go create mode 100644 consensus/consensus_store.go create mode 100644 consensus/consensus_sync.go create mode 100644 consensus/consensus_timeout.go create mode 100644 consensus/consensus_tracing.go create mode 100644 consensus/consensus_validator.go create mode 100644 consensus/consensus_verifier.go create mode 100644 consensus/consensus_voting.go create mode 100644 consensus/consensus_weight.go create mode 100644 consensus/counters/strict_monotonic_counter.go create mode 100644 consensus/eventhandler/event_handler.go create mode 100644 consensus/eventhandler/event_handler_test.go create mode 100644 consensus/eventloop/event_loop.go create mode 100644 consensus/eventloop/event_loop_test.go delete mode 100644 consensus/example/generic_consensus_example.go create mode 100644 consensus/forest/leveled_forest.go create mode 100644 consensus/forest/vertex.go create mode 100644 consensus/forks/forks.go create mode 100644 consensus/forks/forks_test.go create mode 100644 consensus/forks/state_builder_test.go create mode 100644 consensus/forks/state_container.go create mode 100644 consensus/helper/quorum_certificate.go create mode 100644 consensus/helper/state.go create mode 100644 consensus/helper/timeout_certificate.go create mode 100644 consensus/integration/assertion_test.go create mode 100644 consensus/integration/conditions_test.go create mode 100644 consensus/integration/connect_test.go create mode 100644 consensus/integration/defaults_test.go create mode 100644 consensus/integration/filters_test.go create mode 100644 consensus/integration/instance_test.go create mode 100644 consensus/integration/integration_test.go create mode 100644 consensus/integration/liveness_test.go create mode 100644 consensus/integration/options_test.go create mode 100644 consensus/mocks/communicator_consumer.go create mode 100644 consensus/mocks/consensus_store.go create mode 100644 consensus/mocks/consumer.go create mode 100644 consensus/mocks/dynamic_committee.go create mode 100644 consensus/mocks/event_handler.go create mode 100644 consensus/mocks/event_loop.go create mode 100644 consensus/mocks/finalization_consumer.go create mode 100644 consensus/mocks/finalizer.go create mode 100644 consensus/mocks/follower_consumer.go create mode 100644 consensus/mocks/follower_loop.go create mode 100644 consensus/mocks/forks.go create mode 100644 consensus/mocks/leader_provider.go create mode 100644 consensus/mocks/liveness_provider.go 
create mode 100644 consensus/mocks/pacemaker.go create mode 100644 consensus/mocks/packer.go create mode 100644 consensus/mocks/participant_consumer.go create mode 100644 consensus/mocks/proposal_duration_provider.go create mode 100644 consensus/mocks/proposal_violation_consumer.go create mode 100644 consensus/mocks/read_only_consensus_store.go create mode 100644 consensus/mocks/replicas.go create mode 100644 consensus/mocks/safety_rules.go create mode 100644 consensus/mocks/signature_aggregator.go create mode 100644 consensus/mocks/signer.go create mode 100644 consensus/mocks/state_producer.go create mode 100644 consensus/mocks/state_signer_decoder.go create mode 100644 consensus/mocks/sync_provider.go create mode 100644 consensus/mocks/timeout_aggregation_consumer.go create mode 100644 consensus/mocks/timeout_aggregation_violation_consumer.go create mode 100644 consensus/mocks/timeout_aggregator.go create mode 100644 consensus/mocks/timeout_collector.go create mode 100644 consensus/mocks/timeout_collector_consumer.go create mode 100644 consensus/mocks/timeout_collector_factory.go create mode 100644 consensus/mocks/timeout_collectors.go create mode 100644 consensus/mocks/timeout_processor.go create mode 100644 consensus/mocks/timeout_processor_factory.go create mode 100644 consensus/mocks/timeout_signature_aggregator.go create mode 100644 consensus/mocks/trace_logger.go create mode 100644 consensus/mocks/validator.go create mode 100644 consensus/mocks/verifier.go create mode 100644 consensus/mocks/verifying_vote_processor.go create mode 100644 consensus/mocks/vote_aggregation_consumer.go create mode 100644 consensus/mocks/vote_aggregation_violation_consumer.go create mode 100644 consensus/mocks/vote_aggregator.go create mode 100644 consensus/mocks/vote_collector.go create mode 100644 consensus/mocks/vote_collector_consumer.go create mode 100644 consensus/mocks/vote_collectors.go create mode 100644 consensus/mocks/vote_processor.go create mode 100644 consensus/mocks/vote_processor_factory.go create mode 100644 consensus/mocks/voting_provider.go create mode 100644 consensus/mocks/weight_provider.go create mode 100644 consensus/mocks/weighted_signature_aggregator.go create mode 100644 consensus/mocks/workerpool.go create mode 100644 consensus/mocks/workers.go create mode 100644 consensus/models/aggregated_signature.go create mode 100644 consensus/models/consensus_state.go create mode 100644 consensus/models/control_flows.go create mode 100644 consensus/models/errors.go create mode 100644 consensus/models/liveness_state.go create mode 100644 consensus/models/proposal.go create mode 100644 consensus/models/quorum_certificate.go create mode 100644 consensus/models/state.go create mode 100644 consensus/models/timeout_certificate.go create mode 100644 consensus/models/timeout_state.go create mode 100644 consensus/models/unique.go create mode 100644 consensus/notifications/log_consumer.go create mode 100644 consensus/notifications/noop_consumer.go create mode 100644 consensus/notifications/pubsub/communicator_distributor.go create mode 100644 consensus/notifications/pubsub/distributor.go create mode 100644 consensus/notifications/pubsub/finalization_distributor.go create mode 100644 consensus/notifications/pubsub/participant_distributor.go create mode 100644 consensus/notifications/pubsub/proposal_violation_distributor.go create mode 100644 consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go create mode 100644 consensus/notifications/pubsub/timeout_collector_distributor.go 
create mode 100644 consensus/notifications/pubsub/vote_aggregation_violation_consumer.go create mode 100644 consensus/notifications/pubsub/vote_collector_distributor.go create mode 100644 consensus/pacemaker/pacemaker.go create mode 100644 consensus/pacemaker/pacemaker_test.go create mode 100644 consensus/pacemaker/proposal_timing.go create mode 100644 consensus/pacemaker/rank_tracker.go create mode 100644 consensus/pacemaker/rank_tracker_test.go create mode 100644 consensus/pacemaker/timeout/config.go create mode 100644 consensus/pacemaker/timeout/config_test.go create mode 100644 consensus/pacemaker/timeout/controller.go create mode 100644 consensus/pacemaker/timeout/controller_test.go create mode 100644 consensus/participant/participant.go create mode 100644 consensus/recovery/recover.go create mode 100644 consensus/safetyrules/safety_rules.go create mode 100644 consensus/safetyrules/safety_rules_test.go create mode 100644 consensus/signature/packer.go create mode 100644 consensus/signature/state_signer_decoder.go create mode 100644 consensus/signature/weighted_signature_aggregator.go delete mode 100644 consensus/state_machine.go delete mode 100644 consensus/state_machine_test.go delete mode 100644 consensus/state_machine_viz.go create mode 100644 consensus/stateproducer/safety_rules_wrapper.go create mode 100644 consensus/stateproducer/state_producer.go create mode 100644 consensus/timeoutaggregator/timeout_aggregator.go create mode 100644 consensus/timeoutaggregator/timeout_aggregator_test.go create mode 100644 consensus/timeoutaggregator/timeout_collectors.go create mode 100644 consensus/timeoutaggregator/timeout_collectors_test.go create mode 100644 consensus/timeoutcollector/aggregation.go create mode 100644 consensus/timeoutcollector/factory.go create mode 100644 consensus/timeoutcollector/timeout_cache.go create mode 100644 consensus/timeoutcollector/timeout_cache_test.go create mode 100644 consensus/timeoutcollector/timeout_collector.go create mode 100644 consensus/timeoutcollector/timeout_collector_test.go create mode 100644 consensus/timeoutcollector/timeout_processor.go create mode 100644 consensus/timeoutcollector/timeout_processor_test.go create mode 100644 consensus/tracker/tracker.go create mode 100644 consensus/tracker/tracker_test.go create mode 100644 consensus/validator/validator.go create mode 100644 consensus/validator/validator_test.go create mode 100644 consensus/verification/common.go create mode 100644 consensus/verification/signer.go create mode 100644 consensus/vote_aggregator.go create mode 100644 consensus/vote_collector.go create mode 100644 consensus/vote_collectors.go create mode 100644 consensus/voteaggregator/pending_status.go create mode 100644 consensus/voteaggregator/vote_aggregator.go create mode 100644 consensus/voteaggregator/vote_aggregator_test.go create mode 100644 consensus/voteaggregator/vote_collectors.go create mode 100644 consensus/voteaggregator/vote_collectors_test.go create mode 100644 consensus/votecollector/common.go create mode 100644 consensus/votecollector/factory.go create mode 100644 consensus/votecollector/factory_test.go create mode 100644 consensus/votecollector/statemachine.go create mode 100644 consensus/votecollector/statemachine_test.go create mode 100644 consensus/votecollector/testutil.go create mode 100644 consensus/votecollector/vote_cache.go create mode 100644 consensus/votecollector/vote_cache_test.go create mode 100644 consensus/votecollector/vote_processor.go create mode 100644 
consensus/votecollector/vote_processor_test.go delete mode 100644 go.mod delete mode 100644 go.sum create mode 100644 lifecycle/.mockery.yaml create mode 100644 lifecycle/common.go create mode 100644 lifecycle/common_test.go create mode 100644 lifecycle/component.go create mode 100644 lifecycle/component_test.go create mode 100644 lifecycle/errors.go create mode 100644 lifecycle/go.mod create mode 100644 lifecycle/go.sum create mode 100644 lifecycle/mocks/component.go create mode 100644 lifecycle/mocks/component_manager_builder.go create mode 100644 lifecycle/mocks/signaler_context.go create mode 100644 lifecycle/signaler.go create mode 100644 lifecycle/supervisor.go create mode 100644 lifecycle/supervisor_test.go create mode 100644 lifecycle/unittest/utils.go create mode 100644 node/consensus/aggregator/consensus_signature_aggregator_wrapper.go create mode 100644 node/consensus/app/consensus_dynamic_committee.go delete mode 100644 node/consensus/app/consensus_transition_listener.go create mode 100644 node/consensus/global/consensus_dynamic_committee.go delete mode 100644 node/consensus/global/consensus_transition_listener.go create mode 100644 node/consensus/tracing/zap_tracer.go create mode 100644 node/consensus/voting/voting_aggregator.go create mode 100644 node/store/consensus.go diff --git a/config/config.go b/config/config.go index 9969fd4..64f18d4 100644 --- a/config/config.go +++ b/config/config.go @@ -47,7 +47,6 @@ type Config struct { Logger *LogConfig `yaml:"logger"` ListenGRPCMultiaddr string `yaml:"listenGrpcMultiaddr"` ListenRestMultiaddr string `yaml:"listenRESTMultiaddr"` - LogFile string `yaml:"logFile"` } // WithDefaults returns a copy of the config with default values filled in. @@ -293,7 +292,6 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) ( ProvingKeyId: "default-proving-key", Filter: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", GenesisSeed: genesisSeed, - MaxFrames: -1, PendingCommitWorkers: 4, }, } diff --git a/config/engine.go b/config/engine.go index e6e13c5..1e2660f 100644 --- a/config/engine.go +++ b/config/engine.go @@ -90,7 +90,6 @@ type EngineConfig struct { ProvingKeyId string `yaml:"provingKeyId"` Filter string `yaml:"filter"` GenesisSeed string `yaml:"genesisSeed"` - MaxFrames int64 `yaml:"maxFrames"` PendingCommitWorkers int64 `yaml:"pendingCommitWorkers"` MinimumPeersRequired int `yaml:"minimumPeersRequired"` StatsMultiaddr string `yaml:"statsMultiaddr"` diff --git a/config/logger.go b/config/logger.go index c503f56..d02d59a 100644 --- a/config/logger.go +++ b/config/logger.go @@ -21,18 +21,15 @@ func (c *Config) CreateLogger(coreId uint, debug bool) ( io.Closer, error, ) { - filename := c.LogFile - if filename != "" || c.Logger != nil { - dir := "" - if c.Logger != nil { - dir = c.Logger.Path - } - + if c.Logger != nil { logger, closer, err := logging.NewRotatingFileLogger( debug, coreId, - dir, - filename, + c.Logger.Path, + c.Logger.MaxSize, + c.Logger.MaxBackups, + c.Logger.MaxAge, + c.Logger.Compress, ) return logger, closer, errors.Wrap(err, "create logger") } diff --git a/config/version.go b/config/version.go index 575f4d2..821643e 100644 --- a/config/version.go +++ b/config/version.go @@ -43,9 +43,9 @@ func FormatVersion(version []byte) string { } func GetPatchNumber() byte { - return 0x04 + return 0x05 } func GetRCNumber() byte { - return 0x06 + return 0x45 } diff --git a/consensus/.mockery.yaml b/consensus/.mockery.yaml new file mode 100644 index 0000000..9ba827f --- /dev/null +++ 
b/consensus/.mockery.yaml @@ -0,0 +1,18 @@ +dir: "{{.InterfaceDir}}/mock" +outpkg: "mock" +filename: "{{.InterfaceName | snakecase}}.go" +mockname: "{{.InterfaceName}}" + +all: True +with-expecter: False +include-auto-generated: False +disable-func-mocks: True +fail-on-missing: True +disable-version-string: True +resolve-type-alias: False + +packages: + source.quilibrium.com/quilibrium/monorepo/consensus: + config: + dir: "mocks" + outpkg: "mocks" diff --git a/consensus/README.md b/consensus/README.md index ac9cb30..e75035f 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -1,300 +1,4 @@ # Consensus State Machine -A generic, extensible state machine implementation for building Byzantine Fault -Tolerant (BFT) consensus protocols. This library provides a framework for -implementing round-based consensus algorithms with cryptographic proofs. - -## Overview - -The state machine manages consensus engine state transitions through a -well-defined set of states and events. It supports generic type parameters to -allow different implementations of state data, votes, peer identities, and -collected mutations. - -## Features - -- **Generic Implementation**: Supports custom types for state data, votes, peer - IDs, and collected data -- **Byzantine Fault Tolerance**: Provides BFT consensus with < 1/3 byzantine - nodes, flexible to other probabilistic BFT implementations -- **Round-based Consensus**: Implements a round-based state transition pattern -- **Pluggable Providers**: Extensible through provider interfaces for different - consensus behaviors -- **Event-driven Architecture**: State transitions triggered by events with - optional guard conditions -- **Concurrent Safe**: Thread-safe implementation with proper mutex usage -- **Timeout Support**: Configurable timeouts for each state with automatic - transitions -- **Transition Listeners**: Observable state transitions for monitoring and - debugging - -## Core Concepts - -### States - -The state machine progresses through the following states: - -1. **StateStopped**: Initial state, engine is not running -2. **StateStarting**: Engine is initializing -3. **StateLoading**: Loading data and syncing with network -4. **StateCollecting**: Collecting data/mutations for consensus round -5. **StateLivenessCheck**: Checking peer liveness before proving -6. **StateProving**: Generating cryptographic proof (leader only) -7. **StatePublishing**: Publishing proposed state -8. **StateVoting**: Voting on proposals -9. **StateFinalizing**: Finalizing consensus round -10. **StateVerifying**: Verifying and publishing results -11. **StateStopping**: Engine is shutting down - -### Events - -Events trigger state transitions: -- `EventStart`, `EventStop`: Lifecycle events -- `EventSyncComplete`: Synchronization finished -- `EventCollectionDone`: Mutation collection complete -- `EventLivenessCheckReceived`: Peer liveness confirmed -- `EventProverSignal`: Leader selection complete -- `EventProofComplete`: Proof generation finished -- `EventProposalReceived`: New proposal received -- `EventVoteReceived`: Vote received -- `EventQuorumReached`: Voting quorum achieved -- `EventConfirmationReceived`: State confirmation received -- And more... 
- -### Type Constraints - -All generic type parameters must implement the `Unique` interface: - -```go -type Unique interface { - Identity() Identity // Returns a unique string identifier -} -``` - -## Provider Interfaces - -### SyncProvider - -Handles initial state synchronization: - -```go -type SyncProvider[StateT Unique] interface { - Synchronize( - existing *StateT, - ctx context.Context, - ) (<-chan *StateT, <-chan error) -} -``` - -### VotingProvider - -Manages the voting process: - -```go -type VotingProvider[StateT Unique, VoteT Unique, PeerIDT Unique] interface { - SendProposal(proposal *StateT, ctx context.Context) error - DecideAndSendVote( - proposals map[Identity]*StateT, - ctx context.Context, - ) (PeerIDT, *VoteT, error) - IsQuorum(votes map[Identity]*VoteT, ctx context.Context) (bool, error) - FinalizeVotes( - proposals map[Identity]*StateT, - votes map[Identity]*VoteT, - ctx context.Context, - ) (*StateT, PeerIDT, error) - SendConfirmation(finalized *StateT, ctx context.Context) error -} -``` - -### LeaderProvider - -Handles leader selection and proof generation: - -```go -type LeaderProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - GetNextLeaders(prior *StateT, ctx context.Context) ([]PeerIDT, error) - ProveNextState( - prior *StateT, - collected CollectedT, - ctx context.Context, - ) (*StateT, error) -} -``` - -### LivenessProvider - -Manages peer liveness checks: - -```go -type LivenessProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - Collect(ctx context.Context) (CollectedT, error) - SendLiveness(prior *StateT, collected CollectedT, ctx context.Context) error -} -``` - -## Usage - -### Basic Setup - -```go -// Define your types implementing Unique -type MyState struct { - Round uint64 - Hash string -} -func (s MyState) Identity() string { return s.Hash } - -type MyVote struct { - Voter string - Value bool -} -func (v MyVote) Identity() string { return v.Voter } - -type MyPeerID struct { - ID string -} -func (p MyPeerID) Identity() string { return p.ID } - -type MyCollected struct { - Data []byte -} -func (c MyCollected) Identity() string { return string(c.Data) } - -// Implement providers -syncProvider := &MySyncProvider{} -votingProvider := &MyVotingProvider{} -leaderProvider := &MyLeaderProvider{} -livenessProvider := &MyLivenessProvider{} - -// Create state machine -sm := consensus.NewStateMachine[MyState, MyVote, MyPeerID, MyCollected]( - MyPeerID{ID: "node1"}, // This node's ID - &MyState{Round: 0, Hash: "genesis"}, // Initial state - true, // shouldEmitReceiveEventsOnSends - 3, // minimumProvers - syncProvider, - votingProvider, - leaderProvider, - livenessProvider, - nil, // Optional trace logger -) - -// Add transition listener -sm.AddListener(&MyTransitionListener{}) - -// Start the state machine -if err := sm.Start(); err != nil { - log.Fatal(err) -} - -// Receive external events -sm.ReceiveProposal(peer, proposal) -sm.ReceiveVote(voter, vote) -sm.ReceiveLivenessCheck(peer, collected) -sm.ReceiveConfirmation(peer, confirmation) - -// Stop the state machine -if err := sm.Stop(); err != nil { - log.Fatal(err) -} -``` - -### Implementing Providers - -See the `example/generic_consensus_example.go` for a complete working example -with mock provider implementations. - -## State Flow - -The typical consensus flow: - -1. **Start** → **Starting** → **Loading** -2. **Loading**: Synchronize with network -3. **Collecting**: Gather mutations/changes -4. **LivenessCheck**: Verify peer availability -5. 
**Proving**: Leader generates proof -6. **Publishing**: Leader publishes proposal -7. **Voting**: All nodes vote on proposals -8. **Finalizing**: Aggregate votes and determine outcome -9. **Verifying**: Confirm and apply state changes -10. Loop back to **Collecting** for next round - -## Configuration - -### Constructor Parameters - -- `id`: This node's peer ID -- `initialState`: Starting state (can be nil) -- `shouldEmitReceiveEventsOnSends`: Whether to emit receive events for own - messages -- `minimumProvers`: Minimum number of active provers required -- `traceLogger`: Optional logger for debugging state transitions - -### State Timeouts - -Each state can have a configured timeout that triggers an automatic transition: - -- **Starting**: 1 second → `EventInitComplete` -- **Loading**: 10 minutes → `EventSyncComplete` -- **Collecting**: 1 second → `EventCollectionDone` -- **LivenessCheck**: 1 second → `EventLivenessTimeout` -- **Proving**: 120 seconds → `EventPublishTimeout` -- **Publishing**: 1 second → `EventPublishTimeout` -- **Voting**: 10 seconds → `EventVotingTimeout` -- **Finalizing**: 1 second → `EventAggregationDone` -- **Verifying**: 1 second → `EventVerificationDone` -- **Stopping**: 30 seconds → `EventCleanupComplete` - -## Thread Safety - -The state machine is thread-safe. All public methods properly handle concurrent -access through mutex locks. State behaviors run in separate goroutines with -proper cancellation support. - -## Error Handling - -- Provider errors are logged but don't crash the state machine -- The state machine continues operating and may retry operations -- Critical errors during state transitions are returned to callers -- Use the `TraceLogger` interface for debugging - -## Best Practices - -1. **Message Isolation**: When implementing providers, always deep-copy data - before sending to prevent shared state between state machine and other - handlers -2. **Nil Handling**: Provider implementations should handle nil prior states - gracefully -3. **Context Usage**: Respect context cancellation in long-running operations -4. **Quorum Size**: Set appropriate quorum size based on your network (typically - 2f+1 for f failures) -5. **Timeout Configuration**: Adjust timeouts based on network conditions and - proof generation time - -## Example - -See `example/generic_consensus_example.go` for a complete working example -demonstrating: -- Mock provider implementations -- Multi-node consensus network -- Byzantine node behavior -- Message passing between nodes -- State transition monitoring - -## Testing - -The package includes comprehensive tests in `state_machine_test.go` covering: -- State transitions -- Event handling -- Concurrent operations -- Byzantine scenarios -- Timeout behavior +Consensus State Machine is being swapped out with a fork of the HotStuff implementation by Flow. +This will be updated with appropriate license details when the fork work has finished. diff --git a/consensus/consensus_committee.go b/consensus/consensus_committee.go new file mode 100644 index 0000000..83befed --- /dev/null +++ b/consensus/consensus_committee.go @@ -0,0 +1,154 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// A committee provides a subset of the protocol.State, which is restricted to +// exactly those nodes that participate in the current HotStuff instance: the +// state of all legitimate HotStuff participants for the specified rank. +// Legitimate HotStuff participants have NON-ZERO WEIGHT. 
+// +// For the purposes of validating votes, timeouts, quorum certificates, and +// timeout certificates we consider a committee which is static over the course +// of a rank. Although committee members may be ejected, or have their weight +// change during a rank, we ignore these changes. For these purposes we use +// the Replicas and *ByRank methods. +// +// When validating proposals, we take into account changes to the committee +// during the course of a rank. In particular, if a node is ejected, we will +// immediately reject all future proposals from that node. For these purposes we +// use the DynamicCommittee and *ByState methods. + +// Replicas defines the consensus committee for the purposes of validating +// votes, timeouts, quorum certificates, and timeout certificates. Any consensus +// committee member who was authorized to contribute to consensus AT THE +// BEGINNING of the rank may produce valid votes and timeouts for the entire +// rank, even if they are later ejected. So for validating votes/timeouts we +// use *ByRank methods. +// +// Since the voter committee is considered static over a rank: +// - we can query identities by rank +// - we don't need the full state ancestry prior to validating messages +type Replicas interface { + + // LeaderForRank returns the identity of the leader for a given rank. + // CAUTION: per the liveness requirement of HotStuff, the leader must be + // fork-independent. Therefore, a node retains its proposer rank + // slots even if it is slashed. Its proposal is simply considered + // invalid, as it is not from a legitimate participant. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if the given rank is not + // known + LeaderForRank(rank uint64) (models.Identity, error) + + // QuorumThresholdForRank returns the minimum total weight for a supermajority + // at the given rank. This weight threshold is computed using the total weight + // of the initial committee and is static over the course of a rank. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if the given rank is not + // known + QuorumThresholdForRank(rank uint64) (uint64, error) + + // TimeoutThresholdForRank returns the minimum total weight of observed + // timeout states required to safely timeout for the given rank. This weight + // threshold is computed using the total weight of the initial committee and + // is static over the course of a rank. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if the given rank is not + // known + TimeoutThresholdForRank(rank uint64) (uint64, error) + + // Self returns our own node identifier. + // TODO: ultimately, the node's own identity is necessary for signing. + // Ideally, we would move the method for checking whether an Identifier + // refers to this node to the signer. This would require some + // refactoring of EventHandler (postponed to later) + Self() models.Identity + + // IdentitiesByRank returns a list of the legitimate HotStuff participants + // for the given rank. + // The returned list of HotStuff participants: + // - contains nodes that are allowed to submit votes or timeouts within the + // given rank (un-ejected, non-zero weight at the beginning of the rank) + // - is ordered in the canonical order + // - contains no duplicates. + // + // CAUTION: DO NOT use this method for validating state proposals.
+ // + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if the given rank is not + // known + // + IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) + + // IdentityByRank returns the full Identity for the specified HotStuff + // participant. The node must be a legitimate HotStuff participant with + // NON-ZERO WEIGHT at the specified rank. + // + // ERROR conditions: + // - model.InvalidSignerError if participantID does NOT correspond to an + // authorized HotStuff participant at the specified rank. + // + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if the given rank is not + // known + // + IdentityByRank( + rank uint64, + participantID models.Identity, + ) (models.WeightedIdentity, error) +} + +// DynamicCommittee extends Replicas to provide the consensus committee for the +// purposes of validating proposals. The proposer committee reflects +// state-to-state changes in the identity table to support immediately rejecting +// proposals from nodes after they are ejected. For validating proposals, we use +// *ByState methods. +// +// Since the proposer committee can change at any state: +// - we query by state ID +// - we must have incorporated the full state ancestry prior to validating +// messages +type DynamicCommittee interface { + Replicas + + // IdentitiesByState returns a list of the legitimate HotStuff participants + // for the given state. The returned list of HotStuff participants: + // - contains nodes that are allowed to submit proposals, votes, and + // timeouts (un-ejected, non-zero weight at current state) + // - is ordered in the canonical order + // - contains no duplicates. + // + // ERROR conditions: + // - state.ErrUnknownSnapshotReference if the stateID is for an unknown state + IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error) + + // IdentityByState returns the full Identity for the specified HotStuff + // participant. The node must be a legitimate HotStuff participant with + // NON-ZERO WEIGHT at the specified state. + // ERROR conditions: + // - model.InvalidSignerError if participantID does NOT correspond to an + // authorized HotStuff participant at the specified state. + // - state.ErrUnknownSnapshotReference if the stateID is for an unknown state + IdentityByState( + stateID models.Identity, + participantID models.Identity, + ) (models.WeightedIdentity, error) +} + +// StateSignerDecoder defines how to convert the ParentSignerIndices field +// within a particular state header to the identifiers of the nodes which signed +// the state. +type StateSignerDecoder[StateT models.Unique] interface { + // DecodeSignerIDs decodes the signer indices from the given state header into + // full node IDs. + // Note: A state header contains a quorum certificate for its parent, which + // proves that the consensus committee has reached agreement on the validity + // of the parent state. Consequently, the returned IdentifierList contains the + // consensus participants that signed the parent state.
+ // Expected Error returns during normal operations: + // - consensus.InvalidSignerIndicesError if signer indices included in the + // header do not encode a valid subset of the consensus committee + DecodeSignerIDs( + state *models.State[StateT], + ) ([]models.WeightedIdentity, error) +} diff --git a/consensus/consensus_consumer.go b/consensus/consensus_consumer.go new file mode 100644 index 0000000..19aeec0 --- /dev/null +++ b/consensus/consensus_consumer.go @@ -0,0 +1,453 @@ +package consensus + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationConsumer consumes outbound notifications about +// HotStuff-protocol violations. Such notifications are produced by the active +// consensus participants and the consensus follower. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type ProposalViolationConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnInvalidStateDetected notifications are produced by components that have + // detected that a state proposal is invalid and need to report it. Most of + // the time such a state can be detected by calling Validator.ValidateProposal. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) + + // OnDoubleProposeDetected notifications are produced by the Finalization + // Logic whenever a double state proposal (equivocation) was detected. + // Equivocation occurs when the same leader proposes two different states for + // the same rank. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnDoubleProposeDetected(*models.State[StateT], *models.State[StateT]) +} + +// VoteAggregationViolationConsumer consumes outbound notifications about +// HotStuff-protocol violations, specifically invalid votes during processing. +// Such notifications are produced by the Vote Aggregation logic. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type VoteAggregationViolationConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnDoubleVotingDetected notifications are produced by the Vote Aggregation + // logic whenever double voting (the same voter voting for different states at + // the same rank) was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnDoubleVotingDetected(*VoteT, *VoteT) + + // OnInvalidVoteDetected notifications are produced by the Vote Aggregation + // logic whenever an invalid vote was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) + + // OnVoteForInvalidStateDetected notifications are produced by the Vote + // Aggregation logic whenever a vote for an invalid proposal was detected.
+ // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnVoteForInvalidStateDetected( + vote *VoteT, + invalidProposal *models.SignedProposal[StateT, VoteT], + ) +} + +// TimeoutAggregationViolationConsumer consumes outbound notifications about +// Active Pacemaker violations, specifically invalid timeouts during processing. +// Such notifications are produced by the Timeout Aggregation logic. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type TimeoutAggregationViolationConsumer[VoteT models.Unique] interface { + // OnDoubleTimeoutDetected notifications are produced by the Timeout + // Aggregation logic whenever a double timeout (the same replica producing two + // different timeouts at the same rank) was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnDoubleTimeoutDetected( + *models.TimeoutState[VoteT], + *models.TimeoutState[VoteT], + ) + + // OnInvalidTimeoutDetected notifications are produced by the Timeout + // Aggregation logic whenever an invalid timeout was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) +} + +// FinalizationConsumer consumes outbound notifications produced by the logic +// tracking forks and finalization. Such notifications are produced by the +// active consensus participants, and are generally relevant to the +// larger node. The notifications are emitted in the order in which the +// finalization algorithm makes the respective steps. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type FinalizationConsumer[StateT models.Unique] interface { + // OnStateIncorporated notifications are produced by the Finalization Logic + // whenever a state is incorporated into the consensus state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStateIncorporated(*models.State[StateT]) + + // OnFinalizedState notifications are produced by the Finalization Logic + // whenever a state has been finalized. They are emitted in the order the + // states are finalized. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnFinalizedState(*models.State[StateT]) +} + +// ParticipantConsumer consumes outbound notifications produced by consensus +// participants actively proposing states, voting, collecting & aggregating +// votes to QCs, and participating in the pacemaker (sending timeouts, +// collecting & aggregating timeouts to TCs). +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead).
+type ParticipantConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnEventProcessed notifications are produced by the EventHandler when it is + // done processing and hands control back to the EventLoop to wait for the + // next event. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnEventProcessed() + + // OnStart notifications are produced by the EventHandler when it starts + // state recovery and prepares for handling incoming events from the EventLoop. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStart(currentRank uint64) + + // OnReceiveProposal notifications are produced by the EventHandler when it + // starts processing a state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[StateT, VoteT], + ) + + // OnReceiveQuorumCertificate notifications are produced by the EventHandler + // when it starts processing a QuorumCertificate [QC] constructed by the + // node's internal vote aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) + + // OnReceiveTimeoutCertificate notifications are produced by the EventHandler + // when it starts processing a TimeoutCertificate [TC] constructed by the + // node's internal timeout aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) + + // OnPartialTimeoutCertificate notifications are produced by the EventHandler + // when it starts processing a partial TC constructed by the local timeout + // aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnPartialTimeoutCertificate( + currentRank uint64, + partialTimeoutCertificate *PartialTimeoutCertificateCreated, + ) + + // OnLocalTimeout notifications are produced by the EventHandler when it + // reacts to the expiry of the round duration timer. Such a notification + // indicates that the Pacemaker's timeout was processed by the system. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnLocalTimeout(currentRank uint64) + + // OnRankChange notifications are produced by the Pacemaker when it + // transitions to a new rank based on processing a QC or TC. The arguments + // specify the oldRank (first argument), and the newRank to which the + // Pacemaker transitioned (second argument). + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnRankChange(oldRank, newRank uint64) + + // OnQuorumCertificateTriggeredRankChange notifications are produced by + // the Pacemaker when it moves to a new rank based on processing a QC.
The + // arguments specify the oldRank, the newRank to which the Pacemaker + // transitioned, and the qc which triggered the rank change. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnQuorumCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + qc models.QuorumCertificate, + ) + + // OnTimeoutCertificateTriggeredRankChange notifications are produced by + // the Pacemaker when it moves to a new rank based on processing a TC. The + // arguments specify the oldRank, the newRank to which the Pacemaker + // transitioned, and the tc which triggered the rank change. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, + ) + + // OnStartingTimeout notifications are produced by the Pacemaker. Such a + // notification indicates that the Pacemaker is now waiting for the system to + // (receive and) process states or votes. The timeout window is bounded by the + // startTime and endTime arguments. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStartingTimeout(startTime, endTime time.Time) + + // OnCurrentRankDetails notifications are produced by the EventHandler during + // the course of a rank with auxiliary information. These notifications are + // generally not produced for all ranks (for example, skipped ranks). These + // notifications are guaranteed to be produced for all ranks we enter after + // fully processing a message. + // Example 1: + // - We are in rank 8. We process a QC with rank 10, causing us to enter + // rank 11. + // - Then this notification will be produced for rank 11. + // Example 2: + // - We are in rank 8. We process a proposal with rank 10, which contains a + // TC for rank 9 and TC.NewestQC for rank 8. + // - The QC would allow us to enter rank 9 and the TC would allow us to + // enter rank 10, so after fully processing the message we are in rank 10. + // - Then this notification will be produced for rank 10, but not rank 9. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnCurrentRankDetails( + currentRank, finalizedRank uint64, + currentLeader models.Identity, + ) +} + +// VoteCollectorConsumer consumes outbound notifications produced by HotStuff's +// vote aggregation component. These events are primarily intended for the +// HotStuff-internal state machine (EventHandler), but might also be relevant to +// the larger node in which HotStuff is running. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type VoteCollectorConsumer[VoteT models.Unique] interface { + // OnQuorumCertificateConstructedFromVotes notifications are produced by the + // VoteAggregator component, whenever it constructs a QC from votes. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead).
+ OnQuorumCertificateConstructedFromVotes(models.QuorumCertificate) + + // OnVoteProcessed notifications are produced by the Vote Aggregation logic, + // each time we successfully ingest a valid vote. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnVoteProcessed(vote *VoteT) +} + +// TimeoutCollectorConsumer consumes outbound notifications produced by +// HotStuff's timeout aggregation component. These events are primarily intended +// for the HotStuff-internal state machine (EventHandler), but might also be +// relevant to the larger node in which HotStuff is running. +// +// Caution: the events are not strictly ordered by increasing ranks! The +// notifications are emitted by concurrent processing logic. Over larger time +// scales, the emitted events are for statistically increasing ranks. However, +// on short time scales there are _no_ monotonicity guarantees w.r.t. the +// events' ranks. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type TimeoutCollectorConsumer[VoteT models.Unique] interface { + // OnTimeoutCertificateConstructedFromTimeouts notifications are produced by + // the TimeoutProcessor component, whenever it constructs a TC based on + // TimeoutStates from a supermajority of consensus participants. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutCertificateConstructedFromTimeouts( + certificate models.TimeoutCertificate, + ) + + // OnPartialTimeoutCertificateCreated notifications are produced by the + // TimeoutProcessor component, whenever it collected TimeoutStates from a + // superminority of consensus participants for a specific rank. Along with the + // rank, it reports the newest QC and TC (for previous rank) discovered in + // process of timeout collection. Per convention, the newest QC is never nil, + // while the TC for the previous rank might be nil. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) + + // OnNewQuorumCertificateDiscovered notifications are produced by the + // TimeoutCollector component, whenever it discovers new QC included in + // timeout state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) + + // OnNewTimeoutCertificateDiscovered notifications are produced by the + // TimeoutCollector component, whenever it discovers new TC included in + // timeout state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) + + // OnTimeoutProcessed notifications are produced by the Timeout Aggregation + // logic, each time we successfully ingest a valid timeout. 
+ // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) +} + +// CommunicatorConsumer consumes outbound notifications produced by HotStuff and +// it's components. Notifications allow the HotStuff core algorithm to +// communicate with the other actors of the consensus process. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] interface { + // OnOwnVote notifies about intent to send a vote for the given parameters to + // the specified recipient. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnOwnVote(vote *VoteT, recipientID models.Identity) + + // OnOwnTimeout notifies about intent to broadcast the given timeout + // state to all actors of the consensus process. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnOwnTimeout(timeout *models.TimeoutState[VoteT]) + + // OnOwnProposal notifies about intent to broadcast the given state proposal + // to all actors of the consensus process. delay is to hold the proposal + // before broadcasting it. Useful to control the state production rate. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnOwnProposal( + proposal *models.SignedProposal[StateT, VoteT], + targetPublicationTime time.Time, + ) +} + +// FollowerConsumer consumes outbound notifications produced by consensus +// followers. It is a subset of the notifications produced by consensus +// participants. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type FollowerConsumer[StateT models.Unique, VoteT models.Unique] interface { + ProposalViolationConsumer[StateT, VoteT] + FinalizationConsumer[StateT] +} + +// Consumer consumes outbound notifications produced by consensus participants. +// Notifications are consensus-internal state changes which are potentially +// relevant to the larger node in which HotStuff is running. The notifications +// are emitted in the order in which the HotStuff algorithm makes the respective +// steps. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type Consumer[StateT models.Unique, VoteT models.Unique] interface { + FollowerConsumer[StateT, VoteT] + CommunicatorConsumer[StateT, VoteT] + ParticipantConsumer[StateT, VoteT] +} + +// VoteAggregationConsumer consumes outbound notifications produced by Vote +// Aggregation logic. It is a subset of the notifications produced by consensus +// participants. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). 
+type VoteAggregationConsumer[
+	StateT models.Unique,
+	VoteT models.Unique,
+] interface {
+	VoteAggregationViolationConsumer[StateT, VoteT]
+	VoteCollectorConsumer[VoteT]
+}
+
+// TimeoutAggregationConsumer consumes outbound notifications produced by
+// Timeout Aggregation logic. It is a subset of the notifications produced by
+// consensus participants.
+// Implementations must:
+//   - be concurrency safe
+//   - be non-blocking
+//   - handle repetition of the same events (with some processing overhead).
+type TimeoutAggregationConsumer[VoteT models.Unique] interface {
+	TimeoutAggregationViolationConsumer[VoteT]
+	TimeoutCollectorConsumer[VoteT]
+}
diff --git a/consensus/consensus_events.go b/consensus/consensus_events.go
new file mode 100644
index 0000000..5869674
--- /dev/null
+++ b/consensus/consensus_events.go
@@ -0,0 +1,84 @@
+package consensus
+
+import (
+	"context"
+	"time"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
+)
+
+// PartialTimeoutCertificateCreated represents a notification emitted by the
+// TimeoutProcessor component, whenever it has collected TimeoutStates from a
+// superminority of consensus participants for a specific rank. Along with the
+// rank, it reports the newest QuorumCertificate and TimeoutCertificate (for
+// previous rank) discovered during timeout collection. Per convention, the
+// newest QuorumCertificate is never nil, while the TimeoutCertificate for the
+// previous rank might be nil.
+type PartialTimeoutCertificateCreated struct {
+	Rank                        uint64
+	NewestQuorumCertificate     models.QuorumCertificate
+	PriorRankTimeoutCertificate models.TimeoutCertificate
+}
+
+// EventHandler runs a state machine to process proposals, QuorumCertificates
+// and local timeouts. Not concurrency safe.
+type EventHandler[StateT models.Unique, VoteT models.Unique] interface {
+	// OnReceiveQuorumCertificate processes a valid quorumCertificate constructed
+	// by internal vote aggregator or discovered in TimeoutState. All inputs
+	// should be validated before feeding into this function. Assuming trusted
+	// data. No errors are expected during normal operation.
+	OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error
+
+	// OnReceiveTimeoutCertificate processes a valid timeoutCertificate
+	// constructed by internal timeout aggregator, discovered in TimeoutState or
+	// broadcast over the network. All inputs should be validated before feeding
+	// into this function. Assuming trusted data. No errors are expected during
+	// normal operation.
+	OnReceiveTimeoutCertificate(
+		timeoutCertificate models.TimeoutCertificate,
+	) error
+
+	// OnReceiveProposal processes a state proposal received from another HotStuff
+	// consensus participant. All inputs should be validated before feeding into
+	// this function. Assuming trusted data. No errors are expected during normal
+	// operation.
+	OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error
+
+	// OnLocalTimeout handles a local timeout event by creating a
+	// models.TimeoutState and broadcasting it. No errors are expected during
+	// normal operation.
+	OnLocalTimeout() error
+
+	// OnPartialTimeoutCertificateCreated handles notifications produced by the
+	// internal timeout aggregator. If the notification is for the current rank,
+	// a corresponding models.TimeoutState is broadcast to the consensus
+	// committee. No errors are expected during normal operation.
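+	// For example (illustrative; field values are assumed to come from the
+	// timeout aggregator, and the prior-rank TC may be nil):
+	//
+	//	err := handler.OnPartialTimeoutCertificateCreated(&PartialTimeoutCertificateCreated{
+	//		Rank:                        rank,
+	//		NewestQuorumCertificate:     newestQC,
+	//		PriorRankTimeoutCertificate: priorTC,
+	//	})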
+ OnPartialTimeoutCertificateCreated( + partialTimeoutCertificate *PartialTimeoutCertificateCreated, + ) error + + // TimeoutChannel returns a channel that sends a signal on timeout. + TimeoutChannel() <-chan time.Time + + // Start starts the event handler. No errors are expected during normal + // operation. + // CAUTION: EventHandler is not concurrency safe. The Start method must be + // executed by the same goroutine that also calls the other business logic + // methods, or concurrency safety has to be implemented externally. + Start(ctx context.Context) error +} + +// EventLoop performs buffer and processing of incoming proposals and QCs. +type EventLoop[StateT models.Unique, VoteT models.Unique] interface { + lifecycle.Component + TimeoutCollectorConsumer[VoteT] + VoteCollectorConsumer[VoteT] + SubmitProposal(proposal *models.SignedProposal[StateT, VoteT]) +} + +// FollowerLoop only follows certified states, does not actively process the +// collection of proposals and QC/TCs. +type FollowerLoop[StateT models.Unique, VoteT models.Unique] interface { + AddCertifiedState(certifiedState *models.CertifiedState[StateT]) +} diff --git a/consensus/consensus_finalizer.go b/consensus/consensus_finalizer.go new file mode 100644 index 0000000..4c1fc2c --- /dev/null +++ b/consensus/consensus_finalizer.go @@ -0,0 +1,23 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// Finalizer is used by the consensus algorithm to inform other components for +// (such as the protocol state) about finalization of states. +// +// Since we have two different protocol states: one for the main consensus, +// the other for the collection cluster consensus, the Finalizer interface +// allows the two different protocol states to provide different implementations +// for updating its state when a state has been finalized. +// +// Updating the protocol state should always succeed when the data is +// consistent. However, in case the protocol state is corrupted, error should be +// returned and the consensus algorithm should halt. So the error returned from +// MakeFinal is for the protocol state to report exceptions. +type Finalizer interface { + + // MakeFinal will declare a state and all of its ancestors as finalized, which + // makes it an immutable part of the time reel. Returning an error indicates + // some fatal condition and will cause the finalization logic to terminate. + MakeFinal(stateID models.Identity) error +} diff --git a/consensus/consensus_forks.go b/consensus/consensus_forks.go new file mode 100644 index 0000000..5e13075 --- /dev/null +++ b/consensus/consensus_forks.go @@ -0,0 +1,106 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// FinalityProof represents a finality proof for a State. By convention, a +// FinalityProof is immutable. Finality in Jolteon/HotStuff is determined by the +// 2-chain rule: +// +// There exists a _certified_ state C, such that State.Rank + 1 = C.Rank +type FinalityProof[StateT models.Unique] struct { + State *models.State[StateT] + CertifiedChild *models.CertifiedState[StateT] +} + +// Forks maintains an in-memory data-structure of all states whose rank-number +// is larger or equal to the latest finalized state. The latest finalized state +// is defined as the finalized state with the largest rank number. When adding +// states, Forks automatically updates its internal state (including finalized +// states). 
Furthermore, states whose rank number is smaller than the latest +// finalized state are pruned automatically. +// +// PREREQUISITES: +// Forks expects that only states are added that can be connected to its latest +// finalized state (without missing interim ancestors). If this condition is +// violated, Forks will raise an error and ignore the state. +type Forks[StateT models.Unique] interface { + + // GetStatesForRank returns all known states for the given rank + GetStatesForRank(rank uint64) []*models.State[StateT] + + // GetState returns (*models.State[StateT], true) if the state with the + // specified id was found and (nil, false) otherwise. + GetState(stateID models.Identity) (*models.State[StateT], bool) + + // FinalizedRank returns the largest rank number where a finalized state is + // known + FinalizedRank() uint64 + + // FinalizedState returns the finalized state with the largest rank number + FinalizedState() *models.State[StateT] + + // FinalityProof returns the latest finalized state and a certified child from + // the subsequent rank, which proves finality. + // CAUTION: method returns (nil, false), when Forks has not yet finalized any + // states beyond the finalized root state it was initialized with. + FinalityProof() (*FinalityProof[StateT], bool) + + // AddValidatedState appends the validated state to the tree of pending + // states and updates the latest finalized state (if applicable). Unless the + // parent is below the pruning threshold (latest finalized rank), we require + // that the parent is already stored in Forks. Calling this method with + // previously processed states leaves the consensus state invariant (though, + // it will potentially cause some duplicate processing). + // Notes: + // - Method `AddCertifiedState(..)` should be used preferably, if a QC + // certifying `state` is already known. This is generally the case for the + // consensus follower. + // - Method `AddValidatedState` is intended for active consensus + // participants, which fully validate states (incl. payload), i.e. QCs are + // processed as part of validated proposals. + // + // Possible error returns: + // - model.MissingStateError if the parent does not exist in the forest (but + // is above the pruned rank). From the perspective of Forks, this error is + // benign (no-op). + // - model.InvalidStateError if the state is invalid (see + // `Forks.EnsureStateIsValidExtension` for details). From the perspective + // of Forks, this error is benign (no-op). However, we assume all states + // are fully verified, i.e. they should satisfy all consistency + // requirements. Hence, this error is likely an indicator of a bug in the + // compliance layer. + // - model.ByzantineThresholdExceededError if conflicting QCs or conflicting + // finalized states have been detected (violating a foundational consensus + // guarantees). This indicates that there are 1/3+ Byzantine nodes + // (weighted by seniority) in the network, breaking the safety guarantees + // of HotStuff (or there is a critical bug / data corruption). Forks + // cannot recover from this exception. + // - All other errors are potential symptoms of bugs or state corruption. + AddValidatedState(proposal *models.State[StateT]) error + + // AddCertifiedState appends the given certified state to the tree of pending + // states and updates the latest finalized state (if finalization progressed). + // Unless the parent is below the pruning threshold (latest finalized rank), + // we require that the parent is already stored in Forks. 
Calling this method + // with previously processed states leaves the consensus state invariant + // (though, it will potentially cause some duplicate processing). + // + // Possible error returns: + // - model.MissingStateError if the parent does not exist in the forest (but + // is above the pruned rank). From the perspective of Forks, this error is + // benign (no-op). + // - model.InvalidStateError if the state is invalid (see + // `Forks.EnsureStateIsValidExtension` for details). From the perspective + // of Forks, this error is benign (no-op). However, we assume all states + // are fully verified, i.e. they should satisfy all consistency + // requirements. Hence, this error is likely an indicator of a bug in the + // compliance layer. + // - model.ByzantineThresholdExceededError if conflicting QCs or conflicting + // finalized states have been detected (violating a foundational consensus + // guarantees). This indicates that there are 1/3+ Byzantine nodes + // (weighted by seniority) in the network, breaking the safety guarantees + // of HotStuff (or there is a critical bug / data corruption). Forks + // cannot recover from this exception. + // - All other errors are potential symptoms of bugs or state corruption. + AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error +} diff --git a/consensus/consensus_leader.go b/consensus/consensus_leader.go new file mode 100644 index 0000000..303a10e --- /dev/null +++ b/consensus/consensus_leader.go @@ -0,0 +1,30 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LeaderProvider handles leader selection. State is provided, if relevant to +// the upstream consensus engine. +type LeaderProvider[ + StateT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] interface { + // GetNextLeaders returns a list of node indices, in priority order. Note that + // it is assumed that if no error is returned, GetNextLeaders should produce + // a non-empty list. If a list of size smaller than minimumProvers is + // provided, the liveness check will loop until the list is greater than that. + GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error) + // ProveNextState prepares a non-finalized new state from the prior, to be + // proposed and voted upon. Provided context may be canceled, should be used + // to halt long-running prover operations. + ProveNextState( + ctx context.Context, + rank uint64, + filter []byte, + priorState models.Identity, + ) (*StateT, error) +} diff --git a/consensus/consensus_liveness.go b/consensus/consensus_liveness.go new file mode 100644 index 0000000..426f522 --- /dev/null +++ b/consensus/consensus_liveness.go @@ -0,0 +1,25 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LivenessProvider handles liveness announcements ahead of proving, to +// pre-emptively choose the next prover. In expected leader scenarios, this +// enables a peer to determine if an honest next prover is offline, so that it +// can publish the next state without waiting. +type LivenessProvider[ + StateT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] interface { + // Collect returns the collected mutation operations ahead of liveness + // announcements. + Collect(ctx context.Context) (CollectedT, error) + // SendLiveness announces liveness ahead of the next prover deterimination and + // subsequent proving. 
Provides prior state and collected mutation operations + // if relevant. + SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error +} diff --git a/consensus/consensus_pacemaker.go b/consensus/consensus_pacemaker.go new file mode 100644 index 0000000..1507638 --- /dev/null +++ b/consensus/consensus_pacemaker.go @@ -0,0 +1,65 @@ +package consensus + +import ( + "context" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Pacemaker defines a standard set of methods for handling pacemaker behaviors +// in the consensus engine. +type Pacemaker interface { + ProposalDurationProvider + // CurrentRank returns the current rank + CurrentRank() uint64 + // LatestQuorumCertificate returns the latest quorum certificate seen. + LatestQuorumCertificate() models.QuorumCertificate + // PriorRankTimeoutCertificate returns the prior rank's timeout certificate, + // if it exists. + PriorRankTimeoutCertificate() models.TimeoutCertificate + // ReceiveQuorumCertificate handles an incoming quorum certificate, advancing + // to a new rank if applicable. + ReceiveQuorumCertificate( + quorumCertificate models.QuorumCertificate, + ) (*models.NextRank, error) + // ReceiveTimeoutCertificate handles an incoming timeout certificate, + // advancing to a new rank if applicable. + ReceiveTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, + ) (*models.NextRank, error) + // TimeoutCh provides a channel for timing out on the current rank. + TimeoutCh() <-chan time.Time + // Start starts the pacemaker, takes a cancellable context. + Start(ctx context.Context) +} + +// ProposalDurationProvider generates the target publication time for state +// proposals. +type ProposalDurationProvider interface { + + // TargetPublicationTime is intended to be called by the EventHandler, + // whenever it wants to publish a new proposal. The event handler inputs + // - proposalRank: the rank it is proposing for, + // - timeRankEntered: the time when the EventHandler entered this rank + // - parentStateId: the ID of the parent state, which the EventHandler is + // building on + // TargetPublicationTime returns the time stamp when the new proposal should + // be broadcasted. For a given rank where we are the primary, suppose the + // actual time we are done building our proposal is P: + // - if P < TargetPublicationTime(..), then the EventHandler should wait + // until `TargetPublicationTime` to broadcast the proposal + // - if P >= TargetPublicationTime(..), then the EventHandler should + // immediately broadcast the proposal + // + // Note: Technically, our metrics capture the publication delay relative to + // this function's _latest_ call. Currently, the EventHandler is the only + // caller of this function, and only calls it once. + // + // Concurrency safe. + TargetPublicationTime( + proposalRank uint64, + timeRankEntered time.Time, + parentStateId models.Identity, + ) time.Time +} diff --git a/consensus/consensus_producer.go b/consensus/consensus_producer.go new file mode 100644 index 0000000..5c57f4d --- /dev/null +++ b/consensus/consensus_producer.go @@ -0,0 +1,25 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// StateProducer is responsible for producing new state proposals. It is a +// service component to HotStuff's main state machine (implemented in the +// EventHandler). The StateProducer's central purpose is to mediate concurrent +// signing requests to its embedded `hotstuff.SafetyRules` during state +// production. 
The actual work of producing a state proposal is delegated to the +// embedded `consensus.LeaderProvider`. +type StateProducer[StateT models.Unique, VoteT models.Unique] interface { + // MakeStateProposal builds a new HotStuff state proposal using the given + // rank, the given quorum certificate for its parent and [optionally] a + // timeout certificate for last rank (could be nil). + // Error Returns: + // - model.NoVoteError if it is not safe for us to vote (our proposal + // includes our vote) for this rank. This can happen if we have already + // proposed or timed out this rank. + // - generic error in case of unexpected failure + MakeStateProposal( + rank uint64, + qc models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) (*models.SignedProposal[StateT, VoteT], error) +} diff --git a/consensus/consensus_safety_rules.go b/consensus/consensus_safety_rules.go new file mode 100644 index 0000000..a056cfb --- /dev/null +++ b/consensus/consensus_safety_rules.go @@ -0,0 +1,73 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// SafetyRules enforces all consensus rules that guarantee safety. It produces +// votes for the given states or TimeoutState for the given ranks, only if all +// safety rules are satisfied. In particular, SafetyRules guarantees a +// foundational security theorem for HotStuff, which we utilize also outside of +// consensus (e.g. queuing pending states for execution, verification, sealing +// etc): +// +// THEOREM: For each rank, there can be at most 1 certified state. +// +// Implementations are generally *not* concurrency safe. +type SafetyRules[StateT models.Unique, VoteT models.Unique] interface { + // ProduceVote takes a state proposal and current rank, and decides whether to + // vote for the state. Voting is deterministic, i.e. voting for same proposal + // will always result in the same vote. + // Returns: + // * (vote, nil): On the _first_ state for the current rank that is safe to + // vote for. Subsequently, voter does _not_ vote for any _other_ state with + // the same (or lower) rank. SafetyRules internally caches and persists its + // latest vote. As long as the SafetyRules' internal state remains + // unchanged, ProduceVote will return its cached for identical inputs. + // * (nil, model.NoVoteError): If the safety module decides that it is not + // safe to vote for the given state. This is a sentinel error and + // _expected_ during normal operation. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). + ProduceVote( + proposal *models.SignedProposal[StateT, VoteT], + curRank uint64, + ) (*VoteT, error) + + // ProduceTimeout takes current rank, highest locally known QC and TC + // (optional, must be nil if and only if QC is for previous rank) and decides + // whether to produce timeout for current rank. + // Returns: + // * (timeout, nil): It is safe to timeout for current rank using newestQC + // and lastRankTC. + // * (nil, model.NoTimeoutError): If replica is not part of the authorized + // consensus committee (anymore) and therefore is not authorized to produce + // a valid timeout state. This sentinel error is _expected_ during normal + // operation, e.g. during the grace-period after Rank switchover or after + // the replica self-ejected. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). 
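+	// A minimal usage sketch (identifiers are illustrative): when newestQC
+	// certifies the immediately preceding rank, lastRankTC must be nil;
+	// otherwise the TC for the previous rank is passed alongside it:
+	//
+	//	timeout, err := safetyRules.ProduceTimeout(curRank, newestQC, lastRankTC)
+	//	// on model.NoTimeoutError: do not broadcast a timeout for this rank
+	//	// on nil error: hand the returned TimeoutState to the communicator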
+ ProduceTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) (*models.TimeoutState[VoteT], error) + + // SignOwnProposal takes an unsigned state proposal and produces a vote for + // it. Vote is a cryptographic commitment to the proposal. By adding the vote + // to an unsigned proposal, the caller constructs a signed state proposal. + // This method has to be used only by the leader, which must be the proposer + // of the state (or an exception is returned). + // Implementors must guarantee that: + // - vote on the proposal satisfies safety rules + // - maximum one proposal is signed per rank + // Returns: + // * (vote, nil): the passed unsigned proposal is a valid one, and it's safe + // to make a proposal. Subsequently, leader does _not_ produce any _other_ + // proposal with the same (or lower) rank. + // * (nil, model.NoVoteError): according to HotStuff's Safety Rules, it is + // not safe to sign the given proposal. This could happen because we have + // already proposed or timed out for the given rank. This is a sentinel + // error and _expected_ during normal operation. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). + SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error) +} diff --git a/consensus/consensus_signature.go b/consensus/consensus_signature.go new file mode 100644 index 0000000..9f65891 --- /dev/null +++ b/consensus/consensus_signature.go @@ -0,0 +1,161 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// WeightedSignatureAggregator aggregates signatures of the same signature +// scheme and the same message from different signers. The public keys and +// message are agreed upon upfront. It is also recommended to only aggregate +// signatures generated with keys representing equivalent security-bit level. +// Furthermore, a weight [unsigned int64] is assigned to each signer ID. The +// WeightedSignatureAggregator internally tracks the total weight of all +// collected signatures. Implementations must be concurrency safe. +type WeightedSignatureAggregator interface { + // Verify verifies the signature under the stored public keys and message. + // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.ErrInvalidSignature if signerID is valid but signature is + // cryptographically invalid + Verify(signerID models.Identity, sig []byte) error + + // TrustedAdd adds a signature to the internal set of signatures and adds the + // signer's weight to the total collected weight, iff the signature is _not_ a + // duplicate. The total weight of all collected signatures (excluding + // duplicates) is returned regardless of any returned error. + // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.DuplicatedSignerError if the signer has been already added + TrustedAdd(signerID models.Identity, sig []byte) ( + totalWeight uint64, + exception error, + ) + + // TotalWeight returns the total weight presented by the collected signatures. + TotalWeight() uint64 + + // Aggregate aggregates the signatures and returns the aggregated consensus. + // The function performs a final verification and errors if the aggregated + // signature is invalid. 
This is required for the function safety since + // `TrustedAdd` allows adding invalid signatures. + // The function errors with: + // - model.InsufficientSignaturesError if no signatures have been added yet + // - model.InvalidSignatureIncludedError if: + // -- some signature(s), included via TrustedAdd, fail to deserialize + // (regardless of the aggregated public key) + // -- or all signatures deserialize correctly but some signature(s), + // included via TrustedAdd, are invalid (while aggregated public key is + // valid) + // - model.InvalidAggregatedKeyError if all signatures deserialize correctly + // but the signer's proving public keys sum up to an invalid key (BLS + // identity public key). Any aggregated signature would fail the + // cryptographic verification under the identity public key and therefore + // such signature is considered invalid. Such scenario can only happen if + // proving public keys of signers were forged to add up to the identity + // public key. Under the assumption that all proving key PoPs are valid, + // this error case can only happen if all signers are malicious and + // colluding. If there is at least one honest signer, there is a + // negligible probability that the aggregated key is identity. + // + // The function is thread-safe. + Aggregate() ([]models.WeightedIdentity, models.AggregatedSignature, error) +} + +// TimeoutSignatureAggregator aggregates timeout signatures for one particular +// rank. When instantiating a TimeoutSignatureAggregator, the following +// information is supplied: +// - The rank for which the aggregator collects timeouts. +// - For each replicas that is authorized to send a timeout at this particular +// rank: the node ID, public proving keys, and weight +// +// Timeouts for other ranks or from non-authorized replicas are rejected. +// In their TimeoutStates, replicas include a signature over the pair (rank, +// newestQCRank), where `rank` is the rank number the timeout is for and +// `newestQCRank` is the rank of the newest QC known to the replica. +// TimeoutSignatureAggregator collects these signatures, internally tracks the +// total weight of all collected signatures. Note that in general the signed +// messages are different, which makes the aggregation a comparatively expensive +// operation. Upon calling `Aggregate`, the TimeoutSignatureAggregator +// aggregates all valid signatures collected up to this point. The aggregate +// signature is guaranteed to be correct, as only valid signatures are accepted +// as inputs. +// TimeoutSignatureAggregator internally tracks the total weight of all +// collected signatures. Implementations must be concurrency safe. +type TimeoutSignatureAggregator interface { + // VerifyAndAdd verifies the signature under the stored public keys and adds + // the signature and the corresponding highest QC to the internal set. + // Internal set and collected weight is modified iff signature _is_ valid. + // The total weight of all collected signatures (excluding duplicates) is + // returned regardless of any returned error. 
+ // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.DuplicatedSignerError if the signer has been already added + // - model.ErrInvalidSignature if signerID is valid but signature is + // cryptographically invalid + VerifyAndAdd( + signerID models.Identity, + sig []byte, + newestQCRank uint64, + ) (totalWeight uint64, exception error) + + // TotalWeight returns the total weight presented by the collected signatures. + TotalWeight() uint64 + + // Rank returns the rank that this instance is aggregating signatures for. + Rank() uint64 + + // Aggregate aggregates the signatures and returns with additional data. + // Aggregated signature will be returned as SigData of timeout certificate. + // Caller can be sure that resulting signature is valid. + // Expected errors during normal operations: + // - model.InsufficientSignaturesError if no signatures have been added yet + Aggregate() ( + signersInfo []TimeoutSignerInfo, + aggregatedSig models.AggregatedSignature, + exception error, + ) +} + +// TimeoutSignerInfo is a helper structure that stores the QC ranks that each +// signer contributed to a TC. Used as result of +// TimeoutSignatureAggregator.Aggregate() +type TimeoutSignerInfo struct { + NewestQCRank uint64 + Signer models.Identity +} + +// StateSignatureData is an intermediate struct for Packer to pack the +// aggregated signature data into raw bytes or unpack from raw bytes. +type StateSignatureData struct { + Signers []models.WeightedIdentity + Signature []byte +} + +// Packer packs aggregated signature data into raw bytes to be used in state +// header. +type Packer interface { + // Pack serializes the provided StateSignatureData into a precursor format of + // a QC. rank is the rank of the state that the aggregated signature is for. + // sig is the aggregated signature data. + // Expected error returns during normal operations: + // * none; all errors are symptoms of inconsistent input data or corrupted + // internal state. + Pack(rank uint64, sig *StateSignatureData) ( + signerIndices []byte, + sigData []byte, + err error, + ) + + // Unpack de-serializes the provided signature data. + // sig is the aggregated signature data + // It returns: + // - (sigData, nil) if successfully unpacked the signature data + // - (nil, model.InvalidFormatError) if failed to unpack the signature data + Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) ( + *StateSignatureData, + error, + ) +} diff --git a/consensus/consensus_signer.go b/consensus/consensus_signer.go new file mode 100644 index 0000000..a54f0ac --- /dev/null +++ b/consensus/consensus_signer.go @@ -0,0 +1,39 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer is responsible for creating votes, proposals for a given state. +type Signer[StateT models.Unique, VoteT models.Unique] interface { + // CreateVote creates a vote for the given state. No error returns are + // expected during normal operations (incl. presence of byz. actors). + CreateVote(state *models.State[StateT]) (*VoteT, error) + + // CreateTimeout creates a timeout for given rank. No errors return are + // expected during normal operations(incl presence of byz. actors). 
+ CreateTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, + ) (*models.TimeoutState[VoteT], error) +} + +type SignatureAggregator interface { + VerifySignatureMultiMessage( + publicKeys [][]byte, + signature []byte, + messages [][]byte, + context []byte, + ) bool + VerifySignatureRaw( + publicKey []byte, + signature []byte, + message []byte, + context []byte, + ) bool + Aggregate( + publicKeys [][]byte, + signatures [][]byte, + ) (models.AggregatedSignature, error) +} diff --git a/consensus/consensus_store.go b/consensus/consensus_store.go new file mode 100644 index 0000000..4360574 --- /dev/null +++ b/consensus/consensus_store.go @@ -0,0 +1,18 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// ConsensusStore defines the methods required for internal state that should +// persist between restarts of the consensus engine. +type ConsensusStore[VoteT models.Unique] interface { + ReadOnlyConsensusStore[VoteT] + PutConsensusState(state *models.ConsensusState[VoteT]) error + PutLivenessState(state *models.LivenessState) error +} + +// ReadOnlyConsensusStore defines the methods required for reading internal +// state persisted between restarts of the consensus engine. +type ReadOnlyConsensusStore[VoteT models.Unique] interface { + GetConsensusState(filter []byte) (*models.ConsensusState[VoteT], error) + GetLivenessState(filter []byte) (*models.LivenessState, error) +} diff --git a/consensus/consensus_sync.go b/consensus/consensus_sync.go new file mode 100644 index 0000000..9a09180 --- /dev/null +++ b/consensus/consensus_sync.go @@ -0,0 +1,20 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SyncProvider handles synchronization management +type SyncProvider[StateT models.Unique] interface { + // Performs synchronization to set internal state. Note that it is assumed + // that errors are transient and synchronization should be reattempted on + // failure. If some other process for synchronization is used and this should + // be bypassed, send nil on the error channel. Provided context may be + // canceled, should be used to halt long-running sync operations. + Synchronize( + ctx context.Context, + existing *StateT, + ) (<-chan *StateT, <-chan error) +} diff --git a/consensus/consensus_timeout.go b/consensus/consensus_timeout.go new file mode 100644 index 0000000..17f7295 --- /dev/null +++ b/consensus/consensus_timeout.go @@ -0,0 +1,127 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// TimeoutAggregator verifies and aggregates timeout states to build timeout +// certificates [TCs]. When enough timeout states are collected, it builds a TC +// and sends it to the EventLoop TimeoutAggregator also detects protocol +// violation, including invalid timeouts, double timeout, etc and notifies a +// HotStuff consumer for slashing. +type TimeoutAggregator[VoteT models.Unique] interface { + lifecycle.Component + + // AddTimeout verifies and aggregates a timeout state. + // This method can be called concurrently, timeouts will be queued and + // processed asynchronously. + AddTimeout(timeoutState *models.TimeoutState[VoteT]) + + // PruneUpToRank deletes all `TimeoutCollector`s _below_ to the given rank, as + // well as related indices. 
We only retain and process `TimeoutCollector`s, + // whose rank is equal or larger than `lowestRetainedRank`. If + // `lowestRetainedRank` is smaller than the previous value, the previous value + // is kept and the method call is a NoOp. This value should be set to the + // latest active rank maintained by `Pacemaker`. + PruneUpToRank(lowestRetainedRank uint64) +} + +// TimeoutCollector collects all timeout states for a specified rank. On the +// happy path, it generates a TimeoutCertificate when enough timeouts have been +// collected. The TimeoutCollector is a higher-level structure that orchestrates +// deduplication, caching and processing of timeouts, delegating those tasks to +// underlying modules (such as TimeoutProcessor). Implementations of +// TimeoutCollector must be concurrency safe. +type TimeoutCollector[VoteT models.Unique] interface { + // AddTimeout adds a Timeout State to the collector. When TSs from + // strictly more than 1/3 of consensus participants (measured by weight) were + // collected, the callback for partial TC will be triggered. After collecting + // TSs from a supermajority, a TC will be created and passed to the EventLoop. + // Expected error returns during normal operations: + // * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for + // incompatible rank + // All other exceptions are symptoms of potential state corruption. + AddTimeout(timeoutState *models.TimeoutState[VoteT]) error + + // Rank returns the rank that this instance is collecting timeouts for. + // This method is useful when adding the newly created timeout collector to + // timeout collectors map. + Rank() uint64 +} + +// TimeoutProcessor ingests Timeout States for a particular rank. It +// implements the algorithms for validating TSs, orchestrates their low-level +// aggregation and emits `OnPartialTimeoutCertificateCreated` and `OnTimeoutCertificateConstructedFromTimeouts` +// notifications. TimeoutProcessor cannot deduplicate TSs (this should be +// handled by the higher-level TimeoutCollector) and errors instead. Depending +// on their implementation, a TimeoutProcessor might drop timeouts or attempt to +// construct a TC. +type TimeoutProcessor[VoteT models.Unique] interface { + // Process performs processing of single timeout state. This function is safe + // to call from multiple goroutines. Expected error returns during normal + // operations: + // * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for + // incompatible rank + // * models.InvalidTimeoutError - submitted invalid timeout(invalid structure + // or invalid signature) + // * models.DuplicatedSignerError if a timeout from the same signer was + // previously already added. It does _not necessarily_ imply that the + // timeout is invalid or the sender is equivocating. + // All other errors should be treated as exceptions. + Process(timeout *models.TimeoutState[VoteT]) error +} + +// TimeoutCollectorFactory performs creation of TimeoutCollector for a given +// rank +type TimeoutCollectorFactory[VoteT models.Unique] interface { + // Create is a factory method to generate a TimeoutCollector for a given rank + // Expected error returns during normal operations: + // * models.ErrRankUnknown no rank containing the given rank is known + // All other errors should be treated as exceptions. 
+	Create(rank uint64) (TimeoutCollector[VoteT], error)
+}
+
+// TimeoutProcessorFactory performs creation of TimeoutProcessor for a given
+// rank
+type TimeoutProcessorFactory[VoteT models.Unique] interface {
+	// Create is a factory method to generate a TimeoutProcessor for a given rank
+	// Expected error returns during normal operations:
+	// * models.ErrRankUnknown no rank containing the given rank is known
+	// All other errors should be treated as exceptions.
+	Create(rank uint64) (TimeoutProcessor[VoteT], error)
+}
+
+// TimeoutCollectors encapsulates the functionality to generate, store and prune
+// `TimeoutCollector` instances (one per rank). Its main purpose is to provide a
+// higher-level API to `TimeoutAggregator` for managing and interacting with the
+// rank-specific `TimeoutCollector` instances. Implementations are concurrency
+// safe.
+type TimeoutCollectors[VoteT models.Unique] interface {
+	// GetOrCreateCollector retrieves the TimeoutCollector for the specified
+	// rank or creates one if none exists. When creating a timeout collector,
+	// the rank is used to query the consensus committee for that rank.
+	// It returns:
+	//  - (collector, true, nil) if no collector can be found by the rank, and a
+	//    new collector was created.
+	//  - (collector, false, nil) if the collector can be found by the rank.
+	//  - (nil, false, error) if running into any exception creating the timeout
+	//    collector.
+	// Expected error returns during normal operations:
+	// * models.BelowPrunedThresholdError if rank is below the pruning threshold
+	// * models.ErrRankUnknown if rank is not yet pruned but no rank containing
+	//   the given rank is known
+	GetOrCreateCollector(rank uint64) (
+		collector TimeoutCollector[VoteT],
+		created bool,
+		err error,
+	)
+
+	// PruneUpToRank prunes the timeout collectors with ranks _below_ the given
+	// value, i.e. we only retain and process timeout collectors, whose ranks are
+	// equal or larger than `lowestRetainedRank`. If `lowestRetainedRank` is
+	// smaller than the previous value, the previous value is kept and the method
+	// call is a NoOp.
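+	// For example (illustrative):
+	//
+	//	collectors.PruneUpToRank(100) // collectors for ranks below 100 are dropped
+	//	collectors.PruneUpToRank(90)  // NoOp: the retained threshold stays at 100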
+ PruneUpToRank(lowestRetainedRank uint64) +} diff --git a/consensus/consensus_tracing.go b/consensus/consensus_tracing.go new file mode 100644 index 0000000..555143c --- /dev/null +++ b/consensus/consensus_tracing.go @@ -0,0 +1,102 @@ +package consensus + +import ( + "encoding/hex" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TraceLogger defines a simple tracing interface +type TraceLogger interface { + Trace(message string, params ...LogParam) + Error(message string, err error, params ...LogParam) + With(params ...LogParam) TraceLogger +} + +type LogParam struct { + key string + value any + kind string +} + +func StringParam(key string, value string) LogParam { + return LogParam{ + key: key, + value: value, + kind: "string", + } +} + +func Uint64Param(key string, value uint64) LogParam { + return LogParam{ + key: key, + value: value, + kind: "uint64", + } +} + +func Uint32Param(key string, value uint32) LogParam { + return LogParam{ + key: key, + value: value, + kind: "uint32", + } +} + +func Int64Param(key string, value int64) LogParam { + return LogParam{ + key: key, + value: value, + kind: "int64", + } +} + +func Int32Param(key string, value int32) LogParam { + return LogParam{ + key: key, + value: value, + kind: "int32", + } +} + +func IdentityParam(key string, value models.Identity) LogParam { + return LogParam{ + key: key, + value: hex.EncodeToString([]byte(value)), + kind: "string", + } +} + +func HexParam(key string, value []byte) LogParam { + return LogParam{ + key: key, + value: hex.EncodeToString(value), + kind: "string", + } +} + +func TimeParam(key string, value time.Time) LogParam { + return LogParam{ + key: key, + value: value, + kind: "time", + } +} + +func (l LogParam) GetKey() string { + return l.key +} + +func (l LogParam) GetValue() any { + return l.value +} + +func (l LogParam) GetKind() string { + return l.kind +} + +type nilTracer struct{} + +func (nilTracer) Trace(message string) {} +func (nilTracer) Error(message string, err error) {} diff --git a/consensus/consensus_validator.go b/consensus/consensus_validator.go new file mode 100644 index 0000000..7ac8e68 --- /dev/null +++ b/consensus/consensus_validator.go @@ -0,0 +1,32 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator provides functions to validate QuorumCertificate, proposals and +// votes. +type Validator[StateT models.Unique, VoteT models.Unique] interface { + + // ValidateQuorumCertificate checks the validity of a QuorumCertificate. + // During normal operations, the following error returns are expected: + // * models.InvalidQuorumCertificateError if the QuorumCertificate is invalid + ValidateQuorumCertificate(qc models.QuorumCertificate) error + + // ValidateTimeoutCertificate checks the validity of a TimeoutCertificate. + // During normal operations, the following error returns are expected: + // * models.InvalidTimeoutCertificateError if the TimeoutCertificate is + // invalid + ValidateTimeoutCertificate(tc models.TimeoutCertificate) error + + // ValidateProposal checks the validity of a proposal. + // During normal operations, the following error returns are expected: + // * models.InvalidProposalError if the state is invalid + ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error + + // ValidateVote checks the validity of a vote. + // Returns the full entity for the voter. 
During normal operations, + // the following errors are expected: + // * models.InvalidVoteError for invalid votes + ValidateVote(vote *VoteT) (*models.WeightedIdentity, error) +} diff --git a/consensus/consensus_verifier.go b/consensus/consensus_verifier.go new file mode 100644 index 0000000..c5ff1dd --- /dev/null +++ b/consensus/consensus_verifier.go @@ -0,0 +1,45 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// Verifier is the component responsible for the cryptographic integrity of +// votes, proposals and QC's against the state they are signing. +type Verifier[VoteT models.Unique] interface { + // VerifyVote checks the cryptographic validity of a vote's `SigData` w.r.t. + // the rank and stateID. It is the responsibility of the calling code to + // ensure that `voter` is authorized to vote. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InvalidFormatError if the signature has an incompatible format. + // * models.ErrInvalidSignature is the signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyVote(vote *VoteT) error + + // VerifyQC checks the cryptographic validity of a QC's `SigData` w.r.t. the + // given rank and stateID. It is the responsibility of the calling code to + // ensure that all `signers` are authorized, without duplicates. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InvalidFormatError if `sigData` has an incompatible format + // * models.InsufficientSignaturesError if `signers is empty. + // Depending on the order of checks in the higher-level logic this error + // might be an indicator of a external byzantine input or an internal bug. + // * models.ErrInvalidSignature if a signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error + + // VerifyTimeoutCertificate checks cryptographic validity of the TC's + // `sigData` w.r.t. the given rank. It is the responsibility of the calling + // code to ensure that all `signers` are authorized, without duplicates. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InsufficientSignaturesError if `signers is empty. + // * models.InvalidFormatError if `signers`/`highQCRanks` have differing + // lengths + // * models.ErrInvalidSignature if a signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error +} diff --git a/consensus/consensus_voting.go b/consensus/consensus_voting.go new file mode 100644 index 0000000..593eaba --- /dev/null +++ b/consensus/consensus_voting.go @@ -0,0 +1,43 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VotingProvider handles voting logic by deferring decisions, collection, and +// state finalization to an outside implementation. +type VotingProvider[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] interface { + // SignVote signs a proposal, produces an output vote for aggregation and + // broadcasting. 
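+	// A rough flow sketch (aggregation elided; identifiers are illustrative):
+	//
+	//	vote, err := votingProvider.SignVote(ctx, state)
+	//	// ... votes are aggregated elsewhere into aggregatedSignature ...
+	//	qc, err := votingProvider.FinalizeQuorumCertificate(ctx, state, aggregatedSignature)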
+	SignVote(
+		ctx context.Context,
+		state *models.State[StateT],
+	) (*VoteT, error)
+	// SignTimeoutVote signs a timeout vote for the given rank, producing an
+	// output vote for aggregation and broadcasting.
+	SignTimeoutVote(
+		ctx context.Context,
+		filter []byte,
+		currentRank uint64,
+		newestQuorumCertificateRank uint64,
+	) (*VoteT, error)
+	FinalizeQuorumCertificate(
+		ctx context.Context,
+		state *models.State[StateT],
+		aggregatedSignature models.AggregatedSignature,
+	) (models.QuorumCertificate, error)
+	// FinalizeTimeout produces a timeout certificate
+	FinalizeTimeout(
+		ctx context.Context,
+		rank uint64,
+		latestQuorumCertificate models.QuorumCertificate,
+		latestQuorumCertificateRanks []uint64,
+		aggregatedSignature models.AggregatedSignature,
+	) (models.TimeoutCertificate, error)
+}
diff --git a/consensus/consensus_weight.go b/consensus/consensus_weight.go
new file mode 100644
index 0000000..78965c7
--- /dev/null
+++ b/consensus/consensus_weight.go
@@ -0,0 +1,10 @@
+package consensus
+
+// WeightProvider defines the methods for handling weighted differentiation of
+// voters, such as seniority or stake.
+type WeightProvider interface {
+	// GetWeightForBitmask returns the total weight of the given bitmask for the
+	// prover set under the filter. Bitmask is expected to be in ascending ring
+	// order.
+	GetWeightForBitmask(filter []byte, bitmask []byte) uint64
+}
diff --git a/consensus/counters/strict_monotonic_counter.go b/consensus/counters/strict_monotonic_counter.go
new file mode 100644
index 0000000..93b5023
--- /dev/null
+++ b/consensus/counters/strict_monotonic_counter.go
@@ -0,0 +1,50 @@
+package counters
+
+import "sync/atomic"
+
+// StrictMonotonicCounter is a helper struct which implements a strict monotonic
+// counter. StrictMonotonicCounter is implemented using atomic operations and
+// doesn't allow setting a value which is lower than or equal to the already
+// stored one. The counter is implemented solely with non-blocking atomic
+// operations for concurrency safety.
+type StrictMonotonicCounter struct {
+	atomicCounter uint64
+}
+
+// NewMonotonicCounter creates a new counter with the given initial value.
+func NewMonotonicCounter(initialValue uint64) StrictMonotonicCounter {
+	return StrictMonotonicCounter{
+		atomicCounter: initialValue,
+	}
+}
+
+// Set updates the value of the counter if and only if the new value is
+// strictly larger than the stored value. Returns true if the update was
+// successful, or false if the stored value is larger than or equal.
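+// Example (illustrative):
+//
+//	c := NewMonotonicCounter(5)
+//	c.Set(7) // returns true:  counter is now 7
+//	c.Set(6) // returns false: counter remains 7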
+func (c *StrictMonotonicCounter) Set(newValue uint64) bool { + for { + oldValue := c.Value() + if newValue <= oldValue { + return false + } + if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) { + return true + } + } +} + +// Value returns value which is stored in atomic variable +func (c *StrictMonotonicCounter) Value() uint64 { + return atomic.LoadUint64(&c.atomicCounter) +} + +// Increment atomically increments counter and returns updated value +func (c *StrictMonotonicCounter) Increment() uint64 { + for { + oldValue := c.Value() + newValue := oldValue + 1 + if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) { + return newValue + } + } +} diff --git a/consensus/eventhandler/event_handler.go b/consensus/eventhandler/event_handler.go new file mode 100644 index 0000000..59c6d0f --- /dev/null +++ b/consensus/eventhandler/event_handler.go @@ -0,0 +1,825 @@ +package eventhandler + +import ( + "context" + "errors" + "fmt" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// EventHandler is the main handler for individual events that trigger state +// transition. It exposes API to handle one event at a time synchronously. +// EventHandler is *not concurrency safe*. Please use the EventLoop to ensure +// that only a single go-routine executes the EventHandler's algorithms. +// EventHandler is implemented in event-driven way, it reacts to incoming events +// and performs certain actions. It doesn't perform any actions on its own. +// There are 3 main responsibilities of EventHandler, vote, propose, timeout. +// There are specific scenarios that lead to each of those actions. +// - create vote: voting logic is triggered by OnReceiveProposal, after +// receiving proposal we have all required information to create a valid +// vote. Compliance engine makes sure that we receive proposals, whose +// parents are known. Creating a vote can be triggered ONLY by receiving +// proposal. +// - create timeout: creating models.TimeoutState is triggered by +// OnLocalTimeout, after reaching deadline for current round. EventHandler +// gets notified about it and has to create a models.TimeoutState and +// broadcast it to other replicas. Creating a TO can be triggered by +// reaching round deadline or triggered as part of Bracha broadcast when +// superminority of replicas have contributed to TC creation and created a +// partial TC. +// - create a proposal: proposing logic is more complicated. Creating a +// proposal is triggered by the EventHandler receiving a QC or TC that +// induces a rank change to a rank where the replica is primary. As an edge +// case, the EventHandler can receive a QC or TC that triggers the rank +// change, but we can't create a proposal in case we are missing parent +// state the newest QC refers to. In case we already have the QC, but are +// still missing the respective parent, OnReceiveProposal can trigger the +// proposing logic as well, but only when receiving proposal for rank lower +// than active rank. To summarize, to make a valid proposal for rank N we +// need to have a QC or TC for N-1 and know the proposal with stateID +// NewestQC.Identifier. +// +// Not concurrency safe. 
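+// A minimal single-goroutine driving loop might look as follows (the channel
+// sources are illustrative; in this codebase the EventLoop owns this wiring):
+//
+//	if err := handler.Start(ctx); err != nil {
+//		return err
+//	}
+//	for {
+//		var err error
+//		select {
+//		case <-handler.TimeoutChannel():
+//			err = handler.OnLocalTimeout()
+//		case proposal := <-proposals:
+//			err = handler.OnReceiveProposal(proposal)
+//		case qc := <-quorumCertificates:
+//			err = handler.OnReceiveQuorumCertificate(qc)
+//		}
+//		if err != nil {
+//			return err
+//		}
+//	}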
+type EventHandler[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] struct { + tracer consensus.TraceLogger + paceMaker consensus.Pacemaker + stateProducer consensus.StateProducer[StateT, VoteT] + forks consensus.Forks[StateT] + store consensus.ConsensusStore[VoteT] + committee consensus.Replicas + safetyRules consensus.SafetyRules[StateT, VoteT] + notifier consensus.Consumer[StateT, VoteT] +} + +var _ consensus.EventHandler[*nilUnique, *nilUnique] = (*EventHandler[ + *nilUnique, *nilUnique, *nilUnique, *nilUnique, +])(nil) + +// NewEventHandler creates an EventHandler instance with initial components. +func NewEventHandler[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + paceMaker consensus.Pacemaker, + stateProducer consensus.StateProducer[StateT, VoteT], + forks consensus.Forks[StateT], + store consensus.ConsensusStore[VoteT], + committee consensus.Replicas, + safetyRules consensus.SafetyRules[StateT, VoteT], + notifier consensus.Consumer[StateT, VoteT], + tracer consensus.TraceLogger, +) (*EventHandler[StateT, VoteT, PeerIDT, CollectedT], error) { + e := &EventHandler[StateT, VoteT, PeerIDT, CollectedT]{ + paceMaker: paceMaker, + stateProducer: stateProducer, + forks: forks, + store: store, + safetyRules: safetyRules, + committee: committee, + notifier: notifier, + tracer: tracer, + } + return e, nil +} + +// OnReceiveQuorumCertificate processes a valid qc constructed by internal vote +// aggregator or discovered in TimeoutState. All inputs should be validated +// before feeding into this function. Assuming trusted data. No errors are +// expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveQuorumCertificate(qc models.QuorumCertificate) error { + curRank := e.paceMaker.CurrentRank() + + e.tracer.Trace( + "received QC", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("qc_rank", qc.GetRank()), + consensus.IdentityParam("state_id", qc.Identity()), + ) + e.notifier.OnReceiveQuorumCertificate(curRank, qc) + defer e.notifier.OnEventProcessed() + + newRankEvent, err := e.paceMaker.ReceiveQuorumCertificate(qc) + if err != nil { + return fmt.Errorf("could not process QC: %w", err) + } + if newRankEvent == nil { + e.tracer.Trace("QC didn't trigger rank change, nothing to do") + return nil + } + + // current rank has changed, go to new rank + e.tracer.Trace("QC triggered rank change, starting new rank now") + return e.proposeForNewRankIfPrimary() +} + +// OnReceiveTimeoutCertificate processes a valid tc constructed by internal +// timeout aggregator, discovered in TimeoutState or broadcast over the network. +// All inputs should be validated before feeding into this function. Assuming +// trusted data. No errors are expected during normal operation. 
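Because the EventHandler is documented above as not concurrency safe, all of its methods are meant to be driven from a single goroutine. A minimal sketch of such a driving loop follows; the channel plumbing and the narrow local `handler` interface are assumptions for illustration only, not the EventLoop shipped in this patch (which also queues proposals and partial TCs through the same single goroutine).

```go
package main

import (
	"context"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// handler is the slice of the EventHandler API this sketch drives; it mirrors
// methods defined in this file but is declared locally for the example.
type handler interface {
	Start(ctx context.Context) error
	TimeoutChannel() <-chan time.Time
	OnReceiveQuorumCertificate(qc models.QuorumCertificate) error
	OnReceiveTimeoutCertificate(tc models.TimeoutCertificate) error
	OnLocalTimeout() error
}

// runEventLoop funnels every event through one goroutine so the handler's
// non-concurrency-safe methods are never called in parallel.
func runEventLoop(
	ctx context.Context,
	h handler,
	qcs <-chan models.QuorumCertificate,
	tcs <-chan models.TimeoutCertificate,
) error {
	if err := h.Start(ctx); err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case qc := <-qcs:
			if err := h.OnReceiveQuorumCertificate(qc); err != nil {
				return err
			}
		case tc := <-tcs:
			if err := h.OnReceiveTimeoutCertificate(tc); err != nil {
				return err
			}
		case <-h.TimeoutChannel():
			if err := h.OnLocalTimeout(); err != nil {
				return err
			}
		}
	}
}

func main() {}
```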
+func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveTimeoutCertificate(tc models.TimeoutCertificate) error { + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace( + "received TC", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param( + "tc_newest_qc_rank", + tc.GetLatestQuorumCert().GetRank(), + ), + consensus.IdentityParam( + "tc_newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + ), + ) + e.notifier.OnReceiveTimeoutCertificate(curRank, tc) + defer e.notifier.OnEventProcessed() + + newRankEvent, err := e.paceMaker.ReceiveTimeoutCertificate(tc) + if err != nil { + return fmt.Errorf("could not process TC for rank %d: %w", tc.GetRank(), err) + } + if newRankEvent == nil { + e.tracer.Trace("TC didn't trigger rank change, nothing to do", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param( + "tc_newest_qc_rank", + tc.GetLatestQuorumCert().GetRank(), + ), + consensus.IdentityParam( + "tc_newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + )) + return nil + } + + // current rank has changed, go to new rank + e.tracer.Trace("TC triggered rank change, starting new rank now", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param( + "tc_newest_qc_rank", + tc.GetLatestQuorumCert().GetRank(), + ), + consensus.IdentityParam( + "tc_newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + )) + return e.proposeForNewRankIfPrimary() +} + +// OnReceiveProposal processes a state proposal received from another HotStuff +// consensus participant. +// All inputs should be validated before feeding into this function. Assuming +// trusted data. No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + state := proposal.State + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace( + "proposal received from compliance engine", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("state_rank", state.Rank), + consensus.IdentityParam("state_id", state.Identifier), + consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()), + consensus.IdentityParam("proposer_id", state.ProposerID), + ) + e.notifier.OnReceiveProposal(curRank, proposal) + defer e.notifier.OnEventProcessed() + + // ignore stale proposals + if (*state).Rank < e.forks.FinalizedRank() { + e.tracer.Trace( + "stale proposal", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("state_rank", state.Rank), + consensus.IdentityParam("state_id", state.Identifier), + consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()), + consensus.IdentityParam("proposer_id", state.ProposerID), + ) + return nil + } + + // store the state. 
+ err := e.forks.AddValidatedState(proposal.State) + if err != nil { + return fmt.Errorf( + "cannot add proposal to forks (%x): %w", + state.Identifier, + err, + ) + } + + _, err = e.paceMaker.ReceiveQuorumCertificate( + proposal.State.ParentQuorumCertificate, + ) + if err != nil { + return fmt.Errorf( + "could not process QC for state %x: %w", + state.Identifier, + err, + ) + } + + _, err = e.paceMaker.ReceiveTimeoutCertificate( + proposal.PreviousRankTimeoutCertificate, + ) + if err != nil { + return fmt.Errorf( + "could not process TC for state %x: %w", + state.Identifier, + err, + ) + } + + // if the state is for the current rank, then try voting for this state + err = e.processStateForCurrentRank(proposal) + if err != nil { + return fmt.Errorf("failed processing current state: %w", err) + } + e.tracer.Trace( + "proposal processed from compliance engine", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("state_rank", state.Rank), + consensus.IdentityParam("state_id", state.Identifier), + consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()), + consensus.IdentityParam("proposer_id", state.ProposerID), + ) + + // nothing to do if this proposal is for current rank + if proposal.State.Rank == e.paceMaker.CurrentRank() { + return nil + } + + return e.proposeForNewRankIfPrimary() +} + +// TimeoutChannel returns the channel for subscribing the waiting timeout on +// receiving state or votes for the current rank. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) TimeoutChannel() <-chan time.Time { + return e.paceMaker.TimeoutCh() +} + +// OnLocalTimeout handles a local timeout event by creating a +// models.TimeoutState and broadcasting it. No errors are expected during normal +// operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnLocalTimeout() error { + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace( + "timeout received from event loop", + consensus.Uint64Param("current_rank", curRank), + ) + e.notifier.OnLocalTimeout(curRank) + defer e.notifier.OnEventProcessed() + + err := e.broadcastTimeoutStateIfAuthorized() + if err != nil { + return fmt.Errorf( + "unexpected exception while processing timeout in rank %d: %w", + curRank, + err, + ) + } + return nil +} + +// OnPartialTimeoutCertificateCreated handles notification produces by the +// internal timeout aggregator. If the notification is for the current rank, a +// corresponding models.TimeoutState is broadcast to the consensus committee. No +// errors are expected during normal operation. 
+func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnPartialTimeoutCertificateCreated( + partialTC *consensus.PartialTimeoutCertificateCreated, +) error { + curRank := e.paceMaker.CurrentRank() + previousRankTimeoutCert := partialTC.PriorRankTimeoutCertificate + e.tracer.Trace( + "constructed partial TC", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param( + "qc_rank", + partialTC.NewestQuorumCertificate.GetRank(), + ), + ) + + e.notifier.OnPartialTimeoutCertificate(curRank, partialTC) + defer e.notifier.OnEventProcessed() + + // process QC, this might trigger rank change + _, err := e.paceMaker.ReceiveQuorumCertificate( + partialTC.NewestQuorumCertificate, + ) + if err != nil { + return fmt.Errorf("could not process newest QC: %w", err) + } + + // process TC, this might trigger rank change + _, err = e.paceMaker.ReceiveTimeoutCertificate(previousRankTimeoutCert) + if err != nil { + return fmt.Errorf( + "could not process TC for rank %d: %w", + previousRankTimeoutCert.GetRank(), + err, + ) + } + + // NOTE: in other cases when we have observed a rank change we will trigger + // proposing logic, this is desired logic for handling proposal, QC and TC. + // However, observing a partial TC means that superminority have timed out and + // there was at least one honest replica in that set. Honest replicas will + // never vote after timing out for current rank meaning we won't be able to + // collect supermajority of votes for a proposal made after observing partial + // TC. + + // by definition, we are allowed to produce timeout state if we have received + // partial TC for current rank + if e.paceMaker.CurrentRank() != partialTC.Rank { + return nil + } + + e.tracer.Trace( + "partial TC generated for current rank, broadcasting timeout", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param( + "qc_rank", + partialTC.NewestQuorumCertificate.GetRank(), + ), + ) + err = e.broadcastTimeoutStateIfAuthorized() + if err != nil { + return fmt.Errorf( + "unexpected exception while processing partial TC in rank %d: %w", + partialTC.Rank, + err, + ) + } + return nil +} + +// Start starts the event handler. No errors are expected during normal +// operation. CAUTION: EventHandler is not concurrency safe. The Start method +// must be executed by the same goroutine that also calls the other business +// logic methods, or concurrency safety has to be implemented externally. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) Start(ctx context.Context) error { + e.notifier.OnStart(e.paceMaker.CurrentRank()) + defer e.notifier.OnEventProcessed() + e.paceMaker.Start(ctx) + err := e.proposeForNewRankIfPrimary() + if err != nil { + return fmt.Errorf("could not start new rank: %w", err) + } + return nil +} + +// broadcastTimeoutStateIfAuthorized attempts to generate a +// models.TimeoutState, adds it to `timeoutAggregator` and broadcasts it to the +// consensus commettee. We check, whether this node, at the current rank, is +// part of the consensus committee. Otherwise, this method is functionally a +// no-op. For example, right after an rank switchover a consensus node might +// still be online but not part of the _active_ consensus committee anymore. +// Consequently, it should not broadcast timeouts anymore. No errors are +// expected during normal operation. 
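The rule applied in the function body below, and again in the proposing logic further down, is that the prior-rank TC is dropped whenever the newest QC already proves entry into the current rank. A small helper restating that invariant, purely for illustration (no such helper exists in the patch):

```go
package main

import "source.quilibrium.com/quilibrium/monorepo/consensus/models"

// pruneRedundantTC returns the timeout certificate that should accompany a
// timeout or proposal for curRank: if the newest QC is already for curRank-1,
// it alone proves entry into curRank, and attaching a TC for that same prior
// rank would make the message invalid, so the TC is dropped.
func pruneRedundantTC(
	newestQC models.QuorumCertificate,
	priorRankTC models.TimeoutCertificate,
	curRank uint64,
) models.TimeoutCertificate {
	if newestQC.GetRank()+1 == curRank {
		return nil
	}
	return priorRankTC
}

func main() {}
```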
+func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) broadcastTimeoutStateIfAuthorized() error { + curRank := e.paceMaker.CurrentRank() + newestQC := e.paceMaker.LatestQuorumCertificate() + previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate() + + if newestQC.GetRank()+1 == curRank { + // in case last rank has ended with QC and TC, make sure that only QC is + // included otherwise such timeout is invalid. This case is possible if TC + // has included QC with the same rank as the TC itself, meaning that + // newestQC.Rank == previousRankTimeoutCert.Rank + previousRankTimeoutCert = nil + } + + timeout, err := e.safetyRules.ProduceTimeout( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + if models.IsNoTimeoutError(err) { + e.tracer.Error( + "not generating timeout as this node is not part of the active committee", + err, + consensus.Uint64Param("current_rank", curRank), + ) + return nil + } + return fmt.Errorf("could not produce timeout: %w", err) + } + + // raise a notification to broadcast timeout + e.notifier.OnOwnTimeout(timeout) + e.tracer.Trace( + "broadcast TimeoutState done", + consensus.Uint64Param("current_rank", curRank), + ) + + return nil +} + +// proposeForNewRankIfPrimary will only be called when we may be able to propose +// a state, after processing a new event. +// - after entering a new rank as a result of processing a QC or TC, then we +// may propose for the newly entered rank +// - after receiving a proposal (but not changing rank), if that proposal is +// referenced by our highest known QC, and the proposal was previously +// unknown, then we can propose a state in the current rank +// +// Enforced INVARIANTS: +// - There will at most be `OnOwnProposal` notification emitted for ranks +// where this node is the leader, and none if another node is the leader. +// This holds irrespective of restarts. Formally, this prevents proposal +// equivocation. +// +// It reads the current rank, and generates a proposal if we are the leader. +// No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) proposeForNewRankIfPrimary() error { + start := time.Now() // track the start time + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace( + "deciding to propose", + consensus.Uint64Param("current_rank", curRank), + consensus.IdentityParam("self", e.committee.Self()), + ) + currentLeader, err := e.committee.LeaderForRank(curRank) + if err != nil { + return fmt.Errorf( + "failed to determine primary for new rank %d: %w", + curRank, + err, + ) + } + finalizedRank := e.forks.FinalizedRank() + + e.notifier.OnCurrentRankDetails(curRank, finalizedRank, currentLeader) + + // check that I am the primary for this rank + if e.committee.Self() != currentLeader { + e.tracer.Trace( + "not current leader, waiting", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("finalized_rank", finalizedRank), + consensus.IdentityParam("leader_id", currentLeader), + ) + return nil + } + + // attempt to generate proposal: + newestQC := e.paceMaker.LatestQuorumCertificate() + previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate() + + _, found := e.forks.GetState(newestQC.Identity()) + if !found { + // we don't know anything about state referenced by our newest QC, in this + // case we can't create a valid proposal since we can't guarantee validity + // of state payload. 
+ e.tracer.Trace( + "haven't synced the latest state yet; can't propose", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("finalized_rank", finalizedRank), + consensus.IdentityParam("leader_id", currentLeader), + ) + return nil + } + e.tracer.Trace( + "generating proposal as leader", + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("finalized_rank", finalizedRank), + consensus.IdentityParam("leader_id", currentLeader), + ) + + // Sanity checks to make sure that resulting proposal is valid: + // In its proposal, the leader for rank N needs to present evidence that it + // has legitimately entered rank N. As evidence, we include a QC or TC for + // rank N-1, which should always be available as the PaceMaker advances to + // rank N only after observing a QC or TC from rank N-1. Moreover, QC and TC + // are always processed together. As EventHandler is strictly single-threaded + // without reentrancy, we must have a QC or TC for the prior rank (curRank-1). + // Failing one of these sanity checks is a symptom of state corruption or a + // severe implementation bug. + if newestQC.GetRank()+1 != curRank { + if previousRankTimeoutCert == nil { + return fmt.Errorf("possible state corruption, expected previousRankTimeoutCert to be not nil") + } + if previousRankTimeoutCert.GetRank()+1 != curRank { + return fmt.Errorf( + "possible state corruption, don't have QC(rank=%d) and TC(rank=%d) for previous rank(currentRank=%d)", + newestQC.GetRank(), + previousRankTimeoutCert.GetRank(), + curRank, + ) + } + } else { + // In case last rank has ended with QC and TC, make sure that only QC is + // included, otherwise such proposal is invalid. This case is possible if TC + // has included QC with the same rank as the TC itself, meaning that + // newestQC.Rank == previousRankTimeoutCert.Rank + previousRankTimeoutCert = nil + } + + // Construct Own SignedProposal + // CAUTION, design constraints: + // (i) We cannot process our own proposal within the `EventHandler` right + // away. + // (ii) We cannot add our own proposal to Forks here right away. + // (iii) Metrics for the PaceMaker/CruiseControl assume that the EventHandler + // is the only caller of `TargetPublicationTime`. Technically, + // `TargetPublicationTime` records the publication delay relative to + // its _latest_ call. + // + // To satisfy all constraints, we construct the proposal here and query + // (once!) its `TargetPublicationTime`. Though, we do _not_ process our own + // states right away and instead ingest them into the EventHandler the same + // way as proposals from other consensus participants. Specifically, on the + // path through the HotStuff state machine leading to state construction, the + // node's own proposal is largely ephemeral. The proposal is handed to the + // `MessageHub` (via the `OnOwnProposal` notification including the + // `TargetPublicationTime`). The `MessageHub` waits until + // `TargetPublicationTime` and only then broadcast the proposal and puts it + // into the EventLoop's queue for inbound states. This is exactly the same way + // as proposals from other nodes are ingested by the `EventHandler`, except + // that we are skipping the ComplianceEngine (assuming that our own proposals + // are protocol-compliant). + // + // Context: + // • On constraint (i): We want to support consensus committees only + // consisting of a *single* node. 
If the EventHandler internally processed
+ // the state right away via a direct message call, the call-stack would be
+ // ever-growing and the node would crash eventually (we experienced this
+ // with a very early HotStuff implementation). Specifically, if we wanted
+ // to process the state directly without taking a detour through the
+ // EventLoop's inbound queue, we would call `OnReceiveProposal` here. The
+ // function `OnReceiveProposal` would then end up calling
+ // `proposeForNewRankIfPrimary` (this function) to generate the next
+ // proposal, which again would result in calling `OnReceiveProposal`, and
+ // so on until the call stack or memory limit is reached and the node
+ // crashes. This is only a problem for consensus committees of size 1.
+ // • On constraint (ii): When adding a proposal to Forks, Forks emits a
+ // `StateIncorporatedEvent` notification, which is observed by Cruise
+ // Control and would change its state. However, note that Cruise Control
+ // is trying to estimate the point in time when _other_ nodes are observing
+ // the proposal. The time when we broadcast the proposal (i.e.
+ // `TargetPublicationTime`) is a reasonably good estimator, but *not* the
+ // time the proposer constructed the state (because there is potentially
+ // still a significant wait until `TargetPublicationTime`).
+ //
+ // The current approach is for a node to process its own proposals at the same
+ // time and through the same code path as proposals from other nodes. This
+ // satisfies constraints (i) and (ii) and yields very strong consistency
+ // from a software design perspective.
+ // Just hypothetically, if we changed Cruise Control to be notified about
+ // own state proposals _only_ when they are broadcast (satisfying constraint
+ // (ii) without relying on the EventHandler), then we could add a proposal to
+ // Forks here right away. Nevertheless, the restriction remains that we cannot
+ // process that proposal right away within the EventHandler and instead need
+ // to put it into the EventLoop's inbound queue to support consensus
+ // committees of size 1. 
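A minimal sketch of the MessageHub-style consumer described in the comment above: it holds the node's own proposal until `TargetPublicationTime`, then broadcasts it and feeds it back through the inbound queue. The `broadcast` and `submitToEventLoop` callbacks and the untyped proposal parameter are placeholders, not the patch's actual MessageHub API.

```go
package main

import (
	"context"
	"time"
)

// ownProposalHub is an illustrative stand-in for the MessageHub behaviour
// described above: it delays publication of the node's own proposal until the
// target time, then broadcasts it and feeds it back through the same inbound
// queue used for proposals from other replicas.
type ownProposalHub struct {
	broadcast         func(proposal any)
	submitToEventLoop func(proposal any)
}

// OnOwnProposal mirrors the notification emitted by the EventHandler: the
// proposal is held back until targetPublicationTime, honouring the pacing
// decision made when the proposal was constructed.
func (h *ownProposalHub) OnOwnProposal(
	ctx context.Context,
	proposal any,
	targetPublicationTime time.Time,
) {
	select {
	case <-ctx.Done():
		return
	case <-time.After(time.Until(targetPublicationTime)):
	}
	h.broadcast(proposal)         // send to the rest of the committee
	h.submitToEventLoop(proposal) // ingest locally like any other proposal
}

func main() {}
```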
+ stateProposal, err := e.stateProducer.MakeStateProposal( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + if models.IsNoVoteError(err) { + e.tracer.Error( + "aborting state proposal to prevent equivocation (likely re-entered proposal logic due to crash)", + err, + consensus.Uint64Param("current_rank", curRank), + consensus.Uint64Param("finalized_rank", finalizedRank), + consensus.IdentityParam("leader_id", currentLeader), + ) + return nil + } + return fmt.Errorf( + "can not make state proposal for curRank %d: %w", + curRank, + err, + ) + } + targetPublicationTime := e.paceMaker.TargetPublicationTime( + stateProposal.State.Rank, + start, + stateProposal.State.ParentQuorumCertificate.Identity(), + ) // determine target publication time + e.tracer.Trace( + "forwarding proposal to communicator for broadcasting", + consensus.Uint64Param("state_rank", stateProposal.State.Rank), + consensus.TimeParam("target_publication", targetPublicationTime), + consensus.IdentityParam("state_id", stateProposal.State.Identifier), + consensus.Uint64Param("parent_rank", newestQC.GetRank()), + consensus.IdentityParam("parent_id", newestQC.Identity()), + consensus.IdentityParam("signer", stateProposal.State.ProposerID), + ) + + // emit notification with own proposal (also triggers broadcast) + e.notifier.OnOwnProposal(stateProposal, targetPublicationTime) + return nil +} + +// processStateForCurrentRank processes the state for the current rank. +// It is called AFTER the state has been stored or found in Forks +// It checks whether to vote for this state. +// No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) processStateForCurrentRank( + proposal *models.SignedProposal[StateT, VoteT], +) error { + // sanity check that state is really for the current rank: + curRank := e.paceMaker.CurrentRank() + state := proposal.State + if state.Rank != curRank { + // ignore outdated proposals in case we have moved forward + return nil + } + // leader (node ID) for next rank + nextLeader, err := e.committee.LeaderForRank(curRank + 1) + if errors.Is(err, models.ErrRankUnknown) { + // We are attempting process a state in an unknown rank + // This should never happen, because: + // * the compliance layer ensures proposals are passed to the event loop + // strictly after their parent + // * the protocol state ensures that, before incorporating the first state + // of an rank R, either R is known or we have triggered fallback mode - in + // either case the current rank is known + return fmt.Errorf("attempting to process a state for unknown rank") + } + if err != nil { + return fmt.Errorf( + "failed to determine primary for next rank %d: %w", + curRank+1, + err, + ) + } + + // safetyRules performs all the checks to decide whether to vote for this + // state or not. + err = e.ownVote(proposal, curRank, nextLeader) + if err != nil { + return fmt.Errorf("unexpected error in voting logic: %w", err) + } + + return nil +} + +// ownVote generates and forwards the own vote, if we decide to vote. +// Any errors are potential symptoms of uncovered edge cases or corrupted +// internal state (fatal). No errors are expected during normal operation. 
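The benign-versus-fatal split used throughout this file (models.IsNoVoteError in ownVote below, models.IsNoTimeoutError in the timeout path above) follows the common Go pattern of dedicated sentinel error types detected with errors.As. A generic sketch of that pattern with invented names, since the actual definitions live in consensus/models/errors.go and are not shown in this hunk:

```go
package main

import (
	"errors"
	"fmt"
)

// noVoteError is an illustrative benign error type: it signals "we decided not
// to vote", which callers treat as a normal outcome rather than a failure.
type noVoteError struct{ msg string }

func (e noVoteError) Error() string { return "no vote: " + e.msg }

func newNoVoteErrorf(format string, args ...any) error {
	return noVoteError{msg: fmt.Sprintf(format, args...)}
}

// isNoVoteError reports whether err is (or wraps) a noVoteError.
func isNoVoteError(err error) bool {
	var target noVoteError
	return errors.As(err, &target)
}

func produceVote(safe bool) (string, error) {
	if !safe {
		return "", newNoVoteErrorf("equivocation risk at current rank")
	}
	return "vote", nil
}

func main() {
	_, err := produceVote(false)
	if isNoVoteError(err) {
		fmt.Println("benign: skip voting") // expected path here
		return
	}
	if err != nil {
		panic(err) // anything else is treated as fatal, as in ownVote below
	}
}
```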
+func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ownVote( + proposal *models.SignedProposal[StateT, VoteT], + curRank uint64, + nextLeader models.Identity, +) error { + _, found := e.forks.GetState( + proposal.State.ParentQuorumCertificate.Identity(), + ) + if !found { + // we don't have parent for this proposal, we can't vote since we can't + // guarantee validity of proposals payload. Strictly speaking this shouldn't + // ever happen because compliance engine makes sure that we receive + // proposals with valid parents. + return fmt.Errorf( + "won't vote for proposal, no parent state for this proposal", + ) + } + + // safetyRules performs all the checks to decide whether to vote for this + // state or not. + ownVote, err := e.safetyRules.ProduceVote(proposal, curRank) + if err != nil { + if !models.IsNoVoteError(err) { + // unknown error, exit the event loop + return fmt.Errorf("could not produce vote: %w", err) + } + e.tracer.Trace( + "should not vote for this state", + consensus.Uint64Param("state_rank", proposal.State.Rank), + consensus.IdentityParam("state_id", proposal.State.Identifier), + consensus.Uint64Param( + "parent_rank", + proposal.State.ParentQuorumCertificate.GetRank(), + ), + consensus.IdentityParam( + "parent_id", + proposal.State.ParentQuorumCertificate.Identity(), + ), + consensus.IdentityParam("signer", proposal.State.ProposerID[:]), + ) + return nil + } + + e.tracer.Trace( + "forwarding vote to compliance engine", + consensus.Uint64Param("state_rank", proposal.State.Rank), + consensus.IdentityParam("state_id", proposal.State.Identifier), + consensus.Uint64Param( + "parent_rank", + proposal.State.ParentQuorumCertificate.GetRank(), + ), + consensus.IdentityParam( + "parent_id", + proposal.State.ParentQuorumCertificate.Identity(), + ), + consensus.IdentityParam("signer", proposal.State.ProposerID[:]), + ) + e.notifier.OnOwnVote(ownVote, nextLeader) + return nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/eventhandler/event_handler_test.go b/consensus/eventhandler/event_handler_test.go new file mode 100644 index 0000000..9b04086 --- /dev/null +++ b/consensus/eventhandler/event_handler_test.go @@ -0,0 +1,1103 @@ +package eventhandler + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" +) + +const ( + minRepTimeout float64 = 100.0 // Milliseconds + maxRepTimeout float64 = 600.0 // Milliseconds + multiplicativeIncrease float64 = 1.5 // multiplicative factor + happyPathMaxRoundFailures uint64 = 6 // number of failed rounds before first timeout increase +) + +// TestPacemaker is a real pacemaker module with logging for rank changes +type TestPacemaker[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] struct { + consensus.Pacemaker +} + +var _ consensus.Pacemaker = (*TestPacemaker[*nilUnique, *nilUnique, *nilUnique, *nilUnique])(nil) + +func NewTestPacemaker[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + timeoutController *timeout.Controller, + proposalDelayProvider consensus.ProposalDurationProvider, + notifier consensus.Consumer[StateT, VoteT], + store consensus.ConsensusStore[VoteT], +) *TestPacemaker[StateT, VoteT, PeerIDT, CollectedT] { + p, err := pacemaker.NewPacemaker[StateT, VoteT](nil, timeoutController, proposalDelayProvider, notifier, store, helper.Logger()) + if err != nil { + panic(err) + } + return &TestPacemaker[StateT, VoteT, PeerIDT, CollectedT]{p} +} + +func (p *TestPacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ReceiveQuorumCertificate(qc models.QuorumCertificate) (*models.NextRank, error) { + oldRank := p.CurrentRank() + newRank, err := p.Pacemaker.ReceiveQuorumCertificate(qc) + fmt.Printf("pacemaker.ReceiveQuorumCertificate old rank: %v, new rank: %v\n", oldRank, p.CurrentRank()) + return newRank, err +} + +func (p *TestPacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ReceiveTimeoutCertificate(tc models.TimeoutCertificate) (*models.NextRank, error) { + oldRank := p.CurrentRank() + newRank, err := p.Pacemaker.ReceiveTimeoutCertificate(tc) + fmt.Printf("pacemaker.ReceiveTimeoutCertificate old rank: %v, new rank: %v\n", oldRank, p.CurrentRank()) + return newRank, err +} + +func (p *TestPacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) LatestQuorumCertificate() models.QuorumCertificate { + return p.Pacemaker.LatestQuorumCertificate() +} + +func (p *TestPacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) PriorRankTimeoutCertificate() models.TimeoutCertificate { + return p.Pacemaker.PriorRankTimeoutCertificate() +} + +type nodelay struct{} + +// TargetPublicationTime implements consensus.ProposalDurationProvider. 
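Both nilUnique above and the nodelay provider that follows rely on the standard Go idiom of a blank `var _ Interface = (*Impl)(nil)` assignment to obtain a compile-time interface check, which is especially useful for generic types. A self-contained sketch of the idiom with invented names:

```go
package main

// Greeter is a stand-in interface; the names here are illustrative only.
type Greeter interface {
	Greet() string
}

// Box is a generic type we want to pin to the Greeter interface at compile
// time for some concrete instantiation, mirroring how nilUnique and nodelay
// are used in this patch.
type Box[T any] struct{ value T }

func (b *Box[T]) Greet() string { return "hello" }

// The blank assignment costs nothing at runtime but fails compilation as soon
// as *Box[int] stops satisfying Greeter.
var _ Greeter = (*Box[int])(nil)

func main() {}
```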
+func (n *nodelay) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time { + return timeRankEntered +} + +var _ consensus.ProposalDurationProvider = (*nodelay)(nil) + +// using a real pacemaker for testing event handler +func initPacemaker(t require.TestingT, ctx context.Context, livenessData *models.LivenessState) consensus.Pacemaker { + notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{} + tc, err := timeout.NewConfig(time.Duration(minRepTimeout*1e6), time.Duration(maxRepTimeout*1e6), multiplicativeIncrease, happyPathMaxRoundFailures, time.Duration(maxRepTimeout*1e6)) + require.NoError(t, err) + persist := &mocks.ConsensusStore[*helper.TestVote]{} + persist.On("PutLivenessState", mock.Anything).Return(nil).Maybe() + persist.On("GetLivenessState", mock.Anything).Return(livenessData, nil).Once() + pm := NewTestPacemaker[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected](timeout.NewController(tc), pacemaker.NoProposalDelay(), notifier, persist) + notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return() + notifier.On("OnQuorumCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return() + notifier.On("OnTimeoutCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return() + notifier.On("OnRankChange", mock.Anything, mock.Anything).Maybe() + pm.Start(ctx) + return pm +} + +// Committee mocks hotstuff.DynamicCommittee and allows to easily control leader for some rank. +type Committee struct { + *mocks.Replicas + // to mock I'm the leader of a certain rank, add the rank into the keys of leaders field + leaders map[uint64]struct{} +} + +func NewCommittee(t *testing.T) *Committee { + committee := &Committee{ + Replicas: mocks.NewReplicas(t), + leaders: make(map[uint64]struct{}), + } + committee.On("LeaderForRank", mock.Anything).Return(func(rank uint64) models.Identity { + _, isLeader := committee.leaders[rank] + if isLeader { + return "1" + } + return "0" + }, func(rank uint64) error { + return nil + }).Maybe() + + committee.On("Self").Return("1").Maybe() + + return committee +} + +// The SafetyRules mock will not vote for any state unless the state's ID exists in votable field's key +type SafetyRules struct { + *mocks.SafetyRules[*helper.TestState, *helper.TestVote] + votable map[models.Identity]struct{} +} + +func NewSafetyRules(t *testing.T) *SafetyRules { + safetyRules := &SafetyRules{ + SafetyRules: mocks.NewSafetyRules[*helper.TestState, *helper.TestVote](t), + votable: make(map[models.Identity]struct{}), + } + + // SafetyRules will not vote for any state, unless the stateID exists in votable map + safetyRules.On("ProduceVote", mock.Anything, mock.Anything).Return( + func(state *models.SignedProposal[*helper.TestState, *helper.TestVote], _ uint64) **helper.TestVote { + _, ok := safetyRules.votable[state.State.Identifier] + if !ok { + return nil + } + v := createVote(state.State) + return &v + }, + func(state *models.SignedProposal[*helper.TestState, *helper.TestVote], _ uint64) error { + _, ok := safetyRules.votable[state.State.Identifier] + if !ok { + return models.NewNoVoteErrorf("state not found") + } + return nil + }).Maybe() + + safetyRules.On("ProduceTimeout", mock.Anything, mock.Anything, mock.Anything).Return( + func(curRank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) *models.TimeoutState[*helper.TestVote] { + return helper.TimeoutStateFixture(func(timeout 
*models.TimeoutState[*helper.TestVote]) { + timeout.Rank = curRank + timeout.LatestQuorumCertificate = newestQC + timeout.PriorRankTimeoutCertificate = lastRankTC + }, helper.WithTimeoutVote(&helper.TestVote{Rank: curRank, ID: helper.MakeIdentity()})) + }, + func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error { return nil }).Maybe() + + return safetyRules +} + +// Forks mock allows to customize the AddState function by specifying the addProposal callbacks +type Forks struct { + *mocks.Forks[*helper.TestState] + // proposals stores all the proposals that have been added to the forks + proposals map[models.Identity]*models.State[*helper.TestState] + finalized uint64 + t require.TestingT + // addProposal is to customize the logic to change finalized rank + addProposal func(state *models.State[*helper.TestState]) error +} + +func NewForks(t *testing.T, finalized uint64) *Forks { + f := &Forks{ + Forks: mocks.NewForks[*helper.TestState](t), + proposals: make(map[models.Identity]*models.State[*helper.TestState]), + finalized: finalized, + } + + f.On("AddValidatedState", mock.Anything).Return(func(proposal *models.State[*helper.TestState]) error { + fmt.Printf("forks.AddValidatedState received State proposal for rank: %v, QC: %v\n", proposal.Rank, proposal.ParentQuorumCertificate.GetRank()) + return f.addProposal(proposal) + }).Maybe() + + f.On("FinalizedRank").Return(func() uint64 { + return f.finalized + }).Maybe() + + f.On("GetState", mock.Anything).Return(func(stateID models.Identity) *models.State[*helper.TestState] { + b := f.proposals[stateID] + return b + }, func(stateID models.Identity) bool { + b, ok := f.proposals[stateID] + var rank uint64 + if ok { + rank = b.Rank + } + fmt.Printf("forks.GetState found %v: rank: %v\n", ok, rank) + return ok + }).Maybe() + + f.On("GetStatesForRank", mock.Anything).Return(func(rank uint64) []*models.State[*helper.TestState] { + proposals := make([]*models.State[*helper.TestState], 0) + for _, b := range f.proposals { + if b.Rank == rank { + proposals = append(proposals, b) + } + } + fmt.Printf("forks.GetStatesForRank found %v state(s) for rank %v\n", len(proposals), rank) + return proposals + }).Maybe() + + f.addProposal = func(state *models.State[*helper.TestState]) error { + f.proposals[state.Identifier] = state + if state.ParentQuorumCertificate == nil { + panic(fmt.Sprintf("state has no QC: %v", state.Rank)) + } + return nil + } + + return f +} + +// StateProducer mock will always make a valid state, exactly once per rank. 
+// If it is requested to make a state twice for the same rank, returns models.NoVoteError +type StateProducer struct { + proposerID models.Identity + producedStateForRank map[uint64]bool +} + +func NewStateProducer(proposerID models.Identity) *StateProducer { + return &StateProducer{ + proposerID: proposerID, + producedStateForRank: make(map[uint64]bool), + } +} + +func (b *StateProducer) MakeStateProposal(rank uint64, qc models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.SignedProposal[*helper.TestState, *helper.TestVote], error) { + if b.producedStateForRank[rank] { + return nil, models.NewNoVoteErrorf("state already produced") + } + b.producedStateForRank[rank] = true + return helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]( + helper.WithProposal[*helper.TestState, *helper.TestVote]( + helper.MakeProposal(helper.WithState(helper.MakeState( + helper.WithStateRank[*helper.TestState](rank), + helper.WithStateQC[*helper.TestState](qc), + helper.WithStateProposer[*helper.TestState](b.proposerID))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](lastRankTC)))), nil +} + +func TestEventHandler(t *testing.T) { + suite.Run(t, new(EventHandlerSuite)) +} + +// EventHandlerSuite contains mocked state for testing event handler under different scenarios. +type EventHandlerSuite struct { + suite.Suite + + eventhandler *EventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected] + + paceMaker consensus.Pacemaker + forks *Forks + persist *mocks.ConsensusStore[*helper.TestVote] + stateProducer *StateProducer + committee *Committee + notifier *mocks.Consumer[*helper.TestState, *helper.TestVote] + safetyRules *SafetyRules + + initRank uint64 // the current rank at the beginning of the test case + endRank uint64 // the expected current rank at the end of the test case + parentProposal *models.SignedProposal[*helper.TestState, *helper.TestVote] + votingProposal *models.SignedProposal[*helper.TestState, *helper.TestVote] + qc models.QuorumCertificate + tc models.TimeoutCertificate + newrank *models.NextRank + ctx context.Context + stop context.CancelFunc +} + +func (es *EventHandlerSuite) SetupTest() { + finalized := uint64(3) + + es.parentProposal = createProposal(4, 3) + newestQC := createQC(es.parentProposal.State) + + livenessData := &models.LivenessState{ + CurrentRank: newestQC.GetRank() + 1, + LatestQuorumCertificate: newestQC, + } + + es.ctx, es.stop = context.WithCancel(context.Background()) + + es.committee = NewCommittee(es.T()) + es.paceMaker = initPacemaker(es.T(), es.ctx, livenessData) + es.forks = NewForks(es.T(), finalized) + es.persist = mocks.NewConsensusStore[*helper.TestVote](es.T()) + es.persist.On("PutStarted", mock.Anything).Return(nil).Maybe() + es.stateProducer = NewStateProducer(es.committee.Self()) + es.safetyRules = NewSafetyRules(es.T()) + es.notifier = mocks.NewConsumer[*helper.TestState, *helper.TestVote](es.T()) + es.notifier.On("OnEventProcessed").Maybe() + es.notifier.On("OnEnteringRank", mock.Anything, mock.Anything).Maybe() + es.notifier.On("OnStart", mock.Anything).Maybe() + es.notifier.On("OnReceiveProposal", mock.Anything, mock.Anything).Maybe() + es.notifier.On("OnReceiveQuorumCertificate", mock.Anything, mock.Anything).Maybe() + es.notifier.On("OnReceiveTimeoutCertificate", mock.Anything, mock.Anything).Maybe() + es.notifier.On("OnPartialTimeoutCertificate", mock.Anything, mock.Anything).Maybe() + es.notifier.On("OnLocalTimeout", mock.Anything).Maybe() + 
es.notifier.On("OnCurrentRankDetails", mock.Anything, mock.Anything, mock.Anything).Maybe()
+
+ eventhandler, err := NewEventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected](
+ es.paceMaker,
+ es.stateProducer,
+ es.forks,
+ es.persist,
+ es.committee,
+ es.safetyRules,
+ es.notifier,
+ helper.Logger(),
+ )
+ require.NoError(es.T(), err)
+
+ es.eventhandler = eventhandler
+
+ es.initRank = livenessData.CurrentRank
+ es.endRank = livenessData.CurrentRank
+ // voting state is a state for the current rank, which will trigger rank change
+ es.votingProposal = createProposal(es.paceMaker.CurrentRank(), es.parentProposal.State.Rank)
+ es.qc = helper.MakeQC(helper.WithQCState[*helper.TestState](es.votingProposal.State))
+
+ // create a TC that will trigger rank change for current rank, based on newest QC
+ es.tc = helper.MakeTC(helper.WithTCRank(es.paceMaker.CurrentRank()),
+ helper.WithTCNewestQC(es.votingProposal.State.ParentQuorumCertificate))
+ es.newrank = &models.NextRank{
+ Rank: es.votingProposal.State.Rank + 1, // the vote for the voting proposal will trigger a rank change to the next rank
+ }
+
+ // add es.parentProposal into forks, otherwise we won't vote or propose based on its QC since the parent is unknown
+ es.forks.proposals[es.parentProposal.State.Identifier] = es.parentProposal.State
+}
+
+// TestStartNewRank_ParentProposalNotFound tests the scenario where a constructed TC contains a NewestQC referencing a state we
+// don't know about; a proposal can't be generated because we can't be sure that the resulting state payload is valid.
+func (es *EventHandlerSuite) TestStartNewRank_ParentProposalNotFound() {
+ newestQC := helper.MakeQC(helper.WithQCRank(es.initRank + 10))
+ tc := helper.MakeTC(helper.WithTCRank(newestQC.GetRank()+1),
+ helper.WithTCNewestQC(newestQC))
+
+ es.endRank = tc.GetRank() + 1
+
+ // I'm the leader for the next rank
+ es.committee.leaders[es.endRank] = struct{}{}
+
+ err := es.eventhandler.OnReceiveTimeoutCertificate(tc)
+ require.NoError(es.T(), err)
+
+ require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change")
+ es.forks.AssertCalled(es.T(), "GetState", newestQC.Identity())
+ es.notifier.AssertNotCalled(es.T(), "OnOwnProposal", mock.Anything, mock.Anything)
+}
+
+// TestOnReceiveProposal_StaleProposal tests that proposals below the finalized rank are not processed at all;
+// we are not interested in this data because we have already finalized that height.
+func (es *EventHandlerSuite) TestOnReceiveProposal_StaleProposal() {
+ proposal := createProposal(es.forks.FinalizedRank()-1, es.forks.FinalizedRank()-2)
+ err := es.eventhandler.OnReceiveProposal(proposal)
+ require.NoError(es.T(), err)
+ es.forks.AssertNotCalled(es.T(), "AddState", proposal)
+}
+
+// TestOnReceiveProposal_QCOlderThanCurrentRank tests the scenario where we receive a valid proposal whose QC has an older rank;
+// the proposal's QC shouldn't trigger a rank change. 
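The fixture wired up in SetupTest above fixes a specific rank layout that the scenarios in this suite lean on. A short commented walkthrough of that arithmetic, assuming createProposal(rank, qcRank) builds a state at `rank` whose parent QC certifies `qcRank`:

```go
package main

import "fmt"

func main() {
	// Values as wired in SetupTest (a reading aid, not executed by the suite):
	finalized := uint64(3)          // forks start with finalized rank 3
	parentRank := uint64(4)         // parentProposal = createProposal(4, 3)
	newestQCRank := parentRank      // the QC over the parent certifies rank 4
	currentRank := newestQCRank + 1 // pacemaker starts at rank 5
	votingRank := currentRank       // votingProposal targets the current rank
	nextRank := votingRank + 1      // a QC for the voting proposal advances to 6

	fmt.Println(finalized, parentRank, newestQCRank, currentRank, votingRank, nextRank)
	// Output: 3 4 4 5 5 6
}
```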
+func (es *EventHandlerSuite) TestOnReceiveProposal_QCOlderThanCurrentRank() { + proposal := createProposal(es.initRank-1, es.initRank-2) + + // should not trigger rank change + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.forks.AssertCalled(es.T(), "AddValidatedState", proposal.State) +} + +// TestOnReceiveProposal_TCOlderThanCurrentRank tests scenario: received a valid proposal with QC and TC that has older rank, +// the proposal's QC shouldn't trigger rank change. +func (es *EventHandlerSuite) TestOnReceiveProposal_TCOlderThanCurrentRank() { + proposal := createProposal(es.initRank-1, es.initRank-3) + proposal.PreviousRankTimeoutCertificate = helper.MakeTC(helper.WithTCRank(proposal.State.Rank-1), helper.WithTCNewestQC(proposal.State.ParentQuorumCertificate)) + + // should not trigger rank change + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.forks.AssertCalled(es.T(), "AddValidatedState", proposal.State) +} + +// TestOnReceiveProposal_NoVote tests scenario: received a valid proposal for cur rank, but not a safe node to vote, and I'm the next leader +// should not vote. +func (es *EventHandlerSuite) TestOnReceiveProposal_NoVote() { + proposal := createProposal(es.initRank, es.initRank-1) + + // I'm the next leader + es.committee.leaders[es.initRank+1] = struct{}{} + // no vote for this proposal + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.forks.AssertCalled(es.T(), "AddValidatedState", proposal.State) +} + +// TestOnReceiveProposal_NoVote_ParentProposalNotFound tests scenario: received a valid proposal for cur rank, no parent for this proposal found +// should not vote. +func (es *EventHandlerSuite) TestOnReceiveProposal_NoVote_ParentProposalNotFound() { + proposal := createProposal(es.initRank, es.initRank-1) + + // remove parent from known proposals + delete(es.forks.proposals, proposal.State.ParentQuorumCertificate.Identity()) + + // no vote for this proposal, no parent found + err := es.eventhandler.OnReceiveProposal(proposal) + require.Error(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.forks.AssertCalled(es.T(), "AddValidatedState", proposal.State) +} + +// TestOnReceiveProposal_Vote_NextLeader tests scenario: received a valid proposal for cur rank, safe to vote, I'm the next leader +// should vote and add vote to VoteAggregator. 
+func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NextLeader() { + proposal := createProposal(es.initRank, es.initRank-1) + + // I'm the next leader + es.committee.leaders[es.initRank+1] = struct{}{} + + // proposal is safe to vote + es.safetyRules.votable[proposal.State.Identifier] = struct{}{} + + vote := &helper.TestVote{ + StateID: proposal.State.Identifier, + Rank: proposal.State.Rank, + } + + es.notifier.On("OnOwnVote", mock.MatchedBy(func(v **helper.TestVote) bool { return vote.Rank == (*v).Rank && vote.StateID == (*v).StateID }), mock.Anything).Once() + + // vote should be created for this proposal + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnReceiveProposal_Vote_NotNextLeader tests scenario: received a valid proposal for cur rank, safe to vote, I'm not the next leader +// should vote and send vote to next leader. +func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NotNextLeader() { + proposal := createProposal(es.initRank, es.initRank-1) + + // proposal is safe to vote + es.safetyRules.votable[proposal.State.Identifier] = struct{}{} + + vote := &helper.TestVote{ + StateID: proposal.State.Identifier, + Rank: proposal.State.Rank, + ID: "0", + } + + es.notifier.On("OnOwnVote", mock.MatchedBy(func(v **helper.TestVote) bool { + return vote.Rank == (*v).Rank && vote.StateID == (*v).StateID && vote.ID == (*v).ID + }), mock.Anything).Once() + + // vote should be created for this proposal + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnReceiveProposal_ProposeAfterReceivingTC tests a scenario where we have received TC which advances to rank where we are +// leader but no proposal can be created because we don't have parent proposal. After receiving missing parent proposal we have +// all available data to construct a valid proposal. We need to ensure this. +func (es *EventHandlerSuite) TestOnReceiveProposal_ProposeAfterReceivingQC() { + + qc := es.qc + + // first process QC this should advance rank + err := es.eventhandler.OnReceiveQuorumCertificate(qc) + require.NoError(es.T(), err) + require.Equal(es.T(), qc.GetRank()+1, es.paceMaker.CurrentRank(), "expect a rank change") + es.notifier.AssertNotCalled(es.T(), "OnOwnProposal", mock.Anything, mock.Anything) + + // we are leader for current rank + es.committee.leaders[es.paceMaker.CurrentRank()] = struct{}{} + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a header as the same as current rank + require.Equal(es.T(), es.paceMaker.CurrentRank(), proposal.State.Rank) + }).Once() + + // processing this proposal shouldn't trigger rank change since we have already seen QC. + // we have used QC to advance rounds, but no proposal was made because we were missing parent state + // when we have received parent state we can try proposing again. 
+ err = es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + require.Equal(es.T(), qc.GetRank()+1, es.paceMaker.CurrentRank(), "expect a rank change") +} + +// TestOnReceiveProposal_ProposeAfterReceivingTC tests a scenario where we have received TC which advances to rank where we are +// leader but no proposal can be created because we don't have parent proposal. After receiving missing parent proposal we have +// all available data to construct a valid proposal. We need to ensure this. +func (es *EventHandlerSuite) TestOnReceiveProposal_ProposeAfterReceivingTC() { + + // TC contains a QC.StateID == es.votingProposal + tc := helper.MakeTC(helper.WithTCRank(es.votingProposal.State.Rank+1), + helper.WithTCNewestQC(es.qc)) + + // first process TC this should advance rank + err := es.eventhandler.OnReceiveTimeoutCertificate(tc) + require.NoError(es.T(), err) + require.Equal(es.T(), tc.GetRank()+1, es.paceMaker.CurrentRank(), "expect a rank change") + es.notifier.AssertNotCalled(es.T(), "OnOwnProposal", mock.Anything, mock.Anything) + + // we are leader for current rank + es.committee.leaders[es.paceMaker.CurrentRank()] = struct{}{} + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a header as the same as current rank + require.Equal(es.T(), es.paceMaker.CurrentRank(), proposal.State.Rank) + }).Once() + + // processing this proposal shouldn't trigger rank change, since we have already seen QC. + // we have used QC to advance rounds, but no proposal was made because we were missing parent state + // when we have received parent state we can try proposing again. + err = es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + require.Equal(es.T(), tc.GetRank()+1, es.paceMaker.CurrentRank(), "expect a rank change") +} + +// TestOnReceiveQuorumCertificate_HappyPath tests that building a QC for current rank triggers rank change. We are not leader for next +// round, so no proposal is expected. 
+func (es *EventHandlerSuite) TestOnReceiveQuorumCertificate_HappyPath() { + // voting state exists + es.forks.proposals[es.votingProposal.State.Identifier] = es.votingProposal.State + + // a qc is built + qc := createQC(es.votingProposal.State) + + // new qc is added to forks + // rank changed + // I'm not the next leader + // haven't received state for next rank + // goes to the new rank + es.endRank++ + // not the leader of the newrank + // don't have state for the newrank + + err := es.eventhandler.OnReceiveQuorumCertificate(qc) + require.NoError(es.T(), err, "if a vote can trigger a QC to be built,"+ + "and the QC triggered a rank change, then start new rank") + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.notifier.AssertNotCalled(es.T(), "OnOwnProposal", mock.Anything, mock.Anything) +} + +// TestOnReceiveQuorumCertificate_FutureRank tests that building a QC for future rank triggers rank change +func (es *EventHandlerSuite) TestOnReceiveQuorumCertificate_FutureRank() { + // voting state exists + curRank := es.paceMaker.CurrentRank() + + // b1 is for current rank + // b2 and b3 is for future rank, but branched out from the same parent as b1 + b1 := createProposal(curRank, curRank-1) + b2 := createProposal(curRank+1, curRank-1) + b3 := createProposal(curRank+2, curRank-1) + + // a qc is built + // qc3 is for future rank + // qc2 is an older than qc3 + // since vote aggregator can concurrently process votes and build qcs, + // we prepare qcs at different rank to be processed, and verify the rank change. + qc1 := createQC(b1.State) + qc2 := createQC(b2.State) + qc3 := createQC(b3.State) + + // all three proposals are known + es.forks.proposals[b1.State.Identifier] = b1.State + es.forks.proposals[b2.State.Identifier] = b2.State + es.forks.proposals[b3.State.Identifier] = b3.State + + // test that qc for future rank should trigger rank change + err := es.eventhandler.OnReceiveQuorumCertificate(qc3) + endRank := b3.State.Rank + 1 // next rank + require.NoError(es.T(), err, "if a vote can trigger a QC to be built,"+ + "and the QC triggered a rank change, then start new rank") + require.Equal(es.T(), endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + + // the same qc would not trigger rank change + err = es.eventhandler.OnReceiveQuorumCertificate(qc3) + endRank = b3.State.Rank + 1 // next rank + require.NoError(es.T(), err, "same qc should not trigger rank change") + require.Equal(es.T(), endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + + // old QCs won't trigger rank change + err = es.eventhandler.OnReceiveQuorumCertificate(qc2) + require.NoError(es.T(), err) + require.Equal(es.T(), endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + + err = es.eventhandler.OnReceiveQuorumCertificate(qc1) + require.NoError(es.T(), err) + require.Equal(es.T(), endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnReceiveQuorumCertificate_NextLeaderProposes tests that after receiving a valid proposal for cur rank, and I'm the next leader, +// a QC can be built for the state, triggered rank change, and I will propose +func (es *EventHandlerSuite) TestOnReceiveQuorumCertificate_NextLeaderProposes() { + proposal := createProposal(es.initRank, es.initRank-1) + qc := createQC(proposal.State) + // I'm the next leader + es.committee.leaders[es.initRank+1] = struct{}{} + // qc triggered rank change + es.endRank++ + // I'm the leader of cur rank (7) + // I'm not the leader of next rank (8), trigger rank change 
+ + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a header as the same as endRank + require.Equal(es.T(), es.endRank, proposal.State.Rank) + }).Once() + + // after receiving proposal build QC and deliver it to event handler + err = es.eventhandler.OnReceiveQuorumCertificate(qc) + require.NoError(es.T(), err) + + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.forks.AssertCalled(es.T(), "AddValidatedState", proposal.State) +} + +// TestOnReceiveQuorumCertificate_ProposeOnce tests that after constructing proposal we don't attempt to create another +// proposal for same rank. +func (es *EventHandlerSuite) TestOnReceiveQuorumCertificate_ProposeOnce() { + // I'm the next leader + es.committee.leaders[es.initRank+1] = struct{}{} + + es.endRank++ + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Once() + + err := es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + // constructing QC triggers making state proposal + err = es.eventhandler.OnReceiveQuorumCertificate(es.qc) + require.NoError(es.T(), err) + + // receiving same proposal again triggers proposing logic + err = es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + es.notifier.AssertNumberOfCalls(es.T(), "OnOwnProposal", 1) +} + +// TestOnTCConstructed_HappyPath tests that building a TC for current rank triggers rank change +func (es *EventHandlerSuite) TestOnReceiveTimeoutCertificate_HappyPath() { + // voting state exists + es.forks.proposals[es.votingProposal.State.Identifier] = es.votingProposal.State + + // a tc is built + tc := helper.MakeTC(helper.WithTCRank(es.initRank), helper.WithTCNewestQC(es.votingProposal.State.ParentQuorumCertificate)) + + // expect a rank change + es.endRank++ + + err := es.eventhandler.OnReceiveTimeoutCertificate(tc) + require.NoError(es.T(), err, "TC should trigger a rank change and start of new rank") + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnTCConstructed_NextLeaderProposes tests that after receiving TC and advancing rank we as next leader create a proposal +// and broadcast it +func (es *EventHandlerSuite) TestOnReceiveTimeoutCertificate_NextLeaderProposes() { + es.committee.leaders[es.tc.GetRank()+1] = struct{}{} + es.endRank++ + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a header as the same as endRank + require.Equal(es.T(), es.endRank, proposal.State.Rank) + + // proposed state should contain valid newest QC and lastRankTC + expectedNewestQC := es.paceMaker.LatestQuorumCertificate() + require.Equal(es.T(), expectedNewestQC, proposal.State.ParentQuorumCertificate) + require.Equal(es.T(), es.paceMaker.PriorRankTimeoutCertificate(), proposal.PreviousRankTimeoutCertificate) + }).Once() + + err := es.eventhandler.OnReceiveTimeoutCertificate(es.tc) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "TC didn't trigger rank 
change") +} + +// TestOnTimeout tests that event handler produces TimeoutState and broadcasts it to other members of consensus +// committee. Additionally, It has to contribute TimeoutState to timeout aggregation process by sending it to TimeoutAggregator. +func (es *EventHandlerSuite) TestOnTimeout() { + es.notifier.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a TO with same rank as endRank + require.Equal(es.T(), es.endRank, timeoutState.Rank) + }).Once() + + err := es.eventhandler.OnLocalTimeout() + require.NoError(es.T(), err) + + // TimeoutState shouldn't trigger rank change + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnTimeout_SanityChecks tests a specific scenario where pacemaker have seen both QC and TC for previous rank +// and EventHandler tries to produce a timeout state, such timeout state is invalid if both QC and TC is present, we +// need to make sure that EventHandler filters out TC for last rank if we know about QC for same rank. +func (es *EventHandlerSuite) TestOnTimeout_SanityChecks() { + // voting state exists + es.forks.proposals[es.votingProposal.State.Identifier] = es.votingProposal.State + + // a tc is built + tc := helper.MakeTC(helper.WithTCRank(es.initRank), helper.WithTCNewestQC(es.votingProposal.State.ParentQuorumCertificate)) + + // expect a rank change + es.endRank++ + + err := es.eventhandler.OnReceiveTimeoutCertificate(tc) + require.NoError(es.T(), err, "TC should trigger a rank change and start of new rank") + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + + // receive a QC for the same rank as the TC + qc := helper.MakeQC(helper.WithQCRank(tc.GetRank())) + err = es.eventhandler.OnReceiveQuorumCertificate(qc) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "QC shouldn't trigger rank change") + require.Equal(es.T(), tc, es.paceMaker.PriorRankTimeoutCertificate(), "invalid last rank TC") + require.Equal(es.T(), qc, es.paceMaker.LatestQuorumCertificate(), "invalid newest QC") + + es.notifier.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(es.T(), ok) + require.Equal(es.T(), es.endRank, timeoutState.Rank) + require.Equal(es.T(), qc, timeoutState.LatestQuorumCertificate) + require.Nil(es.T(), timeoutState.PriorRankTimeoutCertificate) + }).Once() + + err = es.eventhandler.OnLocalTimeout() + require.NoError(es.T(), err) +} + +// TestOnTimeout_ReplicaEjected tests that EventHandler correctly handles possible errors from SafetyRules and doesn't broadcast +// timeout states when replica is ejected. 
+func (es *EventHandlerSuite) TestOnTimeout_ReplicaEjected() { + es.Run("no-timeout", func() { + *es.safetyRules.SafetyRules = *mocks.NewSafetyRules[*helper.TestState, *helper.TestVote](es.T()) + es.safetyRules.On("ProduceTimeout", mock.Anything, mock.Anything, mock.Anything).Return(nil, models.NewNoTimeoutErrorf("")) + err := es.eventhandler.OnLocalTimeout() + require.NoError(es.T(), err, "should be handled as sentinel error") + }) + es.Run("create-timeout-exception", func() { + *es.safetyRules.SafetyRules = *mocks.NewSafetyRules[*helper.TestState, *helper.TestVote](es.T()) + exception := errors.New("produce-timeout-exception") + es.safetyRules.On("ProduceTimeout", mock.Anything, mock.Anything, mock.Anything).Return(nil, exception) + err := es.eventhandler.OnLocalTimeout() + require.ErrorIs(es.T(), err, exception, "expect a wrapped exception") + }) + es.notifier.AssertNotCalled(es.T(), "OnOwnTimeout", mock.Anything) +} + +// Test100Timeout tests that receiving 100 TCs for increasing ranks advances rounds +func (es *EventHandlerSuite) Test100Timeout() { + for i := 0; i < 100; i++ { + tc := helper.MakeTC(helper.WithTCRank(es.initRank + uint64(i))) + err := es.eventhandler.OnReceiveTimeoutCertificate(tc) + es.endRank++ + require.NoError(es.T(), err) + } + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestLeaderBuild100States tests scenario where leader builds 100 proposals one after another +func (es *EventHandlerSuite) TestLeaderBuild100States() { + require.Equal(es.T(), 1, len(es.forks.proposals), "expect Forks to contain only root state") + + // I'm the leader for the first rank + es.committee.leaders[es.initRank] = struct{}{} + + totalRank := 100 + for i := 0; i < totalRank; i++ { + // I'm the leader for 100 ranks + // I'm the next leader + es.committee.leaders[es.initRank+uint64(i+1)] = struct{}{} + // I can build qc for all 100 ranks + proposal := createProposal(es.initRank+uint64(i), es.initRank+uint64(i)-1) + qc := createQC(proposal.State) + + // for first proposal we need to store the parent otherwise it won't be voted for + if i == 0 { + parentState := helper.MakeState(func(state *models.State[*helper.TestState]) { + state.Identifier = proposal.State.ParentQuorumCertificate.Identity() + state.Rank = proposal.State.ParentQuorumCertificate.GetRank() + }) + es.forks.proposals[parentState.Identifier] = parentState + } + + es.safetyRules.votable[proposal.State.Identifier] = struct{}{} + // should trigger 100 rank change + es.endRank++ + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + ownProposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + require.Equal(es.T(), proposal.State.Rank+1, ownProposal.State.Rank) + }).Once() + vote := &helper.TestVote{ + Rank: proposal.State.Rank, + StateID: proposal.State.Identifier, + } + es.notifier.On("OnOwnVote", mock.MatchedBy(func(v **helper.TestVote) bool { return vote.Rank == (*v).Rank && vote.StateID == (*v).StateID }), mock.Anything).Once() + + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + err = es.eventhandler.OnReceiveQuorumCertificate(qc) + require.NoError(es.T(), err) + } + + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + require.Equal(es.T(), totalRank+1, len(es.forks.proposals), "expect Forks to contain root state + 100 proposed states") + es.notifier.AssertExpectations(es.T()) +} + +// 
TestFollowerFollows100States tests scenario where follower receives 100 proposals one after another +func (es *EventHandlerSuite) TestFollowerFollows100States() { + // add parent proposal otherwise we can't propose + parentProposal := createProposal(es.initRank, es.initRank-1) + es.forks.proposals[parentProposal.State.Identifier] = parentProposal.State + for i := 0; i < 100; i++ { + // create each proposal as if they are created by some leader + proposal := createProposal(es.initRank+uint64(i)+1, es.initRank+uint64(i)) + // as a follower, I receive these proposals + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + es.endRank++ + } + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + require.Equal(es.T(), 100, len(es.forks.proposals)-2) +} + +// TestFollowerReceives100Forks tests scenario where follower receives 100 forks built on top of the same state +func (es *EventHandlerSuite) TestFollowerReceives100Forks() { + for i := 0; i < 100; i++ { + // create each proposal as if they are created by some leader + proposal := createProposal(es.initRank+uint64(i)+1, es.initRank-1) + proposal.PreviousRankTimeoutCertificate = helper.MakeTC(helper.WithTCRank(es.initRank+uint64(i)), + helper.WithTCNewestQC(proposal.State.ParentQuorumCertificate)) + // expect a rank change since fork can be made only if last rank has ended with TC. + es.endRank++ + // as a follower, I receive these proposals + err := es.eventhandler.OnReceiveProposal(proposal) + require.NoError(es.T(), err) + } + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + require.Equal(es.T(), 100, len(es.forks.proposals)-1) +} + +// TestStart_ProposeOnce tests that after starting event handler we don't create proposal in case we have already proposed +// for this rank. +func (es *EventHandlerSuite) TestStart_ProposeOnce() { + // I'm the next leader + es.committee.leaders[es.initRank+1] = struct{}{} + es.endRank++ + + // STEP 1: simulating events _before_ a crash: EventHandler receives proposal and then a QC for the proposal (from VoteAggregator) + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Once() + err := es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + // constructing QC triggers making state proposal + err = es.eventhandler.OnReceiveQuorumCertificate(es.qc) + require.NoError(es.T(), err) + es.notifier.AssertNumberOfCalls(es.T(), "OnOwnProposal", 1) + + // Here, a hypothetical crash would happen. + // During crash recovery, Forks and Pacemaker are recovered to have exactly the same in-memory state as before + // Start triggers proposing logic. But as our own proposal for the rank is already in Forks, we should not propose again. + err = es.eventhandler.Start(es.ctx) + require.NoError(es.T(), err) + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + + // assert that broadcast wasn't trigger again, i.e. there should have been only one event `OnOwnProposal` in total + es.notifier.AssertNumberOfCalls(es.T(), "OnOwnProposal", 1) +} + +// TestCreateProposal_SanityChecks tests that proposing logic performs sanity checks when creating new state proposal. 
+// Specifically it tests a case where TC contains QC which: TC.Rank == TC.NewestQC.Rank +func (es *EventHandlerSuite) TestCreateProposal_SanityChecks() { + // round ended with TC where TC.Rank == TC.NewestQC.Rank + tc := helper.MakeTC(helper.WithTCRank(es.initRank), + helper.WithTCNewestQC(helper.MakeQC(helper.WithQCState(es.votingProposal.State)))) + + es.forks.proposals[es.votingProposal.State.Identifier] = es.votingProposal.State + + // I'm the next leader + es.committee.leaders[tc.GetRank()+1] = struct{}{} + + es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(es.T(), ok) + // we need to make sure that produced proposal contains only QC even if there is TC for previous rank as well + require.Nil(es.T(), proposal.PreviousRankTimeoutCertificate) + }).Once() + + err := es.eventhandler.OnReceiveTimeoutCertificate(tc) + require.NoError(es.T(), err) + + require.Equal(es.T(), tc.GetLatestQuorumCert(), es.paceMaker.LatestQuorumCertificate()) + require.Equal(es.T(), tc, es.paceMaker.PriorRankTimeoutCertificate()) + require.Equal(es.T(), tc.GetRank()+1, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnReceiveProposal_ProposalForActiveRank tests that when receiving proposal for active we don't attempt to create a proposal +// Receiving proposal can trigger proposing logic only in case we have received missing state for past ranks. +func (es *EventHandlerSuite) TestOnReceiveProposal_ProposalForActiveRank() { + // receive proposal where we are leader, meaning that we have produced this proposal + es.committee.leaders[es.votingProposal.State.Rank] = struct{}{} + + err := es.eventhandler.OnReceiveProposal(es.votingProposal) + require.NoError(es.T(), err) + + es.notifier.AssertNotCalled(es.T(), "OnOwnProposal", mock.Anything, mock.Anything) +} + +// TestOnPartialTimeoutCertificateCreated_ProducedTimeout tests that when receiving partial TC for active rank we will create a timeout state +// immediately. +func (es *EventHandlerSuite) TestOnPartialTimeoutCertificateCreated_ProducedTimeout() { + partialTimeoutCertificate := &consensus.PartialTimeoutCertificateCreated{ + Rank: es.initRank, + NewestQuorumCertificate: es.parentProposal.State.ParentQuorumCertificate, + PriorRankTimeoutCertificate: nil, + } + + es.notifier.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a TO with same rank as partialTimeoutCertificate.Rank + require.Equal(es.T(), partialTimeoutCertificate.Rank, timeoutState.Rank) + }).Once() + + err := es.eventhandler.OnPartialTimeoutCertificateCreated(partialTimeoutCertificate) + require.NoError(es.T(), err) + + // partial TC shouldn't trigger rank change + require.Equal(es.T(), partialTimeoutCertificate.Rank, es.paceMaker.CurrentRank(), "incorrect rank change") +} + +// TestOnPartialTimeoutCertificateCreated_NotActiveRank tests that we don't create timeout state if partial TC was delivered for a past, non-current rank. +// NOTE: it is not possible to receive a partial timeout for a FUTURE rank, unless the partial timeout contains +// either a QC/TC allowing us to enter that rank, therefore that case is not covered here. +// See TestOnPartialTimeoutCertificateCreated_QcAndTimeoutCertificateProcessing instead. 
+func (es *EventHandlerSuite) TestOnPartialTimeoutCertificateCreated_NotActiveRank() { + partialTimeoutCertificate := &consensus.PartialTimeoutCertificateCreated{ + Rank: es.initRank - 1, + NewestQuorumCertificate: es.parentProposal.State.ParentQuorumCertificate, + } + + err := es.eventhandler.OnPartialTimeoutCertificateCreated(partialTimeoutCertificate) + require.NoError(es.T(), err) + + // partial TC shouldn't trigger rank change + require.Equal(es.T(), es.initRank, es.paceMaker.CurrentRank(), "incorrect rank change") + // we don't want to create timeout if partial TC was delivered for rank different than active one. + es.notifier.AssertNotCalled(es.T(), "OnOwnTimeout", mock.Anything) +} + +// TestOnPartialTimeoutCertificateCreated_QcAndTimeoutCertificateProcessing tests that EventHandler processes QC and TC included in consensus.PartialTimeoutCertificateCreated +// data structure. This tests cases like the following example: +// * the pacemaker is in rank 10 +// * we observe a partial timeout for rank 11 with a QC for rank 10 +// * we should change to rank 11 using the QC, then broadcast a timeout for rank 11 +func (es *EventHandlerSuite) TestOnPartialTimeoutCertificateCreated_QcAndTimeoutCertificateProcessing() { + + testOnPartialTimeoutCertificateCreated := func(partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) { + es.endRank++ + + es.notifier.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(es.T(), ok) + // it should broadcast a TO with same rank as partialTimeoutCertificate.Rank + require.Equal(es.T(), partialTimeoutCertificate.Rank, timeoutState.Rank) + }).Once() + + err := es.eventhandler.OnPartialTimeoutCertificateCreated(partialTimeoutCertificate) + require.NoError(es.T(), err) + + require.Equal(es.T(), es.endRank, es.paceMaker.CurrentRank(), "incorrect rank change") + } + + es.Run("qc-triggered-rank-change", func() { + partialTimeoutCertificate := &consensus.PartialTimeoutCertificateCreated{ + Rank: es.qc.GetRank() + 1, + NewestQuorumCertificate: es.qc, + } + testOnPartialTimeoutCertificateCreated(partialTimeoutCertificate) + }) + es.Run("tc-triggered-rank-change", func() { + tc := helper.MakeTC(helper.WithTCRank(es.endRank), helper.WithTCNewestQC(es.qc)) + partialTimeoutCertificate := &consensus.PartialTimeoutCertificateCreated{ + Rank: tc.GetRank() + 1, + NewestQuorumCertificate: tc.GetLatestQuorumCert(), + PriorRankTimeoutCertificate: tc, + } + testOnPartialTimeoutCertificateCreated(partialTimeoutCertificate) + }) +} + +func createState(rank uint64) *models.State[*helper.TestState] { + return &models.State[*helper.TestState]{ + Identifier: fmt.Sprintf("%d", rank), + Rank: rank, + } +} + +func createStateWithQC(rank uint64, qcrank uint64) *models.State[*helper.TestState] { + state := createState(rank) + parent := createState(qcrank) + state.ParentQuorumCertificate = createQC(parent) + return state +} + +func createQC(parent *models.State[*helper.TestState]) models.QuorumCertificate { + qc := &helper.TestQuorumCertificate{ + Selector: parent.Identifier, + Rank: parent.Rank, + FrameNumber: parent.Rank, + AggregatedSignature: &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + Bitmask: []byte{0x1}, + PublicKey: make([]byte, 585), + }, + } + return qc +} + +func createVote(state *models.State[*helper.TestState]) *helper.TestVote { + return &helper.TestVote{ + Rank: state.Rank, + StateID: state.Identifier, + ID: "0", + Signature: make([]byte, 
74),
+	}
+}
+
+func createProposal(rank uint64, qcrank uint64) *models.SignedProposal[*helper.TestState, *helper.TestVote] {
+	state := createStateWithQC(rank, qcrank)
+	return helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](
+		helper.WithProposal[*helper.TestState, *helper.TestVote](
+			helper.MakeProposal(helper.WithState(state))))
+}
diff --git a/consensus/eventloop/event_loop.go b/consensus/eventloop/event_loop.go
new file mode 100644
index 0000000..d20a089
--- /dev/null
+++ b/consensus/eventloop/event_loop.go
@@ -0,0 +1,382 @@
+package eventloop
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/tracker"
+	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
+)
+
+// queuedProposal is a helper structure that is used to transmit a proposal over
+// a channel. It carries an attached insertionTime that is used to measure how
+// long we have waited between queueing the proposal and it actually being
+// processed by the `EventHandler`.
+type queuedProposal[StateT models.Unique, VoteT models.Unique] struct {
+	proposal      *models.SignedProposal[StateT, VoteT]
+	insertionTime time.Time
+}
+
+// EventLoop buffers all incoming events to the hotstuff EventHandler, and feeds
+// EventHandler one event at a time.
+type EventLoop[StateT models.Unique, VoteT models.Unique] struct {
+	*lifecycle.ComponentManager
+	eventHandler                             consensus.EventHandler[StateT, VoteT]
+	proposals                                chan queuedProposal[StateT, VoteT]
+	newestSubmittedTimeoutCertificate        *tracker.NewestTCTracker
+	newestSubmittedQc                        *tracker.NewestQCTracker
+	newestSubmittedPartialTimeoutCertificate *tracker.NewestPartialTimeoutCertificateTracker
+	tcSubmittedNotifier                      chan struct{}
+	qcSubmittedNotifier                      chan struct{}
+	partialTimeoutCertificateCreatedNotifier chan struct{}
+	startTime                                time.Time
+	tracer                                   consensus.TraceLogger
+}
+
+var _ consensus.EventLoop[*nilUnique, *nilUnique] = (*EventLoop[*nilUnique, *nilUnique])(nil)
+
+// NewEventLoop creates an instance of EventLoop.
+func NewEventLoop[StateT models.Unique, VoteT models.Unique](
+	tracer consensus.TraceLogger,
+	eventHandler consensus.EventHandler[StateT, VoteT],
+	startTime time.Time,
+) (*EventLoop[StateT, VoteT], error) {
+	// we will use a buffered channel to avoid blocking the caller
+	// we can't afford to drop messages since it undermines liveness, but we also
+	// want to avoid blocking the compliance engine. We assume that we should be
+	// able to process proposals faster than the compliance engine feeds them; worst
+	// case we will fill the buffer and stall the compliance engine worker, but that
+	// should happen only if the compliance engine receives a large number of states
+	// in a short period of time (when catching up, for instance).
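+	//
+	// The notifier channels created below are capacity-1 and act purely as
+	// "something new arrived" signals: the actual QC/TC/partial-TC payloads are
+	// kept in the newest-* tracker fields, so the loop hands the most recent
+	// certificate to the EventHandler even if many were submitted while it was
+	// busy.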
+ proposals := make(chan queuedProposal[StateT, VoteT], 1000) + + el := &EventLoop[StateT, VoteT]{ + tracer: tracer, + eventHandler: eventHandler, + proposals: proposals, + tcSubmittedNotifier: make(chan struct{}, 1), + qcSubmittedNotifier: make(chan struct{}, 1), + partialTimeoutCertificateCreatedNotifier: make(chan struct{}, 1), + newestSubmittedTimeoutCertificate: tracker.NewNewestTCTracker(), + newestSubmittedQc: tracker.NewNewestQCTracker(), + newestSubmittedPartialTimeoutCertificate: tracker.NewNewestPartialTimeoutCertificateTracker(), + startTime: startTime, + } + + componentBuilder := lifecycle.NewComponentManagerBuilder() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + + // launch when scheduled by el.startTime + el.tracer.Trace(fmt.Sprintf("event loop will start at: %v", el.startTime)) + select { + case <-ctx.Done(): + return + case <-time.After(time.Until(el.startTime)): + el.tracer.Trace("starting event loop") + err := el.loop(ctx) + if err != nil { + el.tracer.Error("irrecoverable event loop error", err) + ctx.Throw(err) + } + } + }) + el.ComponentManager = componentBuilder.Build() + + return el, nil +} + +// loop executes the core HotStuff logic in a single thread. It picks inputs +// from the various inbound channels and executes the EventHandler's respective +// method for processing this input. During normal operations, the EventHandler +// is not expected to return any errors, as all inputs are assumed to be fully +// validated (or produced by trusted components within the node). Therefore, +// any error is a symptom of state corruption, bugs or violation of API +// contracts. In all cases, continuing operations is not an option, i.e. we exit +// the event loop and return an exception. +func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error { + err := el.eventHandler.Start(ctx) + if err != nil { + return fmt.Errorf("could not start event handler: %w", err) + } + + shutdownSignaled := ctx.Done() + timeoutCertificates := el.tcSubmittedNotifier + quorumCertificates := el.qcSubmittedNotifier + partialTCs := el.partialTimeoutCertificateCreatedNotifier + + for { + // Giving timeout events the priority to be processed first. + // This is to prevent attacks from malicious nodes that attempt + // to block honest nodes' pacemaker from progressing by sending + // other events. + timeoutChannel := el.eventHandler.TimeoutChannel() + + // the first select makes sure we process timeouts with priority + select { + + // if we receive the shutdown signal, exit the loop + case <-shutdownSignaled: + el.tracer.Trace("shutting down event loop") + return nil + + // processing timeout or partial TC event are top priority since + // they allow node to contribute to TC aggregation when replicas can't + // make progress on happy path + case <-timeoutChannel: + el.tracer.Trace("received timeout") + err = el.eventHandler.OnLocalTimeout() + if err != nil { + return fmt.Errorf("could not process timeout: %w", err) + } + + // At this point, we have received and processed an event from the timeout + // channel. A timeout also means that we have made progress. A new timeout + // will have been started and el.eventHandler.TimeoutChannel() will be a + // NEW channel (for the just-started timeout). Very important to start the + // for loop from the beginning, to continue the with the new timeout + // channel! 
+ continue + + case <-partialTCs: + el.tracer.Trace("received partial timeout") + err = el.eventHandler.OnPartialTimeoutCertificateCreated( + el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(), + ) + if err != nil { + return fmt.Errorf("could not process partial created TC event: %w", err) + } + + // At this point, we have received and processed partial TC event, it + // could have resulted in several scenarios: + // 1. a rank change with potential voting or proposal creation + // 2. a created and broadcast timeout state + // 3. QC and TC didn't result in rank change and no timeout was created + // since we have already timed out or the partial TC was created for rank + // different from current one. + continue + + default: + el.tracer.Trace("non-priority event") + + // fall through to non-priority events + } + + // select for state headers/QCs here + select { + + // same as before + case <-shutdownSignaled: + el.tracer.Trace("shutting down event loop") + return nil + + // same as before + case <-timeoutChannel: + el.tracer.Trace("received timeout") + + err = el.eventHandler.OnLocalTimeout() + if err != nil { + return fmt.Errorf("could not process timeout: %w", err) + } + + // if we have a new proposal, process it + case queuedItem := <-el.proposals: + el.tracer.Trace("received proposal") + + proposal := queuedItem.proposal + err = el.eventHandler.OnReceiveProposal(proposal) + if err != nil { + return fmt.Errorf( + "could not process proposal %x: %w", + proposal.State.Identifier, + err, + ) + } + + el.tracer.Trace( + "state proposal has been processed successfully", + consensus.Uint64Param("rank", proposal.State.Rank), + ) + + // if we have a new QC, process it + case <-quorumCertificates: + el.tracer.Trace("received quorum certificate") + err = el.eventHandler.OnReceiveQuorumCertificate( + *el.newestSubmittedQc.NewestQC(), + ) + if err != nil { + return fmt.Errorf("could not process QC: %w", err) + } + + // if we have a new TC, process it + case <-timeoutCertificates: + el.tracer.Trace("received timeout certificate") + err = el.eventHandler.OnReceiveTimeoutCertificate( + *el.newestSubmittedTimeoutCertificate.NewestTC(), + ) + if err != nil { + return fmt.Errorf("could not process TC: %w", err) + } + + case <-partialTCs: + el.tracer.Trace("received partial timeout certificate") + err = el.eventHandler.OnPartialTimeoutCertificateCreated( + el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(), + ) + if err != nil { + return fmt.Errorf("could no process partial created TC event: %w", err) + } + } + } +} + +// SubmitProposal pushes the received state to the proposals channel +func (el *EventLoop[StateT, VoteT]) SubmitProposal( + proposal *models.SignedProposal[StateT, VoteT], +) { + queueItem := queuedProposal[StateT, VoteT]{ + proposal: proposal, + insertionTime: time.Now(), + } + select { + case el.proposals <- queueItem: + case <-el.ComponentManager.ShutdownSignal(): + return + } +} + +// onTrustedQC pushes the received QC (which MUST be validated) to the +// quorumCertificates channel +func (el *EventLoop[StateT, VoteT]) onTrustedQC(qc *models.QuorumCertificate) { + if el.newestSubmittedQc.Track(qc) { + select { + case el.qcSubmittedNotifier <- struct{}{}: + default: + } + } +} + +// onTrustedTC pushes the received TC (which MUST be validated) to the +// timeoutCertificates channel +func (el *EventLoop[StateT, VoteT]) onTrustedTC(tc *models.TimeoutCertificate) { + if el.newestSubmittedTimeoutCertificate.Track(tc) { + select { + case 
el.tcSubmittedNotifier <- struct{}{}: + default: + } + } else { + qc := (*tc).GetLatestQuorumCert() + if el.newestSubmittedQc.Track(&qc) { + select { + case el.qcSubmittedNotifier <- struct{}{}: + default: + } + } + } +} + +// OnTimeoutCertificateConstructedFromTimeouts pushes the received TC to the +// timeoutCertificates channel +func (el *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts( + tc models.TimeoutCertificate, +) { + el.onTrustedTC(&tc) +} + +// OnPartialTimeoutCertificateCreated created a +// consensus.PartialTimeoutCertificateCreated payload and pushes it into +// partialTimeoutCertificateCreated buffered channel for further processing by +// EventHandler. Since we use buffered channel this function can block if buffer +// is full. +func (el *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) { + event := &consensus.PartialTimeoutCertificateCreated{ + Rank: rank, + NewestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + } + if el.newestSubmittedPartialTimeoutCertificate.Track(event) { + select { + case el.partialTimeoutCertificateCreatedNotifier <- struct{}{}: + default: + } + } +} + +// OnNewQuorumCertificateDiscovered pushes already validated QCs that were +// submitted from TimeoutAggregator to the event handler +func (el *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered( + qc models.QuorumCertificate, +) { + el.onTrustedQC(&qc) +} + +// OnNewTimeoutCertificateDiscovered pushes already validated TCs that were +// submitted from TimeoutAggregator to the event handler +func (el *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered( + tc models.TimeoutCertificate, +) { + el.onTrustedTC(&tc) +} + +// OnQuorumCertificateConstructedFromVotes implements +// consensus.VoteCollectorConsumer and pushes received qc into processing +// pipeline. +func (el *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes( + qc models.QuorumCertificate, +) { + el.onTrustedQC(&qc) +} + +// OnTimeoutProcessed implements consensus.TimeoutCollectorConsumer and is no-op +func (el *EventLoop[StateT, VoteT]) OnTimeoutProcessed( + timeout *models.TimeoutState[VoteT], +) { +} + +// OnVoteProcessed implements consensus.VoteCollectorConsumer and is no-op +func (el *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) {} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
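+// Like the other nilUnique methods it only panics: nilUnique exists purely to
+// satisfy the compile-time interface assertions and is never used at runtime.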
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/eventloop/event_loop_test.go b/consensus/eventloop/event_loop_test.go new file mode 100644 index 0000000..6fa7937 --- /dev/null +++ b/consensus/eventloop/event_loop_test.go @@ -0,0 +1,262 @@ +package eventloop + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +// TestEventLoop performs unit testing of event loop, checks if submitted events are propagated +// to event handler as well as handling of timeouts. +func TestEventLoop(t *testing.T) { + suite.Run(t, new(EventLoopTestSuite)) +} + +type EventLoopTestSuite struct { + suite.Suite + + eh *mocks.EventHandler[*helper.TestState, *helper.TestVote] + cancel context.CancelFunc + + eventLoop *EventLoop[*helper.TestState, *helper.TestVote] +} + +func (s *EventLoopTestSuite) SetupTest() { + s.eh = mocks.NewEventHandler[*helper.TestState, *helper.TestVote](s.T()) + s.eh.On("Start", mock.Anything).Return(nil).Maybe() + s.eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1)).Maybe() + s.eh.On("OnLocalTimeout").Return(nil).Maybe() + + eventLoop, err := NewEventLoop(helper.Logger(), s.eh, time.Time{}) + require.NoError(s.T(), err) + s.eventLoop = eventLoop + + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + signalerCtx := unittest.NewMockSignalerContext(s.T(), ctx) + + s.eventLoop.Start(signalerCtx) + unittest.RequireCloseBefore(s.T(), s.eventLoop.Ready(), 100*time.Millisecond, "event loop not started") +} + +func (s *EventLoopTestSuite) TearDownTest() { + s.cancel() + unittest.RequireCloseBefore(s.T(), s.eventLoop.Done(), 100*time.Millisecond, "event loop not stopped") +} + +// TestReadyDone tests if event loop stops internal worker thread +func (s *EventLoopTestSuite) TestReadyDone() { + time.Sleep(1 * time.Second) + go func() { + s.cancel() + }() + unittest.RequireCloseBefore(s.T(), s.eventLoop.Done(), 100*time.Millisecond, "event loop not stopped") +} + +// Test_SubmitQC tests that submitted proposal is eventually sent to event handler for processing +func (s *EventLoopTestSuite) Test_SubmitProposal() { + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + processed := atomic.NewBool(false) + s.eh.On("OnReceiveProposal", proposal).Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + s.eventLoop.SubmitProposal(proposal) + require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) +} + +// Test_SubmitQC tests that submitted QC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing +func (s *EventLoopTestSuite) Test_SubmitQC() { + // qcIngestionFunction is the archetype for EventLoop.OnQuorumCertificateConstructedFromVotes and EventLoop.OnNewQuorumCertificateDiscovered + type qcIngestionFunction func(models.QuorumCertificate) + + testQCIngestionFunction := func(f qcIngestionFunction, qcRank uint64) { + qc := helper.MakeQC(helper.WithQCRank(qcRank)) + processed := atomic.NewBool(false) + 
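+		// the handler mock below flips `processed` once this exact QC reaches
+		// EventHandler.OnReceiveQuorumCertificate, so the assertion can wait for
+		// the asynchronous delivery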
s.eh.On("OnReceiveQuorumCertificate", qc).Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + f(qc) + require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) + } + + s.Run("QCs handed to EventLoop.OnQuorumCertificateConstructedFromVotes are forwarded to EventHandler", func() { + testQCIngestionFunction(s.eventLoop.OnQuorumCertificateConstructedFromVotes, 100) + }) + + s.Run("QCs handed to EventLoop.OnNewQuorumCertificateDiscovered are forwarded to EventHandler", func() { + testQCIngestionFunction(s.eventLoop.OnNewQuorumCertificateDiscovered, 101) + }) +} + +// Test_SubmitTC tests that submitted TC is eventually sent to `EventHandler.OnReceiveTimeoutCertificate` for processing +func (s *EventLoopTestSuite) Test_SubmitTC() { + // tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered + type tcIngestionFunction func(models.TimeoutCertificate) + + testTCIngestionFunction := func(f tcIngestionFunction, tcRank uint64) { + tc := helper.MakeTC(helper.WithTCRank(tcRank)) + processed := atomic.NewBool(false) + s.eh.On("OnReceiveTimeoutCertificate", tc).Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + f(tc) + require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) + } + + s.Run("TCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() { + testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100) + }) + + s.Run("TCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() { + testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 101) + }) +} + +// Test_SubmitTC_IngestNewestQC tests that included QC in TC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing +func (s *EventLoopTestSuite) Test_SubmitTC_IngestNewestQC() { + // tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered + type tcIngestionFunction func(models.TimeoutCertificate) + + testTCIngestionFunction := func(f tcIngestionFunction, tcRank, qcRank uint64) { + tc := helper.MakeTC(helper.WithTCRank(tcRank), + helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(qcRank)))) + processed := atomic.NewBool(false) + s.eh.On("OnReceiveQuorumCertificate", tc.GetLatestQuorumCert()).Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + f(tc) + require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) + } + + // process initial TC, this will track the newest TC + s.eh.On("OnReceiveTimeoutCertificate", mock.Anything).Return(nil).Once() + s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts(helper.MakeTC( + helper.WithTCRank(100), + helper.WithTCNewestQC( + helper.MakeQC( + helper.WithQCRank(80), + ), + ), + )) + + s.Run("QCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() { + testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100, 99) + }) + + s.Run("QCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() { + testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 100, 100) + }) +} + +// Test_OnPartialTimeoutCertificateCreated tests that event loop delivers partialTimeoutCertificateCreated events to event handler. 
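+// The rank, newest QC and prior-rank TC passed to
+// EventLoop.OnPartialTimeoutCertificateCreated are expected to arrive at the
+// EventHandler bundled into a single consensus.PartialTimeoutCertificateCreated value.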
+func (s *EventLoopTestSuite) Test_OnPartialTimeoutCertificateCreated() { + rank := uint64(1000) + newestQC := helper.MakeQC(helper.WithQCRank(rank - 10)) + previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(rank-1), helper.WithTCNewestQC(newestQC)) + + processed := atomic.NewBool(false) + partialTimeoutCertificateCreated := &consensus.PartialTimeoutCertificateCreated{ + Rank: rank, + NewestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + } + s.eh.On("OnPartialTimeoutCertificateCreated", partialTimeoutCertificateCreated).Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + s.eventLoop.OnPartialTimeoutCertificateCreated(rank, newestQC, previousRankTimeoutCert) + require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) +} + +// TestEventLoop_Timeout tests that event loop delivers timeout events to event handler under pressure +func TestEventLoop_Timeout(t *testing.T) { + eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{} + processed := atomic.NewBool(false) + eh.On("Start", mock.Anything).Return(nil).Once() + eh.On("OnReceiveQuorumCertificate", mock.Anything).Return(nil).Maybe() + eh.On("OnReceiveProposal", mock.Anything).Return(nil).Maybe() + eh.On("OnLocalTimeout").Run(func(args mock.Arguments) { + processed.Store(true) + }).Return(nil).Once() + + eventLoop, err := NewEventLoop(helper.Logger(), eh, time.Time{}) + require.NoError(t, err) + + eh.On("TimeoutChannel").Return(time.After(100 * time.Millisecond)) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := unittest.NewMockSignalerContext(t, ctx) + eventLoop.Start(signalerCtx) + + unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not stopped") + + time.Sleep(10 * time.Millisecond) + + var wg sync.WaitGroup + wg.Add(2) + + // spam with proposals and QCs + go func() { + defer wg.Done() + for !processed.Load() { + qc := helper.MakeQC() + eventLoop.OnQuorumCertificateConstructedFromVotes(qc) + } + }() + + go func() { + defer wg.Done() + for !processed.Load() { + eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]()) + } + }() + + require.Eventually(t, processed.Load, time.Millisecond*200, time.Millisecond*10) + unittest.AssertReturnsBefore(t, func() { wg.Wait() }, time.Millisecond*200) + + cancel() + unittest.RequireCloseBefore(t, eventLoop.Done(), 100*time.Millisecond, "event loop not stopped") +} + +// TestReadyDoneWithStartTime tests that event loop correctly starts and schedules start of processing +// when startTime argument is used +func TestReadyDoneWithStartTime(t *testing.T) { + eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{} + eh.On("Start", mock.Anything).Return(nil) + eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1)) + eh.On("OnLocalTimeout").Return(nil) + + startTimeDuration := 2 * time.Second + startTime := time.Now().Add(startTimeDuration) + eventLoop, err := NewEventLoop(helper.Logger(), eh, startTime) + require.NoError(t, err) + + done := make(chan struct{}) + eh.On("OnReceiveProposal", mock.Anything).Run(func(args mock.Arguments) { + require.True(t, time.Now().After(startTime)) + close(done) + }).Return(nil).Once() + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := unittest.NewMockSignalerContext(t, ctx) + eventLoop.Start(signalerCtx) + + unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not started") + + 
eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]()) + + unittest.RequireCloseBefore(t, done, startTimeDuration+100*time.Millisecond, "proposal wasn't received") + cancel() + unittest.RequireCloseBefore(t, eventLoop.Done(), 100*time.Millisecond, "event loop not stopped") +} diff --git a/consensus/example/generic_consensus_example.go b/consensus/example/generic_consensus_example.go deleted file mode 100644 index db4ce96..0000000 --- a/consensus/example/generic_consensus_example.go +++ /dev/null @@ -1,1102 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "slices" - "sync" - "time" - - "go.uber.org/zap" - "source.quilibrium.com/quilibrium/monorepo/consensus" -) - -// Example using the generic state machine from the consensus package - -// ConsensusData represents the state data -type ConsensusData struct { - Round uint64 - Hash string - Votes map[string]interface{} - Proof interface{} - IsProver bool - Timestamp time.Time - ProposerID string -} - -// Identity implements Unique interface -func (c ConsensusData) Identity() consensus.Identity { - return fmt.Sprintf("%s-%d", c.Hash, c.Round) -} - -func (c ConsensusData) Rank() uint64 { - return c.Round -} - -func (c ConsensusData) Clone() consensus.Unique { - return ConsensusData{ - Round: c.Round, - Hash: c.Hash, - Votes: c.Votes, - Proof: c.Proof, - IsProver: c.IsProver, - Timestamp: c.Timestamp, - ProposerID: c.ProposerID, - } -} - -// Vote represents a vote in the consensus -type Vote struct { - NodeID string - Round uint64 - VoteValue string - Timestamp time.Time - ProposerID string -} - -// Identity implements Unique interface -func (v Vote) Identity() consensus.Identity { - return fmt.Sprintf("%s-%d-%s", v.ProposerID, v.Round, v.VoteValue) -} - -func (v Vote) Rank() uint64 { - return v.Round -} - -func (v Vote) Clone() consensus.Unique { - return Vote{ - NodeID: v.NodeID, - Round: v.Round, - VoteValue: v.VoteValue, - Timestamp: v.Timestamp, - ProposerID: v.ProposerID, - } -} - -// PeerID represents a peer identifier -type PeerID struct { - ID string -} - -// Identity implements Unique interface -func (p PeerID) Identity() consensus.Identity { - return p.ID -} - -func (p PeerID) Rank() uint64 { - return 0 -} - -func (p PeerID) Clone() consensus.Unique { - return p -} - -// CollectedData represents collected mutations -type CollectedData struct { - Round uint64 - Mutations []string - Timestamp time.Time -} - -// Identity implements Unique interface -func (c CollectedData) Identity() consensus.Identity { - return fmt.Sprintf("collected-%d", c.Timestamp.Unix()) -} - -func (c CollectedData) Rank() uint64 { - return c.Round -} - -func (c CollectedData) Clone() consensus.Unique { - return CollectedData{ - Mutations: slices.Clone(c.Mutations), - Timestamp: c.Timestamp, - } -} - -// MockSyncProvider implements SyncProvider -type MockSyncProvider struct { - logger *zap.Logger -} - -func (m *MockSyncProvider) Synchronize( - existing *ConsensusData, - ctx context.Context, -) (<-chan *ConsensusData, <-chan error) { - dataCh := make(chan *ConsensusData, 1) - errCh := make(chan error, 1) - - go func() { - defer close(dataCh) - defer close(errCh) - - m.logger.Info("synchronizing...") - select { - case <-time.After(10 * time.Millisecond): - m.logger.Info("sync complete") - if existing != nil { - dataCh <- existing - } else { - dataCh <- &ConsensusData{ - Round: 0, - Hash: "genesis", - Votes: make(map[string]interface{}), - Timestamp: time.Now(), - } - } - errCh <- nil - case <-ctx.Done(): - errCh <- 
ctx.Err() - } - }() - - return dataCh, errCh -} - -// MockVotingProvider implements VotingProvider -type MockVotingProvider struct { - logger *zap.Logger - votes map[string]*Vote - currentRound uint64 - voteTarget int - mu sync.Mutex - isMalicious bool - nodeID string - messageBus *MessageBus -} - -func NewMockVotingProvider( - logger *zap.Logger, - voteTarget int, - nodeID string, -) *MockVotingProvider { - return &MockVotingProvider{ - logger: logger, - votes: make(map[string]*Vote), - voteTarget: voteTarget, - nodeID: nodeID, - } -} - -func NewMaliciousVotingProvider( - logger *zap.Logger, - voteTarget int, - nodeID string, -) *MockVotingProvider { - return &MockVotingProvider{ - logger: logger, - votes: make(map[string]*Vote), - voteTarget: voteTarget, - isMalicious: true, - nodeID: nodeID, - } -} - -func (m *MockVotingProvider) SendProposal( - proposal *ConsensusData, - ctx context.Context, -) error { - m.logger.Info("sending proposal", - zap.Uint64("round", proposal.Round), - zap.String("hash", proposal.Hash)) - - if m.messageBus != nil { - // Make a copy to avoid sharing pointers between nodes - proposalCopy := &ConsensusData{ - Round: proposal.Round, - Hash: proposal.Hash, - Votes: make(map[string]interface{}), - Proof: proposal.Proof, - IsProver: proposal.IsProver, - Timestamp: proposal.Timestamp, - ProposerID: proposal.ProposerID, - } - // Copy votes map - for k, v := range proposal.Votes { - proposalCopy.Votes[k] = v - } - - m.messageBus.Broadcast(Message{ - Type: "proposal", - Sender: m.nodeID, - Data: proposalCopy, - }) - } - - return nil -} - -func (m *MockVotingProvider) DecideAndSendVote( - proposals map[consensus.Identity]*ConsensusData, - ctx context.Context, -) (PeerID, *Vote, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Log available proposals - m.logger.Info("deciding vote", - zap.Int("proposal_count", len(proposals)), - zap.String("node_id", m.nodeID)) - - nodes := []string{ - "prover-node-1", - "validator-node-1", - "validator-node-2", - "validator-node-3", - } - - var chosenProposal *ConsensusData - var chosenID consensus.Identity - if len(proposals) > 3 { - leaderIdx := int(proposals[nodes[0]].Round % uint64(len(nodes))) - - chosenProposal = proposals[nodes[leaderIdx]] - chosenID = nodes[leaderIdx] - if chosenProposal == nil { - chosenProposal = proposals[nodes[(leaderIdx+1)%len(nodes)]] - chosenID = nodes[(leaderIdx+1)%len(nodes)] - } - m.logger.Info("found proposal", - zap.String("from", chosenID), - zap.Uint64("round", chosenProposal.Round)) - } - if chosenProposal == nil { - return PeerID{}, nil, fmt.Errorf("no proposals to vote on") - } - - vote := &Vote{ - NodeID: m.nodeID, - Round: chosenProposal.Round, - VoteValue: "approve", - Timestamp: time.Now(), - ProposerID: chosenID, - } - - m.votes[vote.NodeID] = vote - m.logger.Info("decided and sent vote", - zap.String("node_id", vote.NodeID), - zap.String("vote", vote.VoteValue), - zap.Uint64("round", vote.Round), - zap.String("for_proposal", chosenID)) - - if m.messageBus != nil { - // Make a copy to avoid sharing pointers - voteCopy := &Vote{ - NodeID: vote.NodeID, - Round: vote.Round, - VoteValue: vote.VoteValue, - Timestamp: vote.Timestamp, - ProposerID: vote.ProposerID, - } - m.messageBus.Broadcast(Message{ - Type: "vote", - Sender: m.nodeID, - Data: voteCopy, - }) - } - - return PeerID{ID: chosenID}, vote, nil -} - -func (m *MockVotingProvider) SendVote(vote *Vote, ctx context.Context) ( - PeerID, - error, -) { - m.logger.Info("re-sent vote", - zap.String("node_id", vote.NodeID), - zap.String("vote", 
vote.VoteValue), - zap.Uint64("round", vote.Round), - zap.String("for_proposal", vote.ProposerID)) - - if m.messageBus != nil { - // Make a copy to avoid sharing pointers - voteCopy := &Vote{ - NodeID: vote.NodeID, - Round: vote.Round, - VoteValue: vote.VoteValue, - Timestamp: vote.Timestamp, - ProposerID: vote.ProposerID, - } - m.messageBus.Broadcast(Message{ - Type: "vote", - Sender: m.nodeID, - Data: voteCopy, - }) - } - return PeerID{ID: vote.ProposerID}, nil -} - -func (m *MockVotingProvider) IsQuorum( - proposalVotes map[consensus.Identity]*Vote, - ctx context.Context, -) (bool, error) { - m.mu.Lock() - defer m.mu.Unlock() - - m.logger.Info("checking quorum", - zap.Int("target", m.voteTarget)) - totalVotes := 0 - fmt.Printf("%s %+v\n", m.nodeID, proposalVotes) - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposerID] - if !ok { - voteCount[votes.ProposerID] = 1 - } else { - voteCount[votes.ProposerID] = count + 1 - } - totalVotes += 1 - - if count >= m.voteTarget { - return true, nil - } - } - if totalVotes >= m.voteTarget { - return false, errors.New("split quorum") - } - - return false, nil -} - -func (m *MockVotingProvider) FinalizeVotes( - proposals map[consensus.Identity]*ConsensusData, - proposalVotes map[consensus.Identity]*Vote, - ctx context.Context, -) (*ConsensusData, PeerID, error) { - // Count approvals - m.logger.Info("finalizing votes", - zap.Int("total_proposals", len(proposals))) - winnerCount := 0 - var winnerProposal *ConsensusData = nil - var winnerProposer PeerID - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposerID] - if !ok { - voteCount[votes.ProposerID] = 1 - } else { - voteCount[votes.ProposerID] = count + 1 - } - } - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - voteCount := voteCount[proposal.ProposerID] - if voteCount > winnerCount { - winnerCount = voteCount - winnerProposal = proposal - winnerProposer = PeerID{ID: peerID} - } - } - - m.logger.Info("vote summary", - zap.Int("approvals", winnerCount), - zap.Int("required", m.voteTarget)) - - if winnerCount < m.voteTarget { - return nil, PeerID{}, fmt.Errorf( - "not enough approvals: %d < %d", - winnerCount, - m.voteTarget, - ) - } - - if winnerProposal != nil { - return winnerProposal, winnerProposer, nil - } - - // Pick the first proposal - for id, prop := range proposals { - // Create a new finalized state based on the chosen proposal - finalizedState := &ConsensusData{ - Round: prop.Round, - Hash: prop.Hash, - Votes: make(map[string]interface{}), - Proof: prop.Proof, - IsProver: prop.IsProver, - Timestamp: time.Now(), - ProposerID: id, - } - // Copy votes to avoid pointer sharing - for k, v := range prop.Votes { - finalizedState.Votes[k] = v - } - - m.logger.Info("finalized state", - zap.Uint64("round", finalizedState.Round), - zap.String("hash", finalizedState.Hash), - zap.String("proposer", id)) - return finalizedState, PeerID{ID: id}, nil - } - - return nil, PeerID{}, fmt.Errorf("no proposals to finalize") -} - -func (m *MockVotingProvider) SendConfirmation( - finalized *ConsensusData, - ctx context.Context, -) error { - if finalized == nil { - m.logger.Warn("cannot send confirmation for nil state") - return fmt.Errorf("cannot send confirmation for nil state") - } - - m.logger.Info("sending confirmation", - zap.Uint64("round", finalized.Round), - zap.String("hash", finalized.Hash)) - - if m.messageBus != nil { - // Make a copy to avoid sharing 
pointers - confirmationCopy := &ConsensusData{ - Round: finalized.Round, - Hash: finalized.Hash, - Votes: make(map[string]interface{}), - Proof: finalized.Proof, - IsProver: finalized.IsProver, - Timestamp: finalized.Timestamp, - ProposerID: finalized.ProposerID, - } - // Copy votes map - for k, v := range finalized.Votes { - confirmationCopy.Votes[k] = v - } - - m.messageBus.Broadcast(Message{ - Type: "confirmation", - Sender: m.nodeID, - Data: confirmationCopy, - }) - } - - return nil -} - -func (m *MockVotingProvider) Reset() { - m.mu.Lock() - defer m.mu.Unlock() - m.votes = make(map[string]*Vote) - m.logger.Info( - "reset voting provider", - zap.Uint64("current_round", m.currentRound), - ) -} - -func (m *MockVotingProvider) SetRound(round uint64) { - m.mu.Lock() - defer m.mu.Unlock() - m.currentRound = round - m.logger.Info("voting provider round updated", zap.Uint64("round", round)) -} - -// MockLeaderProvider implements LeaderProvider -type MockLeaderProvider struct { - logger *zap.Logger - isProver bool - nodeID string -} - -func (m *MockLeaderProvider) GetNextLeaders( - prior *ConsensusData, - ctx context.Context, -) ([]PeerID, error) { - // Simple round-robin leader selection - round := uint64(0) - if prior != nil { - round = prior.Round - } - - nodes := []string{ - "prover-node-1", - "validator-node-1", - "validator-node-2", - "validator-node-3", - } - - // Select leader based on round - leaderIdx := int(round % uint64(len(nodes))) - leaders := []PeerID{ - {ID: nodes[leaderIdx]}, - {ID: nodes[uint64(leaderIdx+1)%uint64(len(nodes))]}, - {ID: nodes[uint64(leaderIdx+2)%uint64(len(nodes))]}, - {ID: nodes[uint64(leaderIdx+3)%uint64(len(nodes))]}, - } - - m.logger.Info("selected next leaders", - zap.Uint64("round", round), - zap.String("leader", leaders[0].ID)) - - return leaders, nil -} - -func (m *MockLeaderProvider) ProveNextState( - prior *ConsensusData, - collected CollectedData, - ctx context.Context, -) (*ConsensusData, error) { - priorRound := uint64(0) - priorHash := "genesis" - if prior != nil { - priorRound = prior.Round - priorHash = prior.Hash - } - - m.logger.Info("generating proof", - zap.Uint64("prior_round", priorRound), - zap.String("prior_hash", priorHash), - zap.Int("mutations", len(collected.Mutations))) - - select { - case <-time.After(500 * time.Millisecond): - proof := map[string]interface{}{ - "proof": "mock_proof_data", - "timestamp": time.Now(), - "prover": m.nodeID, - } - - newState := &ConsensusData{ - Round: priorRound + 1, - Hash: fmt.Sprintf("block_%d", priorRound+1), - Votes: make(map[string]interface{}), - Proof: proof, - IsProver: true, - Timestamp: time.Now(), - ProposerID: m.nodeID, - } - - return newState, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// MockLivenessProvider implements LivenessProvider -type MockLivenessProvider struct { - logger *zap.Logger - round uint64 - nodeID string - messageBus *MessageBus -} - -func (m *MockLivenessProvider) Collect( - ctx context.Context, -) (CollectedData, error) { - m.logger.Info("collecting mutations") - - // Simulate collecting some mutations - mutations := []string{ - "mutation_1", - "mutation_2", - "mutation_3", - } - - return CollectedData{ - Round: m.round, - Mutations: mutations, - Timestamp: time.Now(), - }, nil -} - -func (m *MockLivenessProvider) SendLiveness( - prior *ConsensusData, - collected CollectedData, - ctx context.Context, -) error { - round := uint64(0) - if prior != nil { - round = prior.Round - } - - m.logger.Info("sending liveness signal", - zap.Uint64("round", 
round), - zap.Int("mutations", len(collected.Mutations))) - - if m.messageBus != nil { - // Make a copy to avoid sharing pointers - collectedCopy := CollectedData{ - Round: round + 1, - Mutations: make([]string, len(collected.Mutations)), - Timestamp: collected.Timestamp, - } - copy(collectedCopy.Mutations, collected.Mutations) - - m.messageBus.Broadcast(Message{ - Type: "liveness_check", - Sender: m.nodeID, - Data: collectedCopy, - }) - } - - return nil -} - -// ConsensusNode represents a node using the generic state machine -type ConsensusNode struct { - sm *consensus.StateMachine[ - ConsensusData, - Vote, - PeerID, - CollectedData, - ] - logger *zap.Logger - nodeID string - ctx context.Context - cancel context.CancelFunc - messageBus *MessageBus - msgChan chan Message - votingProvider *MockVotingProvider - livenessProvider *MockLivenessProvider - isMalicious bool -} - -// NewConsensusNode creates a new consensus node -func NewConsensusNode( - nodeID string, - isProver bool, - voteTarget int, - logger *zap.Logger, -) *ConsensusNode { - return newConsensusNodeWithBehavior( - nodeID, - isProver, - voteTarget, - logger, - false, - ) -} - -// NewMaliciousNode creates a new malicious consensus node -func NewMaliciousNode( - nodeID string, - isProver bool, - voteTarget int, - logger *zap.Logger, -) *ConsensusNode { - return newConsensusNodeWithBehavior( - nodeID, - isProver, - voteTarget, - logger, - true, - ) -} - -func newConsensusNodeWithBehavior( - nodeID string, - isProver bool, - voteTarget int, - logger *zap.Logger, - isMalicious bool, -) *ConsensusNode { - // Create initial consensus data - initialData := &ConsensusData{ - Round: 0, - Hash: "genesis", - Votes: make(map[string]interface{}), - IsProver: isProver, - Timestamp: time.Now(), - ProposerID: "genesis", - } - - // Create mock implementations - syncProvider := &MockSyncProvider{logger: logger} - - var votingProvider *MockVotingProvider - if isMalicious { - votingProvider = NewMaliciousVotingProvider(logger, voteTarget, nodeID) - } else { - votingProvider = NewMockVotingProvider(logger, voteTarget, nodeID) - } - - leaderProvider := &MockLeaderProvider{ - logger: logger, - isProver: isProver, - nodeID: nodeID, - } - - livenessProvider := &MockLivenessProvider{ - logger: logger, - nodeID: nodeID, - } - - // Create the state machine - sm := consensus.NewStateMachine( - PeerID{ID: nodeID}, - initialData, - true, - func() uint64 { return uint64(3) }, - syncProvider, - votingProvider, - leaderProvider, - livenessProvider, - tracer{logger: logger}, - ) - - ctx, cancel := context.WithCancel(context.Background()) - - node := &ConsensusNode{ - sm: sm, - logger: logger, - nodeID: nodeID, - ctx: ctx, - cancel: cancel, - votingProvider: votingProvider, - livenessProvider: livenessProvider, - isMalicious: isMalicious, - } - - // Add transition listener - sm.AddListener(&NodeTransitionListener{ - logger: logger, - node: node, - }) - - return node -} - -type tracer struct { - logger *zap.Logger -} - -// Error implements consensus.TraceLogger. -func (t tracer) Error(message string, err error) { - t.logger.Error(message, zap.Error(err)) -} - -// Trace implements consensus.TraceLogger. 
-func (t tracer) Trace(message string) { - t.logger.Debug(message) -} - -// Start begins the consensus node -func (n *ConsensusNode) Start() error { - n.logger.Info("starting consensus node", zap.String("node_id", n.nodeID)) - - // Start monitoring for messages - go n.monitor() - - return n.sm.Start() -} - -// Stop halts the consensus node -func (n *ConsensusNode) Stop() error { - n.logger.Info("stopping consensus node", zap.String("node_id", n.nodeID)) - n.cancel() - return n.sm.Stop() -} - -// SetMessageBus connects the node to the message bus -func (n *ConsensusNode) SetMessageBus(mb *MessageBus) { - n.messageBus = mb - n.msgChan = mb.Subscribe(n.nodeID) - - // Also set message bus on providers - if n.votingProvider != nil { - n.votingProvider.messageBus = mb - } - if n.livenessProvider != nil { - n.livenessProvider.messageBus = mb - } -} - -// monitor handles incoming messages -func (n *ConsensusNode) monitor() { - for { - select { - case <-n.ctx.Done(): - return - case msg := <-n.msgChan: - n.handleMessage(msg) - } - } -} - -// handleMessage processes messages from other nodes -func (n *ConsensusNode) handleMessage(msg Message) { - n.logger.Debug("received message", - zap.String("type", msg.Type), - zap.String("from", msg.Sender)) - - switch msg.Type { - case "proposal": - if proposal, ok := msg.Data.(*ConsensusData); ok { - n.sm.ReceiveProposal(PeerID{ID: msg.Sender}, proposal) - } - case "vote": - if vote, ok := msg.Data.(*Vote); ok { - n.sm.ReceiveVote( - PeerID{ID: vote.ProposerID}, - PeerID{ID: msg.Sender}, - vote, - ) - } - case "liveness_check": - if collected, ok := msg.Data.(CollectedData); ok { - n.sm.ReceiveLivenessCheck(PeerID{ID: msg.Sender}, collected) - } - case "confirmation": - if confirmation, ok := msg.Data.(*ConsensusData); ok { - n.sm.ReceiveConfirmation(PeerID{ID: msg.Sender}, confirmation) - } - } -} - -// NodeTransitionListener handles state transitions -type NodeTransitionListener struct { - logger *zap.Logger - node *ConsensusNode -} - -func (l *NodeTransitionListener) OnTransition( - from consensus.State, - to consensus.State, - event consensus.Event, -) { - l.logger.Info("state transition", - zap.String("node_id", l.node.nodeID), - zap.String("from", string(from)), - zap.String("to", string(to)), - zap.String("event", string(event))) - - // Handle state-specific actions - switch to { - case consensus.StateVoting: - if from != consensus.StateVoting { - go l.handleEnterVoting() - } - case consensus.StateCollecting: - go l.handleEnterCollecting() - case consensus.StatePublishing: - go l.handleEnterPublishing() - } -} - -func (l *NodeTransitionListener) handleEnterVoting() { - // Wait a bit to ensure we're in voting state - time.Sleep(50 * time.Millisecond) - - // Malicious nodes exhibit Byzantine behavior - if l.node.isMalicious { - l.logger.Warn( - "MALICIOUS NODE: Executing Byzantine behavior", - zap.String("node_id", l.node.nodeID), - ) - - // Byzantine behavior: Send different votes to different nodes - nodes := []string{ - "prover-node-1", - "validator-node-1", - "validator-node-2", - "validator-node-3", - } - voteValues := []string{"reject", "reject", "approve", "reject"} - - for i, targetNode := range nodes { - if targetNode == l.node.nodeID { - continue - } - - // Create conflicting vote - vote := &Vote{ - NodeID: l.node.nodeID, - Round: 0, // Will be updated based on proposals - VoteValue: voteValues[i], - Timestamp: time.Now(), - ProposerID: targetNode, - } - - l.logger.Warn( - "MALICIOUS: Sending conflicting vote", - zap.String("node_id", 
l.node.nodeID), - zap.String("target", targetNode), - zap.String("vote", voteValues[i]), - ) - - if i == 0 && l.node.messageBus != nil { - // Make a copy to avoid sharing pointers - voteCopy := &Vote{ - NodeID: vote.NodeID, - Round: vote.Round, - VoteValue: vote.VoteValue, - Timestamp: vote.Timestamp, - ProposerID: vote.ProposerID, - } - l.node.messageBus.Broadcast(Message{ - Type: "vote", - Sender: l.node.nodeID, - Data: voteCopy, - }) - } - } - - // Also try to vote multiple times with same value - time.Sleep(100 * time.Millisecond) - doubleVote := &Vote{ - NodeID: l.node.nodeID, - Round: 0, - VoteValue: "approve", - Timestamp: time.Now(), - ProposerID: nodes[0], - } - - l.logger.Warn( - "MALICIOUS: Attempting double vote", - zap.String("node_id", l.node.nodeID), - ) - - l.node.sm.ReceiveVote( - PeerID{ID: nodes[0]}, - PeerID{ID: l.node.nodeID}, - doubleVote, - ) - - if l.node.messageBus != nil { - // Make a copy to avoid sharing pointers - doubleVoteCopy := &Vote{ - NodeID: doubleVote.NodeID, - Round: doubleVote.Round, - VoteValue: doubleVote.VoteValue, - Timestamp: doubleVote.Timestamp, - ProposerID: doubleVote.ProposerID, - } - l.node.messageBus.Broadcast(Message{ - Type: "vote", - Sender: l.node.nodeID, - Data: doubleVoteCopy, - }) - } - - return - } - - l.logger.Info("entering voting state", - zap.String("node_id", l.node.nodeID)) -} - -func (l *NodeTransitionListener) handleEnterCollecting() { - l.logger.Info("entered collecting state", - zap.String("node_id", l.node.nodeID)) - - // Reset vote handler for new round - l.node.votingProvider.Reset() -} - -func (l *NodeTransitionListener) handleEnterPublishing() { - l.logger.Info("entered publishing state", - zap.String("node_id", l.node.nodeID)) -} - -// MessageBus simulates network communication -type MessageBus struct { - mu sync.RWMutex - subscribers map[string]chan Message -} - -type Message struct { - Type string - Sender string - Data interface{} -} - -func NewMessageBus() *MessageBus { - return &MessageBus{ - subscribers: make(map[string]chan Message), - } -} - -func (mb *MessageBus) Subscribe(nodeID string) chan Message { - mb.mu.Lock() - defer mb.mu.Unlock() - - ch := make(chan Message, 100) - mb.subscribers[nodeID] = ch - return ch -} - -func (mb *MessageBus) Broadcast(msg Message) { - mb.mu.RLock() - defer mb.mu.RUnlock() - - for nodeID, ch := range mb.subscribers { - if nodeID != msg.Sender { - select { - case ch <- msg: - default: - } - } - } -} - -func main() { - logger, _ := zap.NewDevelopment() - defer logger.Sync() - - // Create message bus - messageBus := NewMessageBus() - - // Create nodes (1 prover, 2 validators, 1 malicious validator) - // Note: We need 4 nodes total with vote target of 3 to demonstrate Byzantine - // fault tolerance - nodes := []*ConsensusNode{ - NewConsensusNode("prover-node-1", true, 3, logger.Named("prover")), - NewConsensusNode("validator-node-1", true, 3, logger.Named("validator1")), - NewConsensusNode("validator-node-2", true, 3, logger.Named("validator2")), - NewMaliciousNode("validator-node-3", false, 3, logger.Named("malicious")), - } - - // Connect nodes to message bus - for _, node := range nodes { - node.SetMessageBus(messageBus) - } - - // Start all nodes - logger.Info("=== Starting Consensus Network with Generic State Machine ===") - logger.Info("Using the generic state machine from consensus package") - logger.Warn("Network includes 1 MALICIOUS node (validator-node-3) demonstrating Byzantine behavior") - - for _, node := range nodes { - if err := node.Start(); err != nil { - 
logger.Fatal("failed to start node", - zap.String("node_id", node.nodeID), - zap.Error(err)) - } - } - - // Run for a while - time.Sleep(30 * time.Second) - - // Print statistics - logger.Info("=== Node Statistics ===") - for _, node := range nodes { - viz := consensus.NewStateMachineViz(node.sm) - - logger.Info(fmt.Sprintf("\nStats for %s:\n%s", - node.nodeID, - viz.GetStateStats())) - - logger.Info("final state", - zap.String("node_id", node.nodeID), - zap.String("current_state", string(node.sm.GetState())), - zap.Uint64("transition_count", node.sm.GetTransitionCount()), - zap.Bool("is_malicious", node.isMalicious)) - } - - // Generate visualization - if len(nodes) > 0 { - viz := consensus.NewStateMachineViz(nodes[0].sm) - logger.Info("\nState Machine Diagram:\n" + viz.GenerateMermaidDiagram()) - } - - // Stop all nodes - logger.Info("=== Stopping Consensus Network ===") - for _, node := range nodes { - if err := node.Stop(); err != nil { - logger.Error("failed to stop node", - zap.String("node_id", node.nodeID), - zap.Error(err)) - } - } - - time.Sleep(2 * time.Second) -} diff --git a/consensus/forest/leveled_forest.go b/consensus/forest/leveled_forest.go new file mode 100644 index 0000000..1ca20d3 --- /dev/null +++ b/consensus/forest/leveled_forest.go @@ -0,0 +1,394 @@ +package forest + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LevelledForest contains multiple trees (which is a potentially disconnected +// planar graph). Each vertex in the graph has a level and a hash. A vertex can +// only have one parent, which must have strictly smaller level. A vertex can +// have multiple children, all with strictly larger level. +// A LevelledForest provides the ability to prune all vertices up to a specific +// level. A tree whose root is below the pruning threshold might decompose into +// multiple disconnected subtrees as a result of pruning. +// By design, the LevelledForest does _not_ touch the parent information for +// vertices that are on the lowest retained level. Thereby, it is possible to +// initialize the LevelledForest with a root vertex at the lowest retained +// level, without this root needing to have a parent. Furthermore, the root +// vertex can be at level 0 and in absence of a parent still satisfy the +// condition that any parent must be of lower level (mathematical principle of +// acuous truth) without the implementation needing to worry about unsigned +// integer underflow. +// +// LevelledForest is NOT safe for concurrent use by multiple goroutines. +type LevelledForest struct { + vertices VertexSet + verticesAtLevel map[uint64]VertexList + size uint64 + LowestLevel uint64 +} + +type VertexList []*vertexContainer +type VertexSet map[models.Identity]*vertexContainer + +// vertexContainer holds information about a tree vertex. Internally, we +// distinguish between +// - FULL container: has non-nil value for vertex. +// Used for vertices, which have been added to the tree. +// - EMPTY container: has NIL value for vertex. +// Used for vertices, which have NOT been added to the tree, but are +// referenced by vertices in the tree. 
An empty container is converted to a +// full container when the respective vertex is added to the tree +type vertexContainer struct { + id models.Identity + level uint64 + children VertexList + + // the following are only set if the state is actually known + vertex Vertex +} + +// NewLevelledForest initializes a LevelledForest +func NewLevelledForest(lowestLevel uint64) *LevelledForest { + return &LevelledForest{ + vertices: make(VertexSet), + verticesAtLevel: make(map[uint64]VertexList), + LowestLevel: lowestLevel, + } +} + +// PruneUpToLevel prunes all states UP TO but NOT INCLUDING `level`. +func (f *LevelledForest) PruneUpToLevel(level uint64) error { + if level < f.LowestLevel { + return fmt.Errorf( + "new lowest level %d cannot be smaller than previous last retained level %d", + level, + f.LowestLevel, + ) + } + if len(f.vertices) == 0 { + f.LowestLevel = level + return nil + } + + elementsPruned := 0 + + // to optimize the pruning large level-ranges, we compare: + // * the number of levels for which we have stored vertex containers: + // len(f.verticesAtLevel) + // * the number of levels that need to be pruned: level-f.LowestLevel + // We iterate over the dimension which is smaller. + if uint64(len(f.verticesAtLevel)) < level-f.LowestLevel { + for l, vertices := range f.verticesAtLevel { + if l < level { + for _, v := range vertices { + if !f.isEmptyContainer(v) { + elementsPruned++ + } + delete(f.vertices, v.id) + } + delete(f.verticesAtLevel, l) + } + } + } else { + for l := f.LowestLevel; l < level; l++ { + verticesAtLevel := f.verticesAtLevel[l] + for _, v := range verticesAtLevel { + if !f.isEmptyContainer(v) { + elementsPruned++ + } + delete(f.vertices, v.id) + } + delete(f.verticesAtLevel, l) + + } + } + f.LowestLevel = level + f.size -= uint64(elementsPruned) + return nil +} + +// HasVertex returns true iff full vertex exists. +func (f *LevelledForest) HasVertex(id models.Identity) bool { + container, exists := f.vertices[id] + return exists && !f.isEmptyContainer(container) +} + +// isEmptyContainer returns true iff vertexContainer container is empty, i.e. +// full vertex itself has not been added +func (f *LevelledForest) isEmptyContainer( + vertexContainer *vertexContainer, +) bool { + return vertexContainer.vertex == nil +} + +// GetVertex returns (, true) if the vertex with `id` and `level` +// was found (nil, false) if full vertex is unknown +func (f *LevelledForest) GetVertex(id models.Identity) (Vertex, bool) { + container, exists := f.vertices[id] + if !exists || f.isEmptyContainer(container) { + return nil, false + } + return container.vertex, true +} + +// GetSize returns the total number of vertices above the lowest pruned level. +// Note this call is not concurrent-safe, caller is responsible to ensure +// concurrency safety. +func (f *LevelledForest) GetSize() uint64 { + return f.size +} + +// GetChildren returns a VertexIterator to iterate over the children +// An empty VertexIterator is returned, if no vertices are known whose parent is +// `id`. 
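// Illustrative sketch, not part of the patch: PruneUpToLevel above walks
// whichever dimension is smaller -- the set of stored levels or the numeric
// range [LowestLevel, level) -- before deleting pruned levels. The standalone
// toy below restates that strategy with a plain map index; toyIndex and
// pruneUpTo are hypothetical names and are not part of the consensus/forest
// package.
package main

import "fmt"

type toyIndex struct {
	verticesAtLevel map[uint64][]string // level -> vertex IDs (toy stand-in)
	lowestLevel     uint64
}

// pruneUpTo drops every level strictly below `level`, iterating over the
// cheaper of the two dimensions described above.
func (t *toyIndex) pruneUpTo(level uint64) {
	if level < t.lowestLevel {
		return // never prune backwards
	}
	if uint64(len(t.verticesAtLevel)) < level-t.lowestLevel {
		// few stored levels, wide prune range: walk the map
		for l := range t.verticesAtLevel {
			if l < level {
				delete(t.verticesAtLevel, l)
			}
		}
	} else {
		// narrow prune range: walk the levels one by one
		for l := t.lowestLevel; l < level; l++ {
			delete(t.verticesAtLevel, l)
		}
	}
	t.lowestLevel = level
}

func main() {
	t := &toyIndex{
		verticesAtLevel: map[uint64][]string{1: {"a"}, 2: {"b"}, 7: {"c"}},
		lowestLevel:     1,
	}
	t.pruneUpTo(5)
	fmt.Println(t.verticesAtLevel, t.lowestLevel) // map[7:[c]] 5
}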
+func (f *LevelledForest) GetChildren(id models.Identity) VertexIterator { + // if vertex does not exist, container will be nil + if container, ok := f.vertices[id]; ok { + return newVertexIterator(container.children) + } + return newVertexIterator(nil) // VertexIterator gracefully handles nil slices +} + +// GetNumberOfChildren returns number of children of given vertex +func (f *LevelledForest) GetNumberOfChildren(id models.Identity) int { + // if vertex does not exist, container is the default zero value for + // vertexContainer, which contains a nil-slice for its children + container := f.vertices[id] + num := 0 + for _, child := range container.children { + if child.vertex != nil { + num++ + } + } + return num +} + +// GetVerticesAtLevel returns a VertexIterator to iterate over the Vertices at +// the specified level. An empty VertexIterator is returned, if no vertices are +// known at the specified level. If `level` is already pruned, an empty +// VertexIterator is returned. +func (f *LevelledForest) GetVerticesAtLevel(level uint64) VertexIterator { + return newVertexIterator(f.verticesAtLevel[level]) +} + +// GetNumberOfVerticesAtLevel returns the number of full vertices at given +// level. A full vertex is a vertex that was explicitly added to the forest. In +// contrast, an empty vertex container represents a vertex that is _referenced_ +// as parent by one or more full vertices, but has not been added itself to the +// forest. We only count vertices that have been explicitly added to the forest +// and not yet pruned. (In comparision, we do _not_ count vertices that are +// _referenced_ as parent by vertices, but have not been added themselves). +func (f *LevelledForest) GetNumberOfVerticesAtLevel(level uint64) int { + num := 0 + for _, container := range f.verticesAtLevel[level] { + if !f.isEmptyContainer(container) { + num++ + } + } + return num +} + +// AddVertex adds vertex to forest if vertex is within non-pruned levels +// Handles repeated addition of same vertex (keeps first added vertex). +// If vertex is at or below pruning level: method is NoOp. +// UNVALIDATED: +// requires that vertex would pass validity check LevelledForest.VerifyVertex(vertex). +func (f *LevelledForest) AddVertex(vertex Vertex) { + if vertex.Level() < f.LowestLevel { + return + } + container := f.getOrCreateVertexContainer(vertex.VertexID(), vertex.Level()) + if !f.isEmptyContainer(container) { // the vertex was already stored + return + } + // container is empty, i.e. full vertex is new and should be stored in container + container.vertex = vertex // add vertex to container + f.registerWithParent(container) + f.size += 1 +} + +// registerWithParent retrieves the parent and registers the given vertex as a +// child. For a state, whose level equal to the pruning threshold, we do not +// inspect the parent at all. Thereby, this implementation can gracefully handle +// the corner case where the tree has a defined end vertex (distinct root). This +// is commonly the case in statechain (genesis, or spork root state). +// Mathematically, this means that this library can also represent bounded +// trees. +func (f *LevelledForest) registerWithParent(vertexContainer *vertexContainer) { + // caution, necessary for handling bounded trees: + // For root vertex (genesis state) the rank is _exactly_ at LowestLevel. For + // these states, a parent does not exist. In the implementation, we + // deliberately do not call the `Parent()` method, as its output is + // conceptually undefined. 
Thereby, we can gracefully handle the corner case + // of + // vertex.level = vertex.Parent().Level = LowestLevel = 0 + if vertexContainer.level <= f.LowestLevel { // check (a) + return + } + + _, parentRank := vertexContainer.vertex.Parent() + if parentRank < f.LowestLevel { + return + } + parentContainer := f.getOrCreateVertexContainer( + vertexContainer.vertex.Parent(), + ) + parentContainer.children = append(parentContainer.children, vertexContainer) +} + +// getOrCreateVertexContainer returns the vertexContainer if there exists one +// or creates a new vertexContainer and adds it to the internal data structures. +// (i.e. there exists an empty or full container with the same id but different +// level). +func (f *LevelledForest) getOrCreateVertexContainer( + id models.Identity, + level uint64, +) *vertexContainer { + container, exists := f.vertices[id] + if !exists { + container = &vertexContainer{ + id: id, + level: level, + } + f.vertices[container.id] = container + vertices := f.verticesAtLevel[container.level] + f.verticesAtLevel[container.level] = append(vertices, container) + } + return container +} + +// VerifyVertex verifies that adding vertex `v` would yield a valid Levelled +// Forest. Specifically, we verify that _all_ of the following conditions are +// satisfied: +// +// 1. `v.Level()` must be strictly larger than the level that `v` reports +// for its parent (maintains an acyclic graph). +// +// 2. If a vertex with the same ID as `v.VertexID()` exists in the graph or is +// referenced by another vertex within the graph, the level must be +// identical. (In other words, we don't have vertices with the same ID but +// different level) +// +// 3. Let `ParentLevel`, `ParentID` denote the level, ID that `v` reports for +// its parent. If a vertex with `ParentID` exists (or is referenced by other +// vertices as their parent), we require that the respective level is +// identical to `ParentLevel`. +// +// Notes: +// - If `v.Level()` has already been pruned, adding it to the forest is a +// NoOp. Hence, any vertex with level below the pruning threshold +// automatically passes. +// - By design, the LevelledForest does _not_ touch the parent information for +// vertices that are on the lowest retained level. Thereby, it is possible +// to initialize the LevelledForest with a root vertex at the lowest +// retained level, without this root needing to have a parent. Furthermore, +// the root vertex can be at level 0 and in absence of a parent still +// satisfy the condition that any parent must be of lower level +// (mathematical principle of vacuous truth) without the implementation +// needing to worry about unsigned integer underflow. +// +// Error returns: +// - InvalidVertexError if the input vertex is invalid for insertion to the +// forest. +func (f *LevelledForest) VerifyVertex(v Vertex) error { + if v.Level() < f.LowestLevel { + return nil + } + + storedContainer, haveVertexContainer := f.vertices[v.VertexID()] + if !haveVertexContainer { // have no vertex with same id stored + // the only thing remaining to check is the parent information + return f.ensureConsistentParent(v) + } + + // Found a vertex container, i.e. `v` already exists, or it is referenced by + // some other vertex. 
In all cases, `v.Level()` should match the + // vertexContainer's information + if v.Level() != storedContainer.level { + return NewInvalidVertexErrorf( + v, + "level conflicts with stored vertex with same id (%d!=%d)", + v.Level(), + storedContainer.level, + ) + } + + // vertex container is empty, i.e. `v` is referenced by some other vertex as + // its parent: + if f.isEmptyContainer(storedContainer) { + // the only thing remaining to check is the parent information + return f.ensureConsistentParent(v) + } + + // vertex container holds a vertex with the same ID as `v`: + // The parent information from vertexContainer has already been checked for + // consistency. So we simply compare with the existing vertex for + // inconsistencies + + // the vertex is at or below the lowest retained level, so we can't check the + // parent (it's pruned) + if v.Level() == f.LowestLevel { + return nil + } + + newParentId, newParentLevel := v.Parent() + storedParentId, storedParentLevel := storedContainer.vertex.Parent() + if newParentId != storedParentId { + return NewInvalidVertexErrorf( + v, + "parent ID conflicts with stored parent (%x!=%x)", + newParentId, + storedParentId, + ) + } + if newParentLevel != storedParentLevel { + return NewInvalidVertexErrorf( + v, + "parent level conflicts with stored parent (%d!=%d)", + newParentLevel, + storedParentLevel, + ) + } + // all _relevant_ fields identical + return nil +} + +// ensureConsistentParent verifies that vertex.Parent() is consistent with +// current forest. +// Returns InvalidVertexError if: +// * there is a parent with the same ID but different level; +// * the parent's level is _not_ smaller than the vertex's level +func (f *LevelledForest) ensureConsistentParent(vertex Vertex) error { + if vertex.Level() <= f.LowestLevel { + // the vertex is at or below the lowest retained level, so we can't check + // the parent (it's pruned) + return nil + } + + // verify parent + parentID, parentLevel := vertex.Parent() + if !(vertex.Level() > parentLevel) { + return NewInvalidVertexErrorf( + vertex, + "vertex parent level (%d) must be smaller than proposed vertex level (%d)", + parentLevel, + vertex.Level(), + ) + } + storedParent, haveParentStored := f.GetVertex(parentID) + if !haveParentStored { + return nil + } + if storedParent.Level() != parentLevel { + return NewInvalidVertexErrorf( + vertex, + "parent level conflicts with stored parent (%d!=%d)", + parentLevel, + storedParent.Level(), + ) + } + return nil +} diff --git a/consensus/forest/vertex.go b/consensus/forest/vertex.go new file mode 100644 index 0000000..a4eee35 --- /dev/null +++ b/consensus/forest/vertex.go @@ -0,0 +1,103 @@ +package forest + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type Vertex interface { + // VertexID returns the vertex's ID (in most cases its hash) + VertexID() models.Identity + // Level returns the vertex's level + Level() uint64 + // Parent returns the parent's (level, ID) + Parent() (models.Identity, uint64) +} + +// VertexToString returns a string representation of the vertex. +func VertexToString(v Vertex) string { + parentID, parentLevel := v.Parent() + return fmt.Sprintf( + "", + v.VertexID(), + v.Level(), + parentID, + parentLevel, + ) +} + +// VertexIterator is a stateful iterator for VertexList. +// Internally operates directly on the Vertex Containers +// It has one-element look ahead for skipping empty vertex containers. 
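// Illustrative sketch, not part of the patch: the Vertex interface above only
// asks for an ID, a level, and the parent's (ID, level), with the parent at a
// strictly smaller level. A minimal toy implementation of that contract,
// using a local string identity because the concrete shape of models.Identity
// is not visible in this hunk; toyVertex is a hypothetical name.
package main

import "fmt"

type toyVertex struct {
	id          string
	level       uint64
	parentID    string
	parentLevel uint64
}

func (v toyVertex) VertexID() string         { return v.id }
func (v toyVertex) Level() uint64            { return v.level }
func (v toyVertex) Parent() (string, uint64) { return v.parentID, v.parentLevel }

func main() {
	v := toyVertex{id: "b2", level: 2, parentID: "b1", parentLevel: 1}
	pid, plvl := v.Parent()
	fmt.Printf("<id=%s level=%d parent_id=%s parent_level=%d>\n",
		v.VertexID(), v.Level(), pid, plvl)
}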
+type VertexIterator struct { + data VertexList + idx int + next Vertex +} + +func (it *VertexIterator) preLoad() { + for it.idx < len(it.data) { + v := it.data[it.idx].vertex + it.idx++ + if v != nil { + it.next = v + return + } + } + it.next = nil +} + +// NextVertex returns the next Vertex or nil if there is none +func (it *VertexIterator) NextVertex() Vertex { + res := it.next + it.preLoad() + return res +} + +// HasNext returns true if and only if there is a next Vertex +func (it *VertexIterator) HasNext() bool { + return it.next != nil +} + +func newVertexIterator(vertexList VertexList) VertexIterator { + it := VertexIterator{ + data: vertexList, + } + it.preLoad() + return it +} + +// InvalidVertexError indicates that a proposed vertex is invalid for insertion +// to the forest. +type InvalidVertexError struct { + // Vertex is the invalid vertex + Vertex Vertex + // msg provides additional context + msg string +} + +func (err InvalidVertexError) Error() string { + return fmt.Sprintf( + "invalid vertex %s: %s", + VertexToString(err.Vertex), + err.msg, + ) +} + +func IsInvalidVertexError(err error) bool { + var target InvalidVertexError + return errors.As(err, &target) +} + +func NewInvalidVertexErrorf( + vertex Vertex, + msg string, + args ...interface{}, +) InvalidVertexError { + return InvalidVertexError{ + Vertex: vertex, + msg: fmt.Sprintf(msg, args...), + } +} diff --git a/consensus/forks/forks.go b/consensus/forks/forks.go new file mode 100644 index 0000000..648b620 --- /dev/null +++ b/consensus/forks/forks.go @@ -0,0 +1,657 @@ +package forks + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/forest" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Forks enforces structural validity of the consensus state and implements +// finalization rules as defined in Jolteon consensus +// https://arxiv.org/abs/2106.10362 The same approach has later been adopted by +// the Diem team resulting in DiemBFT v4: +// https://developers.diem.com/papers/diem-consensus-state-machine-replication-in-the-diem-blockchain/2021-08-17.pdf +// Forks is NOT safe for concurrent use by multiple goroutines. +type Forks[StateT models.Unique, VoteT models.Unique] struct { + finalizationCallback consensus.Finalizer + notifier consensus.FollowerConsumer[StateT, VoteT] + forest forest.LevelledForest + trustedRoot *models.CertifiedState[StateT] + + // finalityProof holds the latest finalized state including the certified + // child as proof of finality. 
CAUTION: is nil, when Forks has not yet + // finalized any states beyond the finalized root state it was initialized + // with + finalityProof *consensus.FinalityProof[StateT] +} + +var _ consensus.Forks[*nilUnique] = (*Forks[*nilUnique, *nilUnique])(nil) + +func NewForks[StateT models.Unique, VoteT models.Unique]( + trustedRoot *models.CertifiedState[StateT], + finalizationCallback consensus.Finalizer, + notifier consensus.FollowerConsumer[StateT, VoteT], +) (*Forks[StateT, VoteT], error) { + if trustedRoot == nil { + return nil, + models.NewConfigurationErrorf("invalid root: root is nil") + } + + if (trustedRoot.State.Identifier != trustedRoot.CertifyingQuorumCertificate.Identity()) || + (trustedRoot.State.Rank != trustedRoot.CertifyingQuorumCertificate.GetRank()) { + return nil, + models.NewConfigurationErrorf( + "invalid root: root QC is not pointing to root state", + ) + } + + forks := Forks[StateT, VoteT]{ + finalizationCallback: finalizationCallback, + notifier: notifier, + forest: *forest.NewLevelledForest(trustedRoot.State.Rank), + trustedRoot: trustedRoot, + finalityProof: nil, + } + + // verify and add root state to levelled forest + err := forks.EnsureStateIsValidExtension(trustedRoot.State) + if err != nil { + return nil, fmt.Errorf( + "invalid root state %x: %w", + trustedRoot.Identifier(), + err, + ) + } + forks.forest.AddVertex(ToStateContainer2[StateT](trustedRoot.State)) + return &forks, nil +} + +// FinalizedRank returns the largest rank number where a finalized state is +// known +func (f *Forks[StateT, VoteT]) FinalizedRank() uint64 { + if f.finalityProof == nil { + return f.trustedRoot.State.Rank + } + return f.finalityProof.State.Rank +} + +// FinalizedState returns the finalized state with the largest rank number +func (f *Forks[StateT, VoteT]) FinalizedState() *models.State[StateT] { + if f.finalityProof == nil { + return f.trustedRoot.State + } + return f.finalityProof.State +} + +// FinalityProof returns the latest finalized state and a certified child from +// the subsequent rank, which proves finality. +// CAUTION: method returns (nil, false), when Forks has not yet finalized any +// states beyond the finalized root state it was initialized with. +func (f *Forks[StateT, VoteT]) FinalityProof() ( + *consensus.FinalityProof[StateT], + bool, +) { + return f.finalityProof, f.finalityProof != nil +} + +// GetState returns (*models.State, true) if the state with the specified +// id was found and (nil, false) otherwise. +func (f *Forks[StateT, VoteT]) GetState(stateID models.Identity) ( + *models.State[StateT], + bool, +) { + stateContainer, hasState := f.forest.GetVertex(stateID) + if !hasState { + return nil, false + } + return stateContainer.(*StateContainer[StateT]).GetState(), true +} + +// GetStatesForRank returns all known states for the given rank +func (f *Forks[StateT, VoteT]) GetStatesForRank( + rank uint64, +) []*models.State[StateT] { + vertexIterator := f.forest.GetVerticesAtLevel(rank) + // in the vast majority of cases, there will only be one proposal for a + // particular rank + states := make([]*models.State[StateT], 0, 1) + for vertexIterator.HasNext() { + v := vertexIterator.NextVertex() + states = append(states, v.(*StateContainer[StateT]).GetState()) + } + return states +} + +// IsKnownState checks whether state is known. 
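// Illustrative sketch, not part of the patch: NewForks above rejects a
// trusted root whose certifying QC does not point at the root state (same
// identifier and same rank). The standalone check below restates that rule
// with toy types; rootIsCertified and the toy structs are hypothetical and
// not part of the package API.
package main

import "fmt"

type toyQC struct {
	stateID string
	rank    uint64
}

type toyState struct {
	id   string
	rank uint64
}

// rootIsCertified returns true only if the QC certifies exactly this state.
func rootIsCertified(root toyState, qc toyQC) bool {
	return root.id == qc.stateID && root.rank == qc.rank
}

func main() {
	root := toyState{id: "genesis", rank: 0}
	fmt.Println(rootIsCertified(root, toyQC{stateID: "genesis", rank: 0})) // true
	fmt.Println(rootIsCertified(root, toyQC{stateID: "genesis", rank: 1})) // false: wrong rank
}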
+func (f *Forks[StateT, VoteT]) IsKnownState(stateID models.Identity) bool { + _, hasState := f.forest.GetVertex(stateID) + return hasState +} + +// IsProcessingNeeded determines whether the given state needs processing, +// based on the state's rank and hash. +// Returns false if any of the following conditions applies +// - state rank is _below_ the most recently finalized state +// - the state already exists in the consensus state +// +// UNVALIDATED: expects state to pass Forks.EnsureStateIsValidExtension(state) +func (f *Forks[StateT, VoteT]) IsProcessingNeeded(state *models.State[StateT]) bool { + if state.Rank < f.FinalizedRank() || f.IsKnownState(state.Identifier) { + return false + } + return true +} + +// EnsureStateIsValidExtension checks that the given state is a valid extension +// to the tree of states already stored (no state modifications). Specifically, +// the following conditions are enforced, which are critical to the correctness +// of Forks: +// +// 1. If a state with the same ID is already stored, their ranks must be +// identical. +// 2. The state's rank must be strictly larger than the rank of its parent. +// 3. The parent must already be stored (or below the pruning height). +// +// Exclusions to these rules (by design): +// Let W denote the rank of state's parent (i.e. W := state.QC.Rank) and F the +// latest finalized rank. +// +// (i) If state.Rank < F, adding the state would be a no-op. Such states are +// considered compatible (principle of vacuous truth), i.e. we skip +// checking 1, 2, 3. +// (ii) If state.Rank == F, we do not inspect the QC / parent at all (skip 2 +// and 3). This exception is important for compatability with genesis or +// spork-root states, which do not contain a QC. +// (iii) If state.Rank > F, but state.QC.Rank < F the parent has already been +// pruned. In this case, we omit rule 3. (principle of vacuous truth +// applied to the parent) +// +// We assume that all states are fully verified. A valid state must satisfy all +// consistency requirements; otherwise we have a bug in the compliance layer. +// +// Error returns: +// - models.MissingStateError if the parent of the input proposal does not +// exist in the forest (but is above the pruned rank). Represents violation +// of condition 3. +// - models.InvalidStateError if the state violates condition 1. or 2. +// - generic error in case of unexpected bug or internal state corruption +func (f *Forks[StateT, VoteT]) EnsureStateIsValidExtension( + state *models.State[StateT], +) error { + if state.Rank < f.forest.LowestLevel { // exclusion (i) + return nil + } + + // LevelledForest enforces conditions 1. and 2. including the respective + // exclusions (ii) and (iii). + stateContainer := ToStateContainer2[StateT](state) + err := f.forest.VerifyVertex(stateContainer) + if err != nil { + if forest.IsInvalidVertexError(err) { + return models.NewInvalidStateErrorf( + state, + "not a valid vertex for state tree: %w", + err, + ) + } + return fmt.Errorf( + "state tree generated unexpected error validating vertex: %w", + err, + ) + } + + // Condition 3: + // LevelledForest implements a more generalized algorithm that also works for + // disjoint graphs. Therefore, LevelledForest _not_ enforce condition 3. Here, + // we additionally require that the pending states form a tree (connected + // graph), i.e. 
we need to enforce condition 3 + if (state.Rank == f.forest.LowestLevel) || + (state.ParentQuorumCertificate.GetRank() < f.forest.LowestLevel) { // exclusion (ii) and (iii) + return nil + } + // For a state whose parent is _not_ below the pruning height, we expect the + // parent to be known. + _, isParentKnown := f.forest.GetVertex( + state.ParentQuorumCertificate.Identity(), + ) + if !isParentKnown { // missing parent + return models.MissingStateError{ + Rank: state.ParentQuorumCertificate.GetRank(), + Identifier: state.ParentQuorumCertificate.Identity(), + } + } + return nil +} + +// AddCertifiedState[StateT] appends the given certified state to the tree of +// pending states and updates the latest finalized state (if finalization +// progressed). Unless the parent is below the pruning threshold (latest +// finalized rank), we require that the parent is already stored in Forks. +// Calling this method with previously processed states leaves the consensus +// state invariant (though, it will potentially cause some duplicate +// processing). +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting +// finalized states have been detected (violating a foundational consensus +// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted +// by seniority) in the network, breaking the safety guarantees of HotStuff +// (or there is a critical bug / data corruption). Forks cannot recover from +// this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) AddCertifiedState( + certifiedState *models.CertifiedState[StateT], +) error { + if !f.IsProcessingNeeded(certifiedState.State) { + return nil + } + + // Check proposal for byzantine evidence, store it and emit + // `OnStateIncorporated` notification. Note: `checkForByzantineEvidence` only + // inspects the state, but _not_ its certifying QC. Hence, we have to + // additionally check here, whether the certifying QC conflicts with any known + // QCs. + err := f.checkForByzantineEvidence(certifiedState.State) + if err != nil { + return fmt.Errorf( + "cannot check for Byzantine evidence in certified state %x: %w", + certifiedState.State.Identifier, + err, + ) + } + err = f.checkForConflictingQCs(&certifiedState.CertifyingQuorumCertificate) + if err != nil { + return fmt.Errorf( + "certifying QC for state %x failed check for conflicts: %w", + certifiedState.State.Identifier, + err, + ) + } + f.forest.AddVertex(ToStateContainer2[StateT](certifiedState.State)) + f.notifier.OnStateIncorporated(certifiedState.State) + + // Update finality status: + err = f.checkForAdvancingFinalization(certifiedState) + if err != nil { + return fmt.Errorf("updating finalization failed: %w", err) + } + return nil +} + +// AddValidatedState appends the validated state to the tree of pending +// states and updates the latest finalized state (if applicable). 
Unless the +// parent is below the pruning threshold (latest finalized rank), we require +// that the parent is already stored in Forks. Calling this method with +// previously processed states leaves the consensus state invariant (though, it +// will potentially cause some duplicate processing). +// Notes: +// - Method `AddCertifiedState[StateT](..)` should be used preferably, if a QC +// certifying `state` is already known. This is generally the case for the +// consensus follower. Method `AddValidatedState` is intended for active +// consensus participants, which fully validate states (incl. payload), i.e. +// QCs are processed as part of validated proposals. +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting +// finalized states have been detected (violating a foundational consensus +// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted +// by seniority) in the network, breaking the safety guarantees of HotStuff +// (or there is a critical bug / data corruption). Forks cannot recover from +// this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) AddValidatedState( + proposal *models.State[StateT], +) error { + if !f.IsProcessingNeeded(proposal) { + return nil + } + + // Check proposal for byzantine evidence, store it and emit + // `OnStateIncorporated` notification: + err := f.checkForByzantineEvidence(proposal) + if err != nil { + return fmt.Errorf( + "cannot check Byzantine evidence for state %x: %w", + proposal.Identifier, + err, + ) + } + f.forest.AddVertex(ToStateContainer2[StateT](proposal)) + f.notifier.OnStateIncorporated(proposal) + + // Update finality status: In the implementation, our notion of finality is + // based on certified states. + // The certified parent essentially combines the parent, with the QC contained + // in state, to drive finalization. + parent, found := f.GetState(proposal.ParentQuorumCertificate.Identity()) + if !found { + // Not finding the parent means it is already pruned; hence this state does + // not change the finalization state. + return nil + } + certifiedParent, err := models.NewCertifiedState[StateT]( + parent, + proposal.ParentQuorumCertificate, + ) + if err != nil { + return fmt.Errorf( + "mismatching QC with parent (corrupted Forks state):%w", + err, + ) + } + err = f.checkForAdvancingFinalization(certifiedParent) + if err != nil { + return fmt.Errorf("updating finalization failed: %w", err) + } + return nil +} + +// checkForByzantineEvidence inspects whether the given `state` together with +// the already known information yields evidence of byzantine behaviour. +// Furthermore, the method enforces that `state` is a valid extension of the +// tree of pending states. If the state is a double proposal, we emit an +// `OnStateIncorporated` notification. 
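// Illustrative sketch, not part of the patch: AddValidatedState above drives
// finality by pairing the proposal's parent QC with the parent state it
// certifies, forming a "certified parent". The toy below restates that
// pairing; certifyParent and the toy types are hypothetical and not part of
// the package API.
package main

import (
	"errors"
	"fmt"
)

type toyQC struct {
	stateID string
	rank    uint64
}

type toyState struct {
	id       string
	rank     uint64
	parentQC toyQC
}

type certifiedToyState struct {
	state toyState
	qc    toyQC // QC carried by a child, certifying `state`
}

// certifyParent pairs a known parent with the QC its child carries for it,
// rejecting a QC that does not actually point at the parent.
func certifyParent(parent, child toyState) (certifiedToyState, error) {
	qc := child.parentQC
	if qc.stateID != parent.id || qc.rank != parent.rank {
		return certifiedToyState{}, errors.New("QC does not certify this parent")
	}
	return certifiedToyState{state: parent, qc: qc}, nil
}

func main() {
	parent := toyState{id: "s2", rank: 2, parentQC: toyQC{stateID: "s1", rank: 1}}
	child := toyState{id: "s3", rank: 3, parentQC: toyQC{stateID: "s2", rank: 2}}
	cp, err := certifyParent(parent, child)
	fmt.Println(cp.qc.rank, err) // 2 <nil>
}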
Though, provided the state is a valid +// extension of the state tree by itself, it passes this method without an +// error. +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs have been +// detected. Forks cannot recover from this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) checkForByzantineEvidence( + state *models.State[StateT], +) error { + err := f.EnsureStateIsValidExtension(state) + if err != nil { + return fmt.Errorf("consistency check on state failed: %w", err) + } + err = f.checkForConflictingQCs(&state.ParentQuorumCertificate) + if err != nil { + return fmt.Errorf("checking QC for conflicts failed: %w", err) + } + f.checkForDoubleProposal(state) + return nil +} + +// checkForConflictingQCs checks if QC conflicts with a stored Quorum +// Certificate. In case a conflicting QC is found, an +// ByzantineThresholdExceededError is returned. Two Quorum Certificates q1 and +// q2 are defined as conflicting iff: +// +// q1.Rank == q2.Rank AND q1.Identifier ≠ q2.Identifier +// +// This means there are two Quorums for conflicting states at the same rank. +// Per 'Observation 1' from the Jolteon paper https://arxiv.org/pdf/2106.10362v1.pdf, +// two conflicting QCs can exist if and only if the Byzantine threshold is +// exceeded. +// Error returns: +// - models.ByzantineThresholdExceededError if conflicting QCs have been +// detected. Forks cannot recover from this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) checkForConflictingQCs( + qc *models.QuorumCertificate, +) error { + it := f.forest.GetVerticesAtLevel((*qc).GetRank()) + for it.HasNext() { + otherState := it.NextVertex() // by construction, must have same rank as qc.Rank + if (*qc).Identity() != otherState.VertexID() { + // * we have just found another state at the same rank number as qc.Rank + // but with different hash + // * if this state has a child c, this child will have + // c.qc.rank = parentRank + // c.qc.ID != parentIdentifier + // => conflicting qc + otherChildren := f.forest.GetChildren(otherState.VertexID()) + if otherChildren.HasNext() { + otherChild := otherChildren.NextVertex().(*StateContainer[StateT]).GetState() + conflictingQC := otherChild.ParentQuorumCertificate + return models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "conflicting QCs at rank %d: %x and %x", + (*qc).GetRank(), (*qc).Identity(), conflictingQC.Identity(), + )} + } + } + } + return nil +} + +// checkForDoubleProposal checks if the input proposal is a double proposal. +// A double proposal occurs when two proposals with the same rank exist in +// Forks. If there is a double proposal, notifier.OnDoubleProposeDetected is +// triggered. 
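// Illustrative sketch, not part of the patch: per checkForConflictingQCs
// above, two quorum certificates conflict iff they certify different states
// at the same rank, which (Jolteon, Observation 1) implies the Byzantine
// threshold was exceeded. qcsConflict below is a hypothetical helper that
// restates this predicate with toy types.
package main

import "fmt"

type toyQC struct {
	stateID string
	rank    uint64
}

// qcsConflict reports whether q1 and q2 are conflicting quorum certificates:
// same rank, different certified state.
func qcsConflict(q1, q2 toyQC) bool {
	return q1.rank == q2.rank && q1.stateID != q2.stateID
}

func main() {
	a := toyQC{stateID: "s5a", rank: 5}
	b := toyQC{stateID: "s5b", rank: 5}
	c := toyQC{stateID: "s5a", rank: 5}
	fmt.Println(qcsConflict(a, b)) // true: Byzantine evidence
	fmt.Println(qcsConflict(a, c)) // false: both certify the same state
}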
+func (f *Forks[StateT, VoteT]) checkForDoubleProposal( + state *models.State[StateT], +) { + it := f.forest.GetVerticesAtLevel(state.Rank) + for it.HasNext() { + otherVertex := it.NextVertex() // by construction, must have same rank as state + otherState := otherVertex.(*StateContainer[StateT]).GetState() + if state.Identifier != otherState.Identifier { + f.notifier.OnDoubleProposeDetected(state, otherState) + } + } +} + +// checkForAdvancingFinalization checks whether observing certifiedState leads +// to progress of finalization. This function should be called every time a new +// state is added to Forks. If the new state is the head of a 2-chain satisfying +// the finalization rule, we update `Forks.finalityProof` to the new latest +// finalized state. Calling this method with previously-processed states leaves +// the consensus state invariant. +// UNVALIDATED: assumes that relevant state properties are consistent with +// previous states +// Error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.ByzantineThresholdExceededError in case we detect a finalization +// fork (violating a foundational consensus guarantee). This indicates that +// there are 1/3+ Byzantine nodes (weighted by seniority) in the network, +// breaking the safety guarantees of HotStuff (or there is a critical bug / +// data corruption). Forks cannot recover from this exception. +// - generic error in case of unexpected bug or internal state corruption +func (f *Forks[StateT, VoteT]) checkForAdvancingFinalization( + certifiedState *models.CertifiedState[StateT], +) error { + // We prune all states in forest which are below the most recently finalized + // state. Hence, we have a pruned ancestry if and only if either of the + // following conditions applies: + // (a) If a state's parent rank (i.e. state.QC.Rank) is below the most + // recently finalized state. + // (b) If a state's rank is equal to the most recently finalized state. + // Caution: + // * Under normal operation, case (b) is covered by the logic for case (a) + // * However, the existence of a genesis state requires handling case (b) + // explicitly: + // The root state is specified and trusted by the node operator. If the root + // state is the genesis state, it might not contain a QC pointing to a + // parent (as there is no parent). In this case, condition (a) cannot be + // evaluated. + lastFinalizedRank := f.FinalizedRank() + if (certifiedState.Rank() <= lastFinalizedRank) || + (certifiedState.State.ParentQuorumCertificate.GetRank() < lastFinalizedRank) { + // Repeated states are expected during normal operations. We enter this code + // state if and only if the parent's rank is _below_ the last finalized + // state. 
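// Illustrative sketch, not part of the patch: the rule enforced by
// checkForAdvancingFinalization finalizes a parent only when its certified
// child sits exactly one rank above it (a direct 1-chain) and that child is
// certified by any QC. canFinalizeParent is a hypothetical helper that
// restates the rank arithmetic using the "[<(qc rank) state rank]" notation of
// the tests later in this patch.
package main

import "fmt"

// canFinalizeParent reports whether the parent at parentRank becomes
// finalized once its child at certifiedChildRank has been certified by a QC.
func canFinalizeParent(parentRank, certifiedChildRank uint64) bool {
	return parentRank+1 == certifiedChildRank
}

func main() {
	// [<(1) 2] [<(2) 3] plus a QC for 3: child of rank 3 is certified, its
	// parent has rank 2, so the parent [<(1) 2] is finalized.
	fmt.Println(canFinalizeParent(2, 3)) // true
	// [<(1) 3] [<(3) 5] plus a QC for 5: parent rank 3, child rank 5, only an
	// indirect 1-chain, so nothing new is finalized.
	fmt.Println(canFinalizeParent(3, 5)) // false
}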
It is straight forward to show: + // Lemma: Let B be a state whose 2-chain reaches beyond the last finalized + // state => B will not update the locked or finalized state + return nil + } + + // retrieve parent; always expected to succeed, because we passed the checks + // above + qcForParent := certifiedState.State.ParentQuorumCertificate + parentVertex, parentStateKnown := f.forest.GetVertex( + qcForParent.Identity(), + ) + if !parentStateKnown { + return models.MissingStateError{ + Rank: qcForParent.GetRank(), + Identifier: qcForParent.Identity(), + } + } + parentState := parentVertex.(*StateContainer[StateT]).GetState() + + // Note: we assume that all stored states pass + // Forks.EnsureStateIsValidExtension(state); specifically, that state's + // RankNumber is strictly monotonically increasing which is enforced by + // LevelledForest.VerifyVertex(...) + // We denote: + // * a DIRECT 1-chain as '<-' + // * a general 1-chain as '<~' (direct or indirect) + // Jolteon's rule for finalizing `parentState` is + // parentState <- State <~ certifyingQC (i.e. a DIRECT 1-chain PLUS + // ╰─────────────────────╯ any 1-chain) + // certifiedState + // Hence, we can finalize `parentState` as head of a 2-chain, + // if and only if `State.Rank` is exactly 1 higher than the rank of + // `parentState` + if parentState.Rank+1 != certifiedState.Rank() { + return nil + } + + // `parentState` is now finalized: + // * While Forks is single-threaded, there is still the possibility of + // reentrancy. Specifically, the consumers of our finalization events are + // served by the goroutine executing Forks. It is conceivable that a + // consumer might access Forks and query the latest finalization proof. + // This would be legal, if the component supplying the goroutine to Forks + // also consumes the notifications. + // * Therefore, for API safety, we want to first update Fork's + // `finalityProof` before we emit any notifications. + + // Advancing finalization step (i): we collect all states for finalization (no + // notifications are emitted) + statesToBeFinalized, err := f.collectStatesForFinalization(&qcForParent) + if err != nil { + return fmt.Errorf( + "advancing finalization to state %x from rank %d failed: %w", + qcForParent.Identity(), + qcForParent.GetRank(), + err, + ) + } + + // Advancing finalization step (ii): update `finalityProof` and prune + // `LevelledForest` + f.finalityProof = &consensus.FinalityProof[StateT]{ + State: parentState, + CertifiedChild: certifiedState, + } + err = f.forest.PruneUpToLevel(f.FinalizedRank()) + if err != nil { + return fmt.Errorf("pruning levelled forest failed unexpectedly: %w", err) + } + + // Advancing finalization step (iii): iterate over the states from (i) and + // emit finalization events + for _, b := range statesToBeFinalized { + // first notify other critical components about finalized state - all errors + // returned here are fatal exceptions + err = f.finalizationCallback.MakeFinal(b.Identifier) + if err != nil { + return fmt.Errorf("finalization error in other component: %w", err) + } + + // notify less important components about finalized state + f.notifier.OnFinalizedState(b) + } + return nil +} + +// collectStatesForFinalization collects and returns all newly finalized states +// up to (and including) the state pointed to by `qc`. The states are listed in +// order of increasing height. +// Error returns: +// - models.ByzantineThresholdExceededError in case we detect a finalization +// fork (violating a foundational consensus guarantee). 
This indicates that +// there are 1/3+ Byzantine nodes (weighted by seniority) in the network, +// breaking the safety guarantees of HotStuff (or there is a critical bug / +// data corruption). Forks cannot recover from this exception. +// - generic error in case of bug or internal state corruption +func (f *Forks[StateT, VoteT]) collectStatesForFinalization( + qc *models.QuorumCertificate, +) ([]*models.State[StateT], error) { + lastFinalized := f.FinalizedState() + if (*qc).GetRank() < lastFinalized.Rank { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing state with rank %d which is lower than previously finalized state at rank %d", + (*qc).GetRank(), lastFinalized.Rank, + )} + } + if (*qc).GetRank() == lastFinalized.Rank { // no new states to be finalized + return nil, nil + } + + // Collect all states that are pending finalization in slice. While we crawl + // the states starting from the newest finalized state backwards (decreasing + // ranks), we would like to return them in order of _increasing_ rank. + // Therefore, we fill the slice starting with the highest index. + l := (*qc).GetRank() - lastFinalized.Rank // l is an upper limit to the number of states that can be maximally finalized + statesToBeFinalized := make([]*models.State[StateT], l) + for (*qc).GetRank() > lastFinalized.Rank { + b, ok := f.GetState((*qc).Identity()) + if !ok { + return nil, fmt.Errorf( + "failed to get state (rank=%d, stateID=%x) for finalization", + (*qc).GetRank(), + (*qc).Identity(), + ) + } + l-- + statesToBeFinalized[l] = b + qc = &b.ParentQuorumCertificate // move to parent + } + // Now, `l` is the index where we stored the oldest state that should be + // finalized. Note that `l` might be larger than zero, if some ranks have no + // finalized states. Hence, `statesToBeFinalized` might start with nil + // entries, which we remove: + statesToBeFinalized = statesToBeFinalized[l:] + + // qc should now point to the latest finalized state. Otherwise, the + // consensus committee is compromised (or we have a critical internal bug). + if (*qc).GetRank() < lastFinalized.Rank { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing state with rank %d which is lower than previously finalized state at rank %d", + (*qc).GetRank(), lastFinalized.Rank, + )} + } + if (*qc).GetRank() == lastFinalized.Rank && + lastFinalized.Identifier != (*qc).Identity() { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing states with rank %d at conflicting forks: %x and %x", + (*qc).GetRank(), (*qc).Identity(), lastFinalized.Identifier, + )} + } + + return statesToBeFinalized, nil +} diff --git a/consensus/forks/forks_test.go b/consensus/forks/forks_test.go new file mode 100644 index 0000000..ac4e97a --- /dev/null +++ b/consensus/forks/forks_test.go @@ -0,0 +1,950 @@ +package forks + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +/***************************************************************************** + * NOTATION: * + * A state is denoted as [◄() ]. * + * For example, [◄(1) 2] means: a state of rank 2 that has a QC for rank 1. 
* + *****************************************************************************/ + +// TestInitialization verifies that at initialization, Forks reports: +// - the root / genesis state as finalized +// - it has no finalization proof for the root / genesis state (state and its finalization is trusted) +func TestInitialization(t *testing.T) { + forks, _ := newForks(t) + requireOnlyGenesisStateFinalized(t, forks) + _, hasProof := forks.FinalityProof() + require.False(t, hasProof) +} + +// TestFinalize_Direct1Chain tests adding a direct 1-chain on top of the genesis state: +// - receives [◄(1) 2] [◄(2) 5] +// +// Expected behaviour: +// - On the one hand, Forks should not finalize any _additional_ states, because there is +// no finalizable 2-chain for [◄(1) 2]. Hence, finalization no events should be emitted. +// - On the other hand, after adding the two states, Forks has enough knowledge to construct +// a FinalityProof for the genesis state. +func TestFinalize_Direct1Chain(t *testing.T) { + builder := NewStateBuilder(). + Add(1, 2). + Add(2, 3) + states, err := builder.States() + require.NoError(t, err) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + + // adding state [◄(1) 2] should not finalize anything + // as the genesis state is trusted, there should be no FinalityProof available for it + require.NoError(t, forks.AddValidatedState(states[0])) + requireOnlyGenesisStateFinalized(t, forks) + _, hasProof := forks.FinalityProof() + require.False(t, hasProof) + + // After adding state [◄(2) 3], Forks has enough knowledge to construct a FinalityProof for the + // genesis state. However, finalization remains at the genesis state, so no events should be emitted. + expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate) + require.NoError(t, forks.AddValidatedState(states[1])) + requireLatestFinalizedState(t, forks, builder.GenesisState().State) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + + // After adding CertifiedState [◄(1) 2] ◄(2), Forks has enough knowledge to construct a FinalityProof for + // the genesis state. However, finalization remains at the genesis state, so no events should be emitted. + expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate) + c, err := models.NewCertifiedState(states[0], states[1].ParentQuorumCertificate) + require.NoError(t, err) + + require.NoError(t, forks.AddCertifiedState(c)) + requireLatestFinalizedState(t, forks, builder.GenesisState().State) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_Direct2Chain tests adding a direct 1-chain on a direct 1-chain (direct 2-chain). +// - receives [◄(1) 2] [◄(2) 3] [◄(3) 4] +// - Forks should finalize [◄(1) 2] +func TestFinalize_Direct2Chain(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). + Add(2, 3). + Add(3, 4). 
+ States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_DirectIndirect2Chain tests adding an indirect 1-chain on a direct 1-chain. +// receives [◄(1) 2] [◄(2) 3] [◄(3) 5] +// it should finalize [◄(1) 2] +func TestFinalize_DirectIndirect2Chain(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). + Add(2, 3). + Add(3, 5). + States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_IndirectDirect2Chain tests adding a direct 1-chain on an indirect 1-chain. +// - Forks receives [◄(1) 3] [◄(3) 5] [◄(7) 7] +// - it should not finalize any states because there is no finalizable 2-chain. +func TestFinalize_IndirectDirect2Chain(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 3). + Add(3, 5). + Add(5, 7). + States() + require.NoError(t, err) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireOnlyGenesisStateFinalized(t, forks) + _, hasProof := forks.FinalityProof() + require.False(t, hasProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireOnlyGenesisStateFinalized(t, forks) + _, hasProof := forks.FinalityProof() + require.False(t, hasProof) + }) +} + +// TestFinalize_Direct2ChainOnIndirect tests adding a direct 2-chain on an indirect 2-chain: +// - ingesting [◄(1) 3] [◄(3) 5] [◄(5) 6] [◄(6) 7] [◄(7) 8] +// - should result in finalization of [◄(5) 6] +func TestFinalize_Direct2ChainOnIndirect(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 3). + Add(3, 5). + Add(5, 6). + Add(6, 7). + Add(7, 8). 
+ States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_Direct2ChainOnDirect tests adding a sequence of direct 2-chains: +// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5] [◄(5) 6] +// - should result in finalization of [◄(3) 4] +func TestFinalize_Direct2ChainOnDirect(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). + Add(2, 3). + Add(3, 4). + Add(4, 5). + Add(5, 6). + States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_Multiple2Chains tests the case where a state can be finalized by different 2-chains. +// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 5] [◄(3) 6] [◄(3) 7] +// - should result in finalization of [◄(1) 2] +func TestFinalize_Multiple2Chains(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). + Add(2, 3). + Add(3, 5). + Add(3, 6). + Add(3, 7). + States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestFinalize_OrphanedFork tests that we can finalize a state which causes a conflicting fork to be orphaned. +// We ingest the following state tree: +// +// [◄(1) 2] [◄(2) 3] +// [◄(2) 4] [◄(4) 5] [◄(5) 6] +// +// which should result in finalization of [◄(2) 4] and pruning of [◄(2) 3] +func TestFinalize_OrphanedFork(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). // [◄(1) 2] + Add(2, 3). // [◄(2) 3], should eventually be pruned + Add(2, 4). // [◄(2) 4], should eventually be finalized + Add(4, 5). // [◄(4) 5] + Add(5, 6). 
// [◄(5) 6] + States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + require.False(t, forks.IsKnownState(states[1].Identifier)) + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + require.False(t, forks.IsKnownState(states[1].Identifier)) + requireLatestFinalizedState(t, forks, states[2]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestDuplication tests that delivering the same state/qc multiple times has +// the same end state as delivering the state/qc once. +// - Forks receives [◄(1) 2] [◄(2) 3] [◄(2) 3] [◄(3) 4] [◄(3) 4] [◄(4) 5] [◄(4) 5] +// - it should finalize [◄(2) 3] +func TestDuplication(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). + Add(2, 3). + Add(2, 3). + Add(3, 4). + Add(3, 4). + Add(4, 5). + Add(4, 5). + States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[1], states[3], states[5].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[1]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states)) + + requireLatestFinalizedState(t, forks, states[1]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestIgnoreStatesBelowFinalizedRank tests that states below finalized rank are ignored. +// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(1) 5] +// - it should finalize [◄(1) 2] +func TestIgnoreStatesBelowFinalizedRank(t *testing.T) { + builder := NewStateBuilder(). + Add(1, 2). // [◄(1) 2] + Add(2, 3). // [◄(2) 3] + Add(3, 4). // [◄(3) 4] + Add(1, 5) // [◄(1) 5] + states, err := builder.States() + require.NoError(t, err) + expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + // initialize forks and add first 3 states: + // * state [◄(1) 2] should then be finalized + // * and state [1] should be pruned + forks, _ := newForks(t) + require.Nil(t, addValidatedStateToForks(forks, states[:3])) + + // sanity checks to confirm correct test setup + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + require.False(t, forks.IsKnownState(builder.GenesisState().Identifier())) + + // adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. 
cannot resolve the parent + // * Forks should store state, despite the parent already being pruned + // * finalization should not change + orphanedState := states[3] + require.Nil(t, forks.AddValidatedState(orphanedState)) + require.True(t, forks.IsKnownState(orphanedState.Identifier)) + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + // initialize forks and add first 3 states: + // * state [◄(1) 2] should then be finalized + // * and state [1] should be pruned + forks, _ := newForks(t) + require.Nil(t, addCertifiedStatesToForks(forks, states[:3])) + // sanity checks to confirm correct test setup + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + require.False(t, forks.IsKnownState(builder.GenesisState().Identifier())) + + // adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent + // * Forks should store state, despite the parent already being pruned + // * finalization should not change + certStateWithUnknownParent := toCertifiedState(t, states[3]) + require.Nil(t, forks.AddCertifiedState(certStateWithUnknownParent)) + require.True(t, forks.IsKnownState(certStateWithUnknownParent.State.Identifier)) + requireLatestFinalizedState(t, forks, states[0]) + requireFinalityProof(t, forks, expectedFinalityProof) + }) +} + +// TestDoubleProposal tests that the DoubleProposal notification is emitted when two different +// states for the same rank are added. We ingest the following state tree: +// +// / [◄(1) 2] +// [1] +// \ [◄(1) 2'] +// +// which should result in a DoubleProposal event referencing the states [◄(1) 2] and [◄(1) 2'] +func TestDoubleProposal(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). // [◄(1) 2] + AddVersioned(1, 2, 0, 1). // [◄(1) 2'] + States() + require.NoError(t, err) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, notifier := newForks(t) + notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once() + + err = addValidatedStateToForks(forks, states) + require.NoError(t, err) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, notifier := newForks(t) + notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once() + + err = forks.AddCertifiedState(toCertifiedState(t, states[0])) // add [◄(1) 2] as certified state + require.NoError(t, err) + err = forks.AddCertifiedState(toCertifiedState(t, states[1])) // add [◄(1) 2'] as certified state + require.NoError(t, err) + }) +} + +// TestConflictingQCs checks that adding 2 conflicting QCs should return models.ByzantineThresholdExceededError +// We ingest the following state tree: +// +// [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 6] +// [◄(2) 3'] [◄(3') 5] +// +// which should result in a `ByzantineThresholdExceededError`, because conflicting states 3 and 3' both have QCs +func TestConflictingQCs(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). // [◄(1) 2] + Add(2, 3). // [◄(2) 3] + AddVersioned(2, 3, 0, 1). // [◄(2) 3'] + Add(3, 4). // [◄(3) 4] + Add(4, 6). // [◄(4) 6] + AddVersioned(3, 5, 1, 0). 
// [◄(3') 5]
+		States()
+	require.NoError(t, err)
+
+	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
+		forks, notifier := newForks(t)
+		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)
+
+		err = addValidatedStateToForks(forks, states)
+		assert.True(t, models.IsByzantineThresholdExceededError(err))
+	})
+
+	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
+		forks, notifier := newForks(t)
+		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)
+
+		// As [◄(3') 5] is not certified, it will not be added to Forks. However, its QC ◄(3') is
+		// delivered to Forks as part of the *certified* state [◄(2) 3'].
+		err = addCertifiedStatesToForks(forks, states)
+		assert.True(t, models.IsByzantineThresholdExceededError(err))
+	})
+}
+
+// TestConflictingFinalizedForks checks that finalizing 2 conflicting forks returns models.ByzantineThresholdExceededError.
+// We ingest the following state tree:
+//
+//	[◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5]
+//	         [◄(2) 6] [◄(6) 7] [◄(7) 8]
+//
+// Here, both states [◄(2) 3] and [◄(2) 6] satisfy the finalization condition, i.e. we have a fork
+// in the finalized states, which should result in a models.ByzantineThresholdExceededError exception.
+func TestConflictingFinalizedForks(t *testing.T) {
+	states, err := NewStateBuilder().
+		Add(1, 2).
+		Add(2, 3).
+		Add(3, 4).
+		Add(4, 5). // finalizes [◄(2) 3]
+		Add(2, 6).
+		Add(6, 7).
+		Add(7, 8). // finalizes [◄(2) 6], conflicting with [◄(2) 3]
+		States()
+	require.NoError(t, err)
+
+	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
+		forks, _ := newForks(t)
+		err = addValidatedStateToForks(forks, states)
+		assert.True(t, models.IsByzantineThresholdExceededError(err))
+	})
+
+	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
+		forks, _ := newForks(t)
+		err = addCertifiedStatesToForks(forks, states)
+		assert.True(t, models.IsByzantineThresholdExceededError(err))
+	})
+}
+
+// TestAddDisconnectedState checks that adding a state which does not connect to the
+// latest finalized state returns a `models.MissingStateError`
+//   - receives [◄(2) 3]
+//   - should return `models.MissingStateError`, because the parent is above the pruning
+//     threshold but unknown to Forks
+func TestAddDisconnectedState(t *testing.T) {
+	states, err := NewStateBuilder().
+		Add(1, 2). // we will skip this state [◄(1) 2]
+		Add(2, 3). // [◄(2) 3]
+		States()
+	require.NoError(t, err)
+
+	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
+		forks, _ := newForks(t)
+		err := forks.AddValidatedState(states[1])
+		require.Error(t, err)
+		assert.True(t, models.IsMissingStateError(err))
+	})
+
+	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
+		forks, _ := newForks(t)
+		err := forks.AddCertifiedState(toCertifiedState(t, states[1]))
+		require.Error(t, err)
+		assert.True(t, models.IsMissingStateError(err))
+	})
+}
+
+// TestGetState tests that we can retrieve stored states. Here, we test that
+// attempting to retrieve nonexistent or pruned states fails without causing an exception.
+//   - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4], then [◄(4) 5]
+//   - should finalize [◄(1) 2], then [◄(2) 3]
+func TestGetState(t *testing.T) {
+	states, err := NewStateBuilder().
+		Add(1, 2). // [◄(1) 2]
+		Add(2, 3). // [◄(2) 3]
+		Add(3, 4). // [◄(3) 4]
+		Add(4, 5). 
// [◄(4) 5] + States() + require.NoError(t, err) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + statesAddedFirst := states[:3] // [◄(1) 2] [◄(2) 3] [◄(3) 4] + remainingState := states[3] // [◄(4) 5] + forks, _ := newForks(t) + + // should be unable to retrieve a state before it is added + _, ok := forks.GetState(states[0].Identifier) + assert.False(t, ok) + + // add first 3 states - should finalize [◄(1) 2] + err = addValidatedStateToForks(forks, statesAddedFirst) + require.NoError(t, err) + + // should be able to retrieve all stored states + for _, state := range statesAddedFirst { + b, ok := forks.GetState(state.Identifier) + assert.True(t, ok) + assert.Equal(t, state, b) + } + + // add remaining state [◄(4) 5] - should finalize [◄(2) 3] and prune [◄(1) 2] + require.Nil(t, forks.AddValidatedState(remainingState)) + + // should be able to retrieve just added state + b, ok := forks.GetState(remainingState.Identifier) + assert.True(t, ok) + assert.Equal(t, remainingState, b) + + // should be unable to retrieve pruned state + _, ok = forks.GetState(statesAddedFirst[0].Identifier) + assert.False(t, ok) + }) + + // Caution: finalization is driven by QCs. Therefore, we include the QC for state 3 + // in the first batch of states that we add. This is analogous to previous test case, + // except that we are delivering the QC ◄(3) as part of the certified state of rank 2 + // [◄(2) 3] ◄(3) + // while in the previous sub-test, the QC ◄(3) was delivered as part of state [◄(3) 4] + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + statesAddedFirst := toCertifiedStates(t, states[:2]...) // [◄(1) 2] [◄(2) 3] ◄(3) + remainingState := toCertifiedState(t, states[2]) // [◄(3) 4] ◄(4) + forks, _ := newForks(t) + + // should be unable to retrieve a state before it is added + _, ok := forks.GetState(states[0].Identifier) + assert.False(t, ok) + + // add first states - should finalize [◄(1) 2] + err := forks.AddCertifiedState(statesAddedFirst[0]) + require.NoError(t, err) + err = forks.AddCertifiedState(statesAddedFirst[1]) + require.NoError(t, err) + + // should be able to retrieve all stored states + for _, state := range statesAddedFirst { + b, ok := forks.GetState(state.State.Identifier) + assert.True(t, ok) + assert.Equal(t, state.State, b) + } + + // add remaining state [◄(4) 5] - should finalize [◄(2) 3] and prune [◄(1) 2] + require.Nil(t, forks.AddCertifiedState(remainingState)) + + // should be able to retrieve just added state + b, ok := forks.GetState(remainingState.State.Identifier) + assert.True(t, ok) + assert.Equal(t, remainingState.State, b) + + // should be unable to retrieve pruned state + _, ok = forks.GetState(statesAddedFirst[0].State.Identifier) + assert.False(t, ok) + }) +} + +// TestGetStatesForRank tests retrieving states for a rank (also including double proposals). +// - Forks receives [◄(1) 2] [◄(2) 4] [◄(2) 4'], +// where [◄(2) 4'] is a double proposal, because it has the same rank as [◄(2) 4] +// +// Expected behaviour: +// - Forks should store all the states +// - Forks should emit a `OnDoubleProposeDetected` notification +// - we can retrieve all states, including the double proposals +func TestGetStatesForRank(t *testing.T) { + states, err := NewStateBuilder(). + Add(1, 2). // [◄(1) 2] + Add(2, 4). // [◄(2) 4] + AddVersioned(2, 4, 0, 1). 
// [◄(2) 4'] + States() + require.NoError(t, err) + + t.Run("consensus participant mode: ingest validated states", func(t *testing.T) { + forks, notifier := newForks(t) + notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once() + + err = addValidatedStateToForks(forks, states) + require.NoError(t, err) + + // expect 1 state at rank 2 + storedStates := forks.GetStatesForRank(2) + assert.Len(t, storedStates, 1) + assert.Equal(t, states[0], storedStates[0]) + + // expect 2 states at rank 4 + storedStates = forks.GetStatesForRank(4) + assert.Len(t, storedStates, 2) + assert.ElementsMatch(t, states[1:], storedStates) + + // expect 0 states at rank 3 + storedStates = forks.GetStatesForRank(3) + assert.Len(t, storedStates, 0) + }) + + t.Run("consensus follower mode: ingest certified states", func(t *testing.T) { + forks, notifier := newForks(t) + notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once() + + err := forks.AddCertifiedState(toCertifiedState(t, states[0])) + require.NoError(t, err) + err = forks.AddCertifiedState(toCertifiedState(t, states[1])) + require.NoError(t, err) + err = forks.AddCertifiedState(toCertifiedState(t, states[2])) + require.NoError(t, err) + + // expect 1 state at rank 2 + storedStates := forks.GetStatesForRank(2) + assert.Len(t, storedStates, 1) + assert.Equal(t, states[0], storedStates[0]) + + // expect 2 states at rank 4 + storedStates = forks.GetStatesForRank(4) + assert.Len(t, storedStates, 2) + assert.ElementsMatch(t, states[1:], storedStates) + + // expect 0 states at rank 3 + storedStates = forks.GetStatesForRank(3) + assert.Len(t, storedStates, 0) + }) +} + +// TestNotifications tests that Forks emits the expected events: +// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4] +// +// Expected Behaviour: +// - Each of the ingested states should result in an `OnStateIncorporated` notification +// - Forks should finalize [◄(1) 2], resulting in a `MakeFinal` event and an `OnFinalizedState` event +func TestNotifications(t *testing.T) { + builder := NewStateBuilder(). + Add(1, 2). + Add(2, 3). 
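+		// [◄(3) 4] delivers QC ◄(3), which finalizes [◄(1) 2] and should trigger
+		// the MakeFinal and OnFinalizedState events asserted below.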
+		Add(3, 4)
+	states, err := builder.States()
+	require.NoError(t, err)
+
+	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
+		notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
+		// 4 states including the genesis are incorporated
+		notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
+		notifier.On("OnFinalizedState", states[0]).Once()
+		finalizationCallback := mocks.NewFinalizer(t)
+		finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()
+
+		forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
+		require.NoError(t, err)
+		require.NoError(t, addValidatedStateToForks(forks, states))
+	})
+
+	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
+		notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
+		// 4 states including the genesis are incorporated
+		notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
+		notifier.On("OnFinalizedState", states[0]).Once()
+		finalizationCallback := mocks.NewFinalizer(t)
+		finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()
+
+		forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
+		require.NoError(t, err)
+		require.NoError(t, addCertifiedStatesToForks(forks, states))
+	})
+}
+
+// TestFinalizingMultipleStates tests that `OnFinalizedState` notifications are emitted in the correct order
+// when multiple states are finalized by adding a _single_ state.
+//   - receiving [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] [◄(11) 12] should not finalize any states,
+//     because there is no 2-chain with the first chain link being a _direct_ 1-chain
+//   - adding [◄(12) 22] should finalize up to state [◄(7) 11]
+//
+// This test verifies the following expected properties:
+//  1. Safety under reentrancy:
+//     While Forks is single-threaded, there is still the possibility of reentrancy. Specifically, the
+//     consumers of our finalization events are served by the goroutine executing Forks. It is conceivable
+//     that a consumer might access Forks and query the latest finalization proof. This would be legal if
+//     the component supplying the goroutine to Forks also consumes the notifications. Therefore, for API
+//     safety, we require Forks to _first update_ its `FinalityProof()` before it emits _any_ events.
+//  2. For each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification.
+//  3. States are finalized in order of increasing rank (without skipping any states).
+func TestFinalizingMultipleStates(t *testing.T) {
+	builder := NewStateBuilder().
+		Add(1, 3).   // index 0: [◄(1) 3]
+		Add(3, 5).   // index 1: [◄(3) 5]
+		Add(5, 7).   // index 2: [◄(5) 7]
+		Add(7, 11).  // index 3: [◄(7) 11] -- expected to be finalized
+		Add(11, 12). // index 4: [◄(11) 12]
+		Add(12, 22)  // index 5: [◄(12) 22]
+	states, err := builder.States()
+	require.NoError(t, err)
+
+	// The Finality Proof should right away point to the _latest_ finalized state. Subsequently emitting
+	// finalization events for lower states is fine, because notifications are only guaranteed to arrive
+	// _eventually_, i.e. consumers expect notifications / events to potentially lag behind.
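+	// With the inputs above, the latest finalized state is [◄(7) 11]; the corresponding
+	// FinalityProof consists of [◄(7) 11] and its certified direct child [◄(11) 12] ◄(12).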
+	expectedFinalityProof := makeFinalityProof(t, states[3], states[4], states[5].ParentQuorumCertificate)
+
+	setupForksAndAssertions := func() (*Forks[*helper.TestState, *helper.TestVote], *mocks.Finalizer, *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
+		// initialize Forks with custom event consumers so we can check order of emitted events
+		notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
+		finalizationCallback := mocks.NewFinalizer(t)
+		notifier.On("OnStateIncorporated", mock.Anything).Return(nil)
+		forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
+		require.NoError(t, err)
+
+		// expecting finalization of [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] in this order
+		statesAwaitingFinalization := toStateAwaitingFinalization(states[:4])
+
+		finalizationCallback.On("MakeFinal", mock.Anything).Run(func(args mock.Arguments) {
+			requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: Forks should _first update_ its `FinalityProof()` before it emits _any_ events
+
+			// Requirement 3: states are finalized in order of increasing rank (without skipping any states).
+			expectedNextFinalizationEvents := statesAwaitingFinalization[0]
+			require.Equal(t, expectedNextFinalizationEvents.State.Identifier, args[0])
+
+			// Requirement 2: for each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification.
+			// no duplication of events under normal operations expected
+			require.False(t, expectedNextFinalizationEvents.MakeFinalCalled)
+			require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
+			expectedNextFinalizationEvents.MakeFinalCalled = true
+		}).Return(nil).Times(4)
+
+		notifier.On("OnFinalizedState", mock.Anything).Run(func(args mock.Arguments) {
+			requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: Forks should _first update_ its `FinalityProof()` before it emits _any_ events
+
+			// Requirement 3: states are finalized in order of increasing rank (without skipping any states).
+			expectedNextFinalizationEvents := statesAwaitingFinalization[0]
+			require.Equal(t, expectedNextFinalizationEvents.State, args[0])
+
+			// Requirement 2: for each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification.
+			// no duplication of events under normal operations expected
+			require.True(t, expectedNextFinalizationEvents.MakeFinalCalled)
+			require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
+			expectedNextFinalizationEvents.OnFinalizedStateEmitted = true
+
+			// At this point, `MakeFinal` and `OnFinalizedState` have both been emitted for the state, so we are done with it
+			statesAwaitingFinalization = statesAwaitingFinalization[1:]
+		}).Times(4)
+
+		return forks, finalizationCallback, notifier
+	}
+
+	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
+		forks, finalizationCallback, notifier := setupForksAndAssertions()
+		err = addValidatedStateToForks(forks, states[:5]) // adding [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] [◄(11) 12]
+		require.NoError(t, err)
+		requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
+
+		require.NoError(t, forks.AddValidatedState(states[5])) // adding [◄(12) 22] should trigger finalization events
+		requireFinalityProof(t, forks, expectedFinalityProof)
+		finalizationCallback.AssertExpectations(t)
+		notifier.AssertExpectations(t)
+	})
+
+	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
+		forks, finalizationCallback, notifier := setupForksAndAssertions()
+		// adding [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] ◄(11)
+		require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[0])))
+		require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[1])))
+		require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[2])))
+		require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[3])))
+		require.NoError(t, err)
+		requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
+
+		// adding certified state [◄(11) 12] ◄(12) should trigger finalization events
+		require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[4])))
+		requireFinalityProof(t, forks, expectedFinalityProof)
+		finalizationCallback.AssertExpectations(t)
+		notifier.AssertExpectations(t)
+	})
+}
+
+/* ************************************* internal functions ************************************* */
+
+func newForks(t *testing.T) (*Forks[*helper.TestState, *helper.TestVote], *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
+	notifier := mocks.NewConsumer[*helper.TestState, *helper.TestVote](t)
+	notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Maybe()
+	notifier.On("OnFinalizedState", mock.Anything).Maybe()
+	finalizationCallback := mocks.NewFinalizer(t)
+	finalizationCallback.On("MakeFinal", mock.Anything).Return(nil).Maybe()
+
+	genesisBQ := makeGenesis()
+
+	forks, err := NewForks(genesisBQ, finalizationCallback, notifier)
+
+	require.NoError(t, err)
+	return forks, notifier
+}
+
+// addValidatedStateToForks adds all the given states to Forks, in order.
+// If any errors occur, returns the first one.
+func addValidatedStateToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
+	for _, state := range states {
+		err := forks.AddValidatedState(state)
+		if err != nil {
+			return fmt.Errorf("test failed to add state for rank %d: %w", state.Rank, err)
+		}
+	}
+	return nil
+}
+
+// addCertifiedStatesToForks iterates over all states, caches them locally in a map,
+// constructs certified states whenever possible, and adds the certified states to Forks.
+// Note: if `states` forms a single fork, the _last state_ in the slice will not be added,
+// because there is no QC for it.
+// If any errors occur, returns the first one.
+func addCertifiedStatesToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
+	uncertifiedStates := make(map[models.Identity]*models.State[*helper.TestState])
+	for _, b := range states {
+		uncertifiedStates[b.Identifier] = b
+		parentID := b.ParentQuorumCertificate.Identity()
+		parent, found := uncertifiedStates[parentID]
+		if !found {
+			continue
+		}
+		delete(uncertifiedStates, parentID)
+
+		certParent, err := models.NewCertifiedState(parent, b.ParentQuorumCertificate)
+		if err != nil {
+			return fmt.Errorf("test failed to create certified state for rank %d: %w", parent.Rank, err)
+		}
+		err = forks.AddCertifiedState(certParent)
+		if err != nil {
+			return fmt.Errorf("test failed to add certified state for rank %d: %w", certParent.State.Rank, err)
+		}
+	}
+
+	return nil
+}
+
+// requireLatestFinalizedState asserts that the given state is reported as the latest finalized state
+// and that FinalizedRank matches its rank.
+func requireLatestFinalizedState(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalized *models.State[*helper.TestState]) {
+	require.Equal(t, expectedFinalized, forks.FinalizedState(), "finalized state is not as expected")
+	require.Equal(t, forks.FinalizedRank(), expectedFinalized.Rank, "FinalizedRank returned wrong value")
+}
+
+// requireOnlyGenesisStateFinalized asserts that no states have been finalized beyond the genesis state.
+// Caution: does not inspect output of `forks.FinalityProof()`
+func requireOnlyGenesisStateFinalized(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote]) {
+	genesis := makeGenesis()
+	require.Equal(t, forks.FinalizedState(), genesis.State, "finalized state is not the genesis state")
+	require.Equal(t, forks.FinalizedState().Rank, genesis.State.Rank)
+	require.Equal(t, forks.FinalizedState().Rank, genesis.CertifyingQuorumCertificate.GetRank())
+	require.Equal(t, forks.FinalizedRank(), genesis.State.Rank, "FinalizedRank does not match the genesis rank")
+
+	finalityProof, isKnown := forks.FinalityProof()
+	require.Nil(t, finalityProof, "expecting finality proof to be nil for genesis state at initialization")
+	require.False(t, isKnown, "no finality proof should be known for genesis state at initialization")
+}
+
+// requireFinalityProof asserts that Forks reports the given finality proof and that the
+// finalized state and rank are consistent with it.
+func requireFinalityProof(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalityProof *consensus.FinalityProof[*helper.TestState]) {
+	finalityProof, isKnown := forks.FinalityProof()
+	require.True(t, isKnown)
+	require.Equal(t, expectedFinalityProof, finalityProof)
+	require.Equal(t, forks.FinalizedState(), expectedFinalityProof.State)
+	require.Equal(t, forks.FinalizedRank(), expectedFinalityProof.State.Rank)
+}
+
+// toCertifiedState generates a QC for the given state and returns the combination as a certified state
+func toCertifiedState(t *testing.T, state *models.State[*helper.TestState]) *models.CertifiedState[*helper.TestState] {
+	qc := &helper.TestQuorumCertificate{
+		Rank:     state.Rank,
+		Selector: state.Identifier,
+	}
+	cb, err := models.NewCertifiedState(state, qc)
+	require.NoError(t, err)
+	return cb
+}
+
+// toCertifiedStates generates a QC for each given state and returns the combinations as certified states
+func toCertifiedStates(t *testing.T, states ...*models.State[*helper.TestState]) []*models.CertifiedState[*helper.TestState] {
+	certStates := make([]*models.CertifiedState[*helper.TestState], 0, len(states))
+	for _, b := range states {
+		certStates = append(certStates, toCertifiedState(t, b))
+	}
+	return certStates
+}
+
+func makeFinalityProof(t *testing.T, state *models.State[*helper.TestState], directChild *models.State[*helper.TestState], qcCertifyingChild models.QuorumCertificate) *consensus.FinalityProof[*helper.TestState] {
+	c, err := models.NewCertifiedState(directChild, qcCertifyingChild) // certified child of FinalizedState
+	require.NoError(t, err)
+	return &consensus.FinalityProof[*helper.TestState]{State: state, CertifiedChild: c}
+}
+
+// stateAwaitingFinalization is intended for tracking finalization events and their order for a specific state
+type stateAwaitingFinalization struct {
+	State                   *models.State[*helper.TestState]
+	MakeFinalCalled         bool // indicates whether `Finalizer.MakeFinal` was called
+	OnFinalizedStateEmitted bool // indicates whether the `OnFinalizedState` notification was emitted
+}
+
+// toStateAwaitingFinalization creates a `stateAwaitingFinalization` tracker for each input state
+func toStateAwaitingFinalization(states []*models.State[*helper.TestState]) []*stateAwaitingFinalization {
+	trackers := make([]*stateAwaitingFinalization, 0, len(states))
+	for _, b := range states {
+		tracker := &stateAwaitingFinalization{b, false, false}
+		trackers = append(trackers, tracker)
+	}
+	return trackers
+}
diff --git a/consensus/forks/state_builder_test.go b/consensus/forks/state_builder_test.go
new file mode 100644
index 0000000..f1b0022
--- /dev/null
+++ b/consensus/forks/state_builder_test.go
@@ -0,0 +1,165 @@
+package forks
+
+import (
+	"fmt"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+)
+
+// StateRank specifies the data to create a state
+type StateRank struct {
+	// Rank is the rank of the state to be created
+	Rank uint64
+	// StateVersion is the version of the state for that rank.
+	// Useful for creating conflicting states at the same rank.
+	StateVersion int
+	// QCRank is the rank of the QC embedded in this state (also: the rank of the state's parent)
+	QCRank uint64
+	// QCVersion is the version of the QC for that rank.
+	QCVersion int
+}
+
+// QCIndex returns a unique identifier for the state's QC.
+func (bv *StateRank) QCIndex() string { + return fmt.Sprintf("%v-%v", bv.QCRank, bv.QCVersion) +} + +// StateIndex returns a unique identifier for the state. +func (bv *StateRank) StateIndex() string { + return fmt.Sprintf("%v-%v", bv.Rank, bv.StateVersion) +} + +// StateBuilder is a test utility for creating state structure fixtures. +type StateBuilder struct { + stateRanks []*StateRank +} + +func NewStateBuilder() *StateBuilder { + return &StateBuilder{ + stateRanks: make([]*StateRank, 0), + } +} + +// Add adds a state with the given qcRank and stateRank. Returns self-reference for chaining. +func (bb *StateBuilder) Add(qcRank uint64, stateRank uint64) *StateBuilder { + bb.stateRanks = append(bb.stateRanks, &StateRank{ + Rank: stateRank, + QCRank: qcRank, + }) + return bb +} + +// GenesisState returns the genesis state, which is always finalized. +func (bb *StateBuilder) GenesisState() *models.CertifiedState[*helper.TestState] { + return makeGenesis() +} + +// AddVersioned adds a state with the given qcRank and stateRank. +// In addition, the version identifier of the QC embedded within the state +// is specified by `qcVersion`. The version identifier for the state itself +// (primarily for emulating different state ID) is specified by `stateVersion`. +// [(◄3) 4] denotes a state of rank 4, with a qc for rank 3 +// [(◄3) 4'] denotes a state of rank 4 that is different than [(◄3) 4], with a qc for rank 3 +// [(◄3) 4'] can be created by AddVersioned(3, 4, 0, 1) +// [(◄3') 4] can be created by AddVersioned(3, 4, 1, 0) +// Returns self-reference for chaining. +func (bb *StateBuilder) AddVersioned(qcRank uint64, stateRank uint64, qcVersion int, stateVersion int) *StateBuilder { + bb.stateRanks = append(bb.stateRanks, &StateRank{ + Rank: stateRank, + QCRank: qcRank, + StateVersion: stateVersion, + QCVersion: qcVersion, + }) + return bb +} + +// Proposals returns a list of all proposals added to the StateBuilder. +// Returns an error if the states do not form a connected tree rooted at genesis. +func (bb *StateBuilder) Proposals() ([]*models.Proposal[*helper.TestState], error) { + states := make([]*models.Proposal[*helper.TestState], 0, len(bb.stateRanks)) + + genesisState := makeGenesis() + genesisBV := &StateRank{ + Rank: genesisState.State.Rank, + QCRank: genesisState.CertifyingQuorumCertificate.GetRank(), + } + + qcs := make(map[string]models.QuorumCertificate) + qcs[genesisBV.QCIndex()] = genesisState.CertifyingQuorumCertificate + + for _, bv := range bb.stateRanks { + qc, ok := qcs[bv.QCIndex()] + if !ok { + return nil, fmt.Errorf("test fail: no qc found for qc index: %v", bv.QCIndex()) + } + var previousRankTimeoutCert models.TimeoutCertificate + if qc.GetRank()+1 != bv.Rank { + previousRankTimeoutCert = helper.MakeTC(helper.WithTCRank(bv.Rank - 1)) + } + proposal := &models.Proposal[*helper.TestState]{ + State: &models.State[*helper.TestState]{ + Rank: bv.Rank, + ParentQuorumCertificate: qc, + }, + PreviousRankTimeoutCertificate: previousRankTimeoutCert, + } + proposal.State.Identifier = makeIdentifier(proposal.State, bv.StateVersion) + + states = append(states, proposal) + + // generate QC for the new proposal + qcs[bv.StateIndex()] = &helper.TestQuorumCertificate{ + Rank: proposal.State.Rank, + Selector: proposal.State.Identifier, + AggregatedSignature: nil, + } + } + + return states, nil +} + +// States returns a list of all states added to the StateBuilder. +// Returns an error if the states do not form a connected tree rooted at genesis. 
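+// States is a thin wrapper around Proposals(): it generates the same connected tree and
+// then discards the proposal wrappers, returning only the embedded states.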
+func (bb *StateBuilder) States() ([]*models.State[*helper.TestState], error) { + proposals, err := bb.Proposals() + if err != nil { + return nil, fmt.Errorf("StateBuilder failed to generate proposals: %w", err) + } + return toStates(proposals), nil +} + +// makeIdentifier creates a state identifier based on the state's rank, QC, and state version. +// This is used to identify states uniquely, in this specific test setup. +// ATTENTION: this should not be confused with the state ID used in production code which is a collision-resistant hash +// of the full state content. +func makeIdentifier(state *models.State[*helper.TestState], stateVersion int) models.Identity { + return fmt.Sprintf("%d-%s-%d", state.Rank, state.Identifier, stateVersion) +} + +// constructs the genesis state (identical for all calls) +func makeGenesis() *models.CertifiedState[*helper.TestState] { + genesis := &models.State[*helper.TestState]{ + Rank: 1, + } + genesis.Identifier = makeIdentifier(genesis, 0) + + genesisQC := &helper.TestQuorumCertificate{ + Rank: 1, + Selector: genesis.Identifier, + } + certifiedGenesisState, err := models.NewCertifiedState(genesis, genesisQC) + if err != nil { + panic(fmt.Sprintf("combining genesis state and genensis QC to certified state failed: %s", err.Error())) + } + return certifiedGenesisState +} + +// toStates converts the given proposals to slice of states +func toStates(proposals []*models.Proposal[*helper.TestState]) []*models.State[*helper.TestState] { + states := make([]*models.State[*helper.TestState], 0, len(proposals)) + for _, b := range proposals { + states = append(states, b.State) + } + return states +} diff --git a/consensus/forks/state_container.go b/consensus/forks/state_container.go new file mode 100644 index 0000000..71cd7d3 --- /dev/null +++ b/consensus/forks/state_container.go @@ -0,0 +1,77 @@ +package forks + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/forest" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateContainer wraps a state proposal to implement forest.Vertex +// so the proposal can be stored in forest.LevelledForest +type StateContainer[StateT models.Unique] models.State[StateT] + +var _ forest.Vertex = (*StateContainer[*nilUnique])(nil) + +func ToStateContainer2[StateT models.Unique]( + state *models.State[StateT], +) *StateContainer[StateT] { + return (*StateContainer[StateT])(state) +} + +func (b *StateContainer[StateT]) GetState() *models.State[StateT] { + return (*models.State[StateT])(b) +} + +// Functions implementing forest.Vertex +func (b *StateContainer[StateT]) VertexID() models.Identity { + return b.Identifier +} + +func (b *StateContainer[StateT]) Level() uint64 { + return b.Rank +} + +func (b *StateContainer[StateT]) Parent() (models.Identity, uint64) { + // Caution: not all states have a QC for the parent, such as the spork root + // states. Per API contract, we are obliged to return a value to prevent + // panics during logging. (see vertex `forest.VertexToString` method). + if b.ParentQuorumCertificate == nil { + return "", 0 + } + return b.ParentQuorumCertificate.Identity(), + b.ParentQuorumCertificate.GetRank() +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. 
+func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/go.mod b/consensus/go.mod index fe7ec5b..8946d30 100644 --- a/consensus/go.mod +++ b/consensus/go.mod @@ -1,16 +1,8 @@ module source.quilibrium.com/quilibrium/monorepo/consensus -go 1.23.0 +go 1.24.0 -toolchain go1.23.4 - -replace source.quilibrium.com/quilibrium/monorepo/protobufs => ../protobufs - -replace source.quilibrium.com/quilibrium/monorepo/types => ../types - -replace source.quilibrium.com/quilibrium/monorepo/config => ../config - -replace source.quilibrium.com/quilibrium/monorepo/utils => ../utils +toolchain go1.24.9 replace github.com/multiformats/go-multiaddr => ../go-multiaddr @@ -20,13 +12,31 @@ replace github.com/libp2p/go-libp2p => ../go-libp2p replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht -replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub - -require go.uber.org/zap v1.27.0 +replace source.quilibrium.com/quilibrium/monorepo/lifecycle => ../lifecycle require ( - github.com/stretchr/testify v1.10.0 // indirect - go.uber.org/multierr v1.11.0 // indirect + github.com/gammazero/workerpool v1.1.3 + github.com/rs/zerolog v1.34.0 ) -require github.com/pkg/errors v0.9.1 +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gammazero/deque v0.2.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.uber.org/goleak v1.3.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +require ( + github.com/pkg/errors v0.9.1 + github.com/stretchr/testify v1.11.1 + go.uber.org/atomic v1.11.0 + golang.org/x/sync v0.17.0 + golang.org/x/sys v0.33.0 // indirect + source.quilibrium.com/quilibrium/monorepo/lifecycle v0.0.0-00010101000000-000000000000 +) diff --git a/consensus/go.sum b/consensus/go.sum index 63e45b1..4eba35a 100644 --- a/consensus/go.sum +++ b/consensus/go.sum @@ -1,16 +1,51 @@ +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= +github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/consensus/helper/quorum_certificate.go b/consensus/helper/quorum_certificate.go new file mode 100644 index 0000000..ae0b068 --- /dev/null +++ b/consensus/helper/quorum_certificate.go @@ -0,0 +1,122 @@ +package helper + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math/rand" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type TestAggregatedSignature struct { + Signature []byte + PublicKey []byte + Bitmask []byte +} + +func (t *TestAggregatedSignature) GetSignature() []byte { + return t.Signature +} + +func (t *TestAggregatedSignature) GetPubKey() []byte { + return t.PublicKey +} + +func (t *TestAggregatedSignature) GetBitmask() []byte { + return t.Bitmask +} + +type TestQuorumCertificate struct { + Filter []byte + Rank uint64 + FrameNumber uint64 + Selector models.Identity + Timestamp uint64 + AggregatedSignature models.AggregatedSignature +} + +func (t *TestQuorumCertificate) GetFilter() []byte { + return t.Filter +} + +func (t *TestQuorumCertificate) GetRank() uint64 { + return t.Rank +} + +func (t *TestQuorumCertificate) GetFrameNumber() uint64 { + return t.FrameNumber +} + +func (t *TestQuorumCertificate) Identity() models.Identity { + return t.Selector +} + +func (t *TestQuorumCertificate) GetTimestamp() uint64 { + return t.Timestamp +} + +func (t *TestQuorumCertificate) GetAggregatedSignature() models.AggregatedSignature { + return t.AggregatedSignature +} + +func (t *TestQuorumCertificate) Equals(other models.QuorumCertificate) bool { + return bytes.Equal(t.Filter, other.GetFilter()) && + t.Rank == other.GetRank() && + t.FrameNumber == other.GetFrameNumber() && + t.Selector == other.Identity() && + t.Timestamp == other.GetTimestamp() && + bytes.Equal( + t.AggregatedSignature.GetBitmask(), + other.GetAggregatedSignature().GetBitmask(), + ) && + bytes.Equal( + t.AggregatedSignature.GetPubKey(), + other.GetAggregatedSignature().GetPubKey(), + ) && + bytes.Equal( + t.AggregatedSignature.GetSignature(), + other.GetAggregatedSignature().GetSignature(), + ) +} + +func MakeQC(options ...func(*TestQuorumCertificate)) models.QuorumCertificate { + s := make([]byte, 32) + crand.Read(s) + qc := &TestQuorumCertificate{ + Rank: rand.Uint64(), + FrameNumber: rand.Uint64() + 1, + Selector: string(s), + Timestamp: uint64(time.Now().UnixMilli()), + AggregatedSignature: &TestAggregatedSignature{ + PublicKey: make([]byte, 585), + Signature: make([]byte, 74), + Bitmask: []byte{0x01}, + }, + } + for _, option := range options { + option(qc) + } + return qc +} + +func WithQCState[StateT models.Unique](state *models.State[StateT]) func(*TestQuorumCertificate) { + return func(qc *TestQuorumCertificate) { + qc.Rank = state.Rank + qc.Selector = state.Identifier + } +} + +func WithQCSigners(signerIndices []byte) func(*TestQuorumCertificate) { + return func(qc *TestQuorumCertificate) { + qc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithQCRank(rank uint64) func(*TestQuorumCertificate) { + return func(qc *TestQuorumCertificate) { + qc.Rank = rank + 
qc.Selector = fmt.Sprintf("%d", rank) + } +} diff --git a/consensus/helper/state.go b/consensus/helper/state.go new file mode 100644 index 0000000..f644287 --- /dev/null +++ b/consensus/helper/state.go @@ -0,0 +1,467 @@ +package helper + +import ( + crand "crypto/rand" + "fmt" + "math/rand" + "slices" + "strings" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type TestWeightedIdentity struct { + ID string +} + +// Identity implements models.WeightedIdentity. +func (t *TestWeightedIdentity) Identity() models.Identity { + return t.ID +} + +// PublicKey implements models.WeightedIdentity. +func (t *TestWeightedIdentity) PublicKey() []byte { + return make([]byte, 585) +} + +// Weight implements models.WeightedIdentity. +func (t *TestWeightedIdentity) Weight() uint64 { + return 1000 +} + +var _ models.WeightedIdentity = (*TestWeightedIdentity)(nil) + +type TestState struct { + Rank uint64 + Signature []byte + Timestamp uint64 + ID models.Identity + Prover models.Identity +} + +// Clone implements models.Unique. +func (t *TestState) Clone() models.Unique { + return &TestState{ + Rank: t.Rank, + Signature: slices.Clone(t.Signature), + Timestamp: t.Timestamp, + ID: t.ID, + Prover: t.Prover, + } +} + +// GetRank implements models.Unique. +func (t *TestState) GetRank() uint64 { + return t.Rank +} + +// GetSignature implements models.Unique. +func (t *TestState) GetSignature() []byte { + return t.Signature +} + +// GetTimestamp implements models.Unique. +func (t *TestState) GetTimestamp() uint64 { + return t.Timestamp +} + +// Identity implements models.Unique. +func (t *TestState) Identity() models.Identity { + return t.ID +} + +// Source implements models.Unique. +func (t *TestState) Source() models.Identity { + return t.Prover +} + +type TestVote struct { + Rank uint64 + Signature []byte + Timestamp uint64 + ID models.Identity + StateID models.Identity +} + +// Clone implements models.Unique. +func (t *TestVote) Clone() models.Unique { + return &TestVote{ + Rank: t.Rank, + Signature: slices.Clone(t.Signature), + Timestamp: t.Timestamp, + ID: t.ID, + StateID: t.StateID, + } +} + +// GetRank implements models.Unique. +func (t *TestVote) GetRank() uint64 { + return t.Rank +} + +// GetSignature implements models.Unique. +func (t *TestVote) GetSignature() []byte { + return t.Signature +} + +// GetTimestamp implements models.Unique. +func (t *TestVote) GetTimestamp() uint64 { + return t.Timestamp +} + +// Identity implements models.Unique. +func (t *TestVote) Identity() models.Identity { + return t.ID +} + +// Source implements models.Unique. +func (t *TestVote) Source() models.Identity { + return t.StateID +} + +type TestPeer struct { + PeerID string +} + +// Clone implements models.Unique. +func (t *TestPeer) Clone() models.Unique { + return &TestPeer{ + PeerID: t.PeerID, + } +} + +// GetRank implements models.Unique. +func (t *TestPeer) GetRank() uint64 { + return 0 +} + +// GetSignature implements models.Unique. +func (t *TestPeer) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (t *TestPeer) GetTimestamp() uint64 { + return 0 +} + +// Identity implements models.Unique. +func (t *TestPeer) Identity() models.Identity { + return t.PeerID +} + +// Source implements models.Unique. +func (t *TestPeer) Source() models.Identity { + return t.PeerID +} + +type TestCollected struct { + Rank uint64 + TXs [][]byte +} + +// Clone implements models.Unique. 
+func (t *TestCollected) Clone() models.Unique { + return &TestCollected{ + Rank: t.Rank, + TXs: slices.Clone(t.TXs), + } +} + +// GetRank implements models.Unique. +func (t *TestCollected) GetRank() uint64 { + return t.Rank +} + +// GetSignature implements models.Unique. +func (t *TestCollected) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (t *TestCollected) GetTimestamp() uint64 { + return 0 +} + +// Identity implements models.Unique. +func (t *TestCollected) Identity() models.Identity { + return fmt.Sprintf("%d", t.Rank) +} + +// Source implements models.Unique. +func (t *TestCollected) Source() models.Identity { + return "" +} + +var _ models.Unique = (*TestState)(nil) +var _ models.Unique = (*TestVote)(nil) +var _ models.Unique = (*TestPeer)(nil) +var _ models.Unique = (*TestCollected)(nil) + +func MakeIdentity() models.Identity { + s := make([]byte, 32) + crand.Read(s) + return models.Identity(s) +} + +func MakeState[StateT models.Unique](options ...func(*models.State[StateT])) *models.State[StateT] { + rank := rand.Uint64() + + state := models.State[StateT]{ + Rank: rank, + Identifier: MakeIdentity(), + ProposerID: MakeIdentity(), + Timestamp: uint64(time.Now().UnixMilli()), + ParentQuorumCertificate: MakeQC(WithQCRank(rank - 1)), + } + for _, option := range options { + option(&state) + } + return &state +} + +func WithStateRank[StateT models.Unique](rank uint64) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.Rank = rank + } +} + +func WithStateProposer[StateT models.Unique](proposerID models.Identity) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ProposerID = proposerID + } +} + +func WithParentState[StateT models.Unique](parent *models.State[StateT]) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate.(*TestQuorumCertificate).Selector = parent.Identifier + state.ParentQuorumCertificate.(*TestQuorumCertificate).Rank = parent.Rank + } +} + +func WithParentSigners[StateT models.Unique](signerIndices []byte) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate.(*TestQuorumCertificate).AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithStateQC[StateT models.Unique](qc models.QuorumCertificate) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate = qc + } +} + +func MakeVote[VoteT models.Unique]() *VoteT { + return new(VoteT) +} + +func MakeSignedProposal[StateT models.Unique, VoteT models.Unique](options ...func(*models.SignedProposal[StateT, VoteT])) *models.SignedProposal[StateT, VoteT] { + proposal := &models.SignedProposal[StateT, VoteT]{ + Proposal: *MakeProposal[StateT](), + Vote: MakeVote[VoteT](), + } + for _, option := range options { + option(proposal) + } + return proposal +} + +func MakeProposal[StateT models.Unique](options ...func(*models.Proposal[StateT])) *models.Proposal[StateT] { + proposal := &models.Proposal[StateT]{ + State: MakeState[StateT](), + PreviousRankTimeoutCertificate: nil, + } + for _, option := range options { + option(proposal) + } + return proposal +} + +func WithProposal[StateT models.Unique, VoteT models.Unique](proposal *models.Proposal[StateT]) func(*models.SignedProposal[StateT, VoteT]) { + return func(signedProposal *models.SignedProposal[StateT, VoteT]) { + signedProposal.Proposal = *proposal + } +} + +func WithState[StateT 
models.Unique](state *models.State[StateT]) func(*models.Proposal[StateT]) { + return func(proposal *models.Proposal[StateT]) { + proposal.State = state + } +} + +func WithVote[StateT models.Unique, VoteT models.Unique](vote *VoteT) func(*models.SignedProposal[StateT, VoteT]) { + return func(proposal *models.SignedProposal[StateT, VoteT]) { + proposal.Vote = vote + } +} + +func WithPreviousRankTimeoutCertificate[StateT models.Unique](previousRankTimeoutCert models.TimeoutCertificate) func(*models.Proposal[StateT]) { + return func(proposal *models.Proposal[StateT]) { + proposal.PreviousRankTimeoutCertificate = previousRankTimeoutCert + } +} + +func WithWeightedIdentityList(count int) []models.WeightedIdentity { + wi := []models.WeightedIdentity{} + for _ = range count { + wi = append(wi, &TestWeightedIdentity{ + ID: MakeIdentity(), + }) + } + return wi +} + +func VoteForStateFixture(state *models.State[*TestState], ops ...func(vote **TestVote)) *TestVote { + v := &TestVote{ + Rank: state.Rank, + ID: MakeIdentity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + for _, op := range ops { + op(&v) + } + return v +} + +func VoteFixture(op func(vote **TestVote)) *TestVote { + v := &TestVote{ + Rank: rand.Uint64(), + ID: MakeIdentity(), + StateID: MakeIdentity(), + Signature: make([]byte, 74), + } + op(&v) + return v +} + +type FmtLog struct { + params []consensus.LogParam +} + +// Error implements consensus.TraceLogger. +func (n *FmtLog) Error(message string, err error, params ...consensus.LogParam) { + b := strings.Builder{} + b.WriteString(fmt.Sprintf("ERROR: %s: %v\n", message, err)) + for _, param := range n.params { + b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + for _, param := range params { + b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + fmt.Println(b.String()) +} + +// Trace implements consensus.TraceLogger. +func (n *FmtLog) Trace(message string, params ...consensus.LogParam) { + b := strings.Builder{} + b.WriteString(fmt.Sprintf("TRACE: %s\n", message)) + b.WriteString(fmt.Sprintf("\t[%s]\n", time.Now().String())) + for _, param := range n.params { + b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + for _, param := range params { + b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + fmt.Println(b.String()) +} + +func (n *FmtLog) With(params ...consensus.LogParam) consensus.TraceLogger { + return &FmtLog{ + params: slices.Concat(n.params, params), + } +} + +func stringFromValue(param consensus.LogParam) string { + switch param.GetKind() { + case "string": + return param.GetValue().(string) + case "time": + return param.GetValue().(time.Time).String() + default: + return fmt.Sprintf("%v", param.GetValue()) + } +} + +func Logger() *FmtLog { + return &FmtLog{} +} + +type BufferLog struct { + params []consensus.LogParam + b *strings.Builder +} + +// Error implements consensus.TraceLogger. +func (n *BufferLog) Error(message string, err error, params ...consensus.LogParam) { + n.b.WriteString(fmt.Sprintf("ERROR: %s: %v\n", message, err)) + for _, param := range n.params { + n.b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + for _, param := range params { + n.b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } +} + +// Trace implements consensus.TraceLogger. 
+func (n *BufferLog) Trace(message string, params ...consensus.LogParam) { + n.b.WriteString(fmt.Sprintf("TRACE: %s\n", message)) + n.b.WriteString(fmt.Sprintf("\t[%s]\n", time.Now().String())) + for _, param := range n.params { + n.b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } + for _, param := range params { + n.b.WriteString(fmt.Sprintf( + "\t%s: %s\n", + param.GetKey(), + stringFromValue(param), + )) + } +} + +func (n *BufferLog) Flush() { + fmt.Println(n.b.String()) +} + +func (n *BufferLog) With(params ...consensus.LogParam) consensus.TraceLogger { + return &BufferLog{ + params: slices.Concat(n.params, params), + b: n.b, + } +} + +func BufferLogger() *BufferLog { + return &BufferLog{ + b: &strings.Builder{}, + } +} diff --git a/consensus/helper/timeout_certificate.go b/consensus/helper/timeout_certificate.go new file mode 100644 index 0000000..90bef9f --- /dev/null +++ b/consensus/helper/timeout_certificate.go @@ -0,0 +1,171 @@ +package helper + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "slices" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type TestTimeoutCertificate struct { + Filter []byte + Rank uint64 + LatestRanks []uint64 + LatestQuorumCert models.QuorumCertificate + AggregatedSignature models.AggregatedSignature +} + +func (t *TestTimeoutCertificate) GetFilter() []byte { + return t.Filter +} + +func (t *TestTimeoutCertificate) GetRank() uint64 { + return t.Rank +} + +func (t *TestTimeoutCertificate) GetLatestRanks() []uint64 { + return t.LatestRanks +} + +func (t *TestTimeoutCertificate) GetLatestQuorumCert() models.QuorumCertificate { + return t.LatestQuorumCert +} + +func (t *TestTimeoutCertificate) GetAggregatedSignature() models.AggregatedSignature { + return t.AggregatedSignature +} + +func (t *TestTimeoutCertificate) Equals(other models.TimeoutCertificate) bool { + return bytes.Equal(t.Filter, other.GetFilter()) && + t.Rank == other.GetRank() && + slices.Equal(t.LatestRanks, other.GetLatestRanks()) && + t.LatestQuorumCert.Equals(other.GetLatestQuorumCert()) && + bytes.Equal( + t.AggregatedSignature.GetBitmask(), + other.GetAggregatedSignature().GetBitmask(), + ) && + bytes.Equal( + t.AggregatedSignature.GetPubKey(), + other.GetAggregatedSignature().GetPubKey(), + ) && + bytes.Equal( + t.AggregatedSignature.GetSignature(), + other.GetAggregatedSignature().GetSignature(), + ) +} + +func MakeTC(options ...func(*TestTimeoutCertificate)) models.TimeoutCertificate { + tcRank := rand.Uint64() + s := make([]byte, 32) + crand.Read(s) + qc := MakeQC(WithQCRank(tcRank - 1)) + highQCRanks := make([]uint64, 3) + for i := range highQCRanks { + highQCRanks[i] = qc.GetRank() + } + tc := &TestTimeoutCertificate{ + Rank: tcRank, + LatestQuorumCert: qc, + LatestRanks: highQCRanks, + AggregatedSignature: &TestAggregatedSignature{ + Signature: make([]byte, 74), + PublicKey: make([]byte, 585), + Bitmask: []byte{0x01}, + }, + } + for _, option := range options { + option(tc) + } + return tc +} + +func WithTCNewestQC(qc models.QuorumCertificate) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.LatestQuorumCert = qc + tc.LatestRanks = []uint64{qc.GetRank()} + } +} + +func WithTCSigners(signerIndices []byte) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithTCRank(rank uint64) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + 
tc.Rank = rank + } +} + +func WithTCHighQCRanks(highQCRanks []uint64) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.LatestRanks = highQCRanks + } +} + +func TimeoutStateFixture[VoteT models.Unique]( + opts ...func(TimeoutState *models.TimeoutState[VoteT]), +) *models.TimeoutState[VoteT] { + timeoutRank := uint64(rand.Uint32()) + newestQC := MakeQC(WithQCRank(timeoutRank - 10)) + + timeout := &models.TimeoutState[VoteT]{ + Rank: timeoutRank, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: MakeTC( + WithTCRank(timeoutRank-1), + WithTCNewestQC(MakeQC(WithQCRank(newestQC.GetRank()))), + ), + } + + for _, opt := range opts { + opt(timeout) + } + + if timeout.Vote == nil { + panic("WithTimeoutVote must be called") + } + + return timeout +} + +func WithTimeoutVote[VoteT models.Unique]( + vote VoteT, +) func(*models.TimeoutState[VoteT]) { + return func(state *models.TimeoutState[VoteT]) { + state.Vote = &vote + } +} + +func WithTimeoutNewestQC[VoteT models.Unique]( + newestQC models.QuorumCertificate, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.LatestQuorumCertificate = newestQC + } +} + +func WithTimeoutPreviousRankTimeoutCertificate[VoteT models.Unique]( + previousRankTimeoutCert models.TimeoutCertificate, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.PriorRankTimeoutCertificate = previousRankTimeoutCert + } +} + +func WithTimeoutStateRank[VoteT models.Unique]( + rank uint64, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.Rank = rank + if timeout.LatestQuorumCertificate != nil { + timeout.LatestQuorumCertificate.(*TestQuorumCertificate).Rank = rank + } + if timeout.PriorRankTimeoutCertificate != nil { + timeout.PriorRankTimeoutCertificate.(*TestTimeoutCertificate).Rank = rank - 1 + } + } +} diff --git a/consensus/integration/assertion_test.go b/consensus/integration/assertion_test.go new file mode 100644 index 0000000..767e033 --- /dev/null +++ b/consensus/integration/assertion_test.go @@ -0,0 +1,40 @@ +package integration + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func FinalizedStates(in *Instance) []*models.State[*helper.TestState] { + finalized := make([]*models.State[*helper.TestState], 0) + + lastFinalID := in.forks.FinalizedState().Identifier + in.updatingStates.RLock() + finalizedState, found := in.headers[lastFinalID] + defer in.updatingStates.RUnlock() + if !found { + return finalized + } + + for { + finalized = append(finalized, finalizedState) + if finalizedState.ParentQuorumCertificate == nil { + break + } + finalizedState, found = + in.headers[finalizedState.ParentQuorumCertificate.Identity()] + if !found { + break + } + } + return finalized +} + +func FinalizedRanks(in *Instance) []uint64 { + finalizedStates := FinalizedStates(in) + ranks := make([]uint64, 0, len(finalizedStates)) + for _, b := range finalizedStates { + ranks = append(ranks, b.Rank) + } + return ranks +} diff --git a/consensus/integration/conditions_test.go b/consensus/integration/conditions_test.go new file mode 100644 index 0000000..616d5fe --- /dev/null +++ b/consensus/integration/conditions_test.go @@ -0,0 +1,19 @@ +package integration + +type Condition func(*Instance) bool + +func RightAway(*Instance) bool { + return true +} + +func RankFinalized(rank uint64) Condition { + return func(in 
*Instance) bool { + return in.forks.FinalizedRank() >= rank + } +} + +func RankReached(rank uint64) Condition { + return func(in *Instance) bool { + return in.pacemaker.CurrentRank() >= rank + } +} diff --git a/consensus/integration/connect_test.go b/consensus/integration/connect_test.go new file mode 100644 index 0000000..c000f89 --- /dev/null +++ b/consensus/integration/connect_test.go @@ -0,0 +1,114 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func Connect(t *testing.T, instances []*Instance) { + + // first, create a map of all instances and a queue for each + lookup := make(map[models.Identity]*Instance) + for _, in := range instances { + lookup[in.localID] = in + } + + // then, for each instance, initialize a wired up communicator + for _, sender := range instances { + sender := sender // avoid capturing loop variable in closure + + *sender.notifier = *NewMockedCommunicatorConsumer() + sender.notifier.CommunicatorConsumer.On("OnOwnProposal", mock.Anything, mock.Anything).Run( + func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(t, ok) + // sender should always have the parent + sender.updatingStates.RLock() + _, exists := sender.headers[proposal.State.ParentQuorumCertificate.Identity()] + sender.updatingStates.RUnlock() + if !exists { + t.Fatalf("parent for proposal not found (sender: %x, parent: %x)", sender.localID, proposal.State.ParentQuorumCertificate.Identity()) + } + + // store locally and loop back to engine for processing + sender.ProcessState(proposal) + + // check if we should drop the outgoing proposal + if sender.dropPropOut(proposal) { + return + } + + // iterate through potential receivers + for _, receiver := range instances { + // we should skip ourselves always + if receiver.localID == sender.localID { + continue + } + + // check if we should drop the incoming proposal + if receiver.dropPropIn(proposal) { + continue + } + + receiver.ProcessState(proposal) + } + }, + ) + sender.notifier.CommunicatorConsumer.On("OnOwnVote", mock.Anything, mock.Anything).Run( + func(args mock.Arguments) { + vote, ok := args[0].(**helper.TestVote) + require.True(t, ok) + recipientID, ok := args[1].(models.Identity) + require.True(t, ok) + // get the receiver + receiver, exists := lookup[recipientID] + if !exists { + t.Fatalf("recipient doesn't exist (sender: %x, receiver: %x)", sender.localID, recipientID) + } + // if we are next leader we should be receiving our own vote + if recipientID != sender.localID { + // check if we should drop the outgoing vote + if sender.dropVoteOut(*vote) { + return + } + // check if we should drop the incoming vote + if receiver.dropVoteIn(*vote) { + return + } + } + + // submit the vote to the receiving event loop (non-dropping) + receiver.queue <- *vote + }, + ) + sender.notifier.CommunicatorConsumer.On("OnOwnTimeout", mock.Anything).Run( + func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(t, ok) + // iterate through potential receivers + for _, receiver := range instances { + + // we should skip ourselves always + if receiver.localID == sender.localID { + continue + } + + // check if we should drop the outgoing value + if sender.dropTimeoutStateOut(timeoutState) { + continue + } + + // check if we 
should drop the incoming value + if receiver.dropTimeoutStateIn(timeoutState) { + continue + } + + receiver.queue <- timeoutState + } + }) + } +} diff --git a/consensus/integration/defaults_test.go b/consensus/integration/defaults_test.go new file mode 100644 index 0000000..61b4552 --- /dev/null +++ b/consensus/integration/defaults_test.go @@ -0,0 +1,27 @@ +package integration + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func DefaultRoot() *models.State[*helper.TestState] { + ts := uint64(time.Now().UnixMilli()) + id := helper.MakeIdentity() + s := &helper.TestState{ + Rank: 0, + Signature: make([]byte, 0), + Timestamp: ts, + ID: id, + Prover: "", + } + header := &models.State[*helper.TestState]{ + Rank: 0, + State: &s, + Identifier: id, + Timestamp: ts, + } + return header +} diff --git a/consensus/integration/filters_test.go b/consensus/integration/filters_test.go new file mode 100644 index 0000000..cc5d4d4 --- /dev/null +++ b/consensus/integration/filters_test.go @@ -0,0 +1,76 @@ +package integration + +import ( + "math/rand" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteFilter is a filter function for dropping Votes. +// Return value `true` implies that the given Vote should be +// dropped, while `false` indicates that the Vote should be received. +type VoteFilter func(*helper.TestVote) bool + +func DropNoVotes(*helper.TestVote) bool { + return false +} + +func DropAllVotes(*helper.TestVote) bool { + return true +} + +// DropVoteRandomly drops votes randomly with a probability of `dropProbability` ∈ [0,1] +func DropVoteRandomly(dropProbability float64) VoteFilter { + return func(*helper.TestVote) bool { + return rand.Float64() < dropProbability + } +} + +func DropVotesBy(voterID models.Identity) VoteFilter { + return func(vote *helper.TestVote) bool { + return vote.ID == voterID + } +} + +// ProposalFilter is a filter function for dropping Proposals. +// Return value `true` implies that the given SignedProposal should be +// dropped, while `false` indicates that the SignedProposal should be received. +type ProposalFilter func(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool + +func DropNoProposals(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool { + return false +} + +func DropAllProposals(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool { + return true +} + +// DropProposalRandomly drops proposals randomly with a probability of `dropProbability` ∈ [0,1] +func DropProposalRandomly(dropProbability float64) ProposalFilter { + return func(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool { + return rand.Float64() < dropProbability + } +} + +// DropProposalsBy drops all proposals originating from the specified `proposerID` +func DropProposalsBy(proposerID models.Identity) ProposalFilter { + return func(proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]) bool { + return proposal.State.ProposerID == proposerID + } +} + +// TimeoutStateFilter is a filter function for dropping TimeoutStates. +// Return value `true` implies that the given TimeoutState should be +// dropped, while `false` indicates that the TimeoutState should be received. +type TimeoutStateFilter func(*models.TimeoutState[*helper.TestVote]) bool + +// DropAllTimeoutStates always returns `true`, i.e. 
drops all TimeoutStates +func DropAllTimeoutStates(*models.TimeoutState[*helper.TestVote]) bool { + return true +} + +// DropNoTimeoutStates always returns `false`, i.e. it lets all TimeoutStates pass. +func DropNoTimeoutStates(*models.TimeoutState[*helper.TestVote]) bool { + return false +} diff --git a/consensus/integration/instance_test.go b/consensus/integration/instance_test.go new file mode 100644 index 0000000..1b10056 --- /dev/null +++ b/consensus/integration/instance_test.go @@ -0,0 +1,734 @@ +package integration + +import ( + "context" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "github.com/gammazero/workerpool" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/counters" + "source.quilibrium.com/quilibrium/monorepo/consensus/eventhandler" + "source.quilibrium.com/quilibrium/monorepo/consensus/forks" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" + "source.quilibrium.com/quilibrium/monorepo/consensus/safetyrules" + "source.quilibrium.com/quilibrium/monorepo/consensus/stateproducer" + "source.quilibrium.com/quilibrium/monorepo/consensus/timeoutaggregator" + "source.quilibrium.com/quilibrium/monorepo/consensus/timeoutcollector" + "source.quilibrium.com/quilibrium/monorepo/consensus/validator" + "source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator" + "source.quilibrium.com/quilibrium/monorepo/consensus/votecollector" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +type Instance struct { + + // instance parameters + logger consensus.TraceLogger + participants []models.WeightedIdentity + localID models.Identity + dropVoteIn VoteFilter + dropVoteOut VoteFilter + dropPropIn ProposalFilter + dropPropOut ProposalFilter + dropTimeoutStateIn TimeoutStateFilter + dropTimeoutStateOut TimeoutStateFilter + stop Condition + + // instance data + queue chan interface{} + updatingStates sync.RWMutex + headers map[models.Identity]*models.State[*helper.TestState] + pendings map[models.Identity]*models.SignedProposal[*helper.TestState, *helper.TestVote] // indexed by parent ID + + // mocked dependencies + committee *mocks.DynamicCommittee + builder *mocks.LeaderProvider[*helper.TestState, *helper.TestPeer, *helper.TestCollected] + finalizer *mocks.Finalizer + persist *mocks.ConsensusStore[*helper.TestVote] + signer *mocks.Signer[*helper.TestState, *helper.TestVote] + verifier *mocks.Verifier[*helper.TestVote] + notifier *MockedCommunicatorConsumer + voting *mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer] + + // real dependencies + pacemaker consensus.Pacemaker + producer *stateproducer.StateProducer[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected] + forks *forks.Forks[*helper.TestState, *helper.TestVote] + voteAggregator *voteaggregator.VoteAggregator[*helper.TestState, *helper.TestVote] + timeoutAggregator *timeoutaggregator.TimeoutAggregator[*helper.TestVote] + 
safetyRules *safetyrules.SafetyRules[*helper.TestState, *helper.TestVote] + validator *validator.Validator[*helper.TestState, *helper.TestVote] + + // main logic + handler *eventhandler.EventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected] +} + +type MockedCommunicatorConsumer struct { + notifications.NoopProposalViolationConsumer[*helper.TestState, *helper.TestVote] + notifications.NoopParticipantConsumer[*helper.TestState, *helper.TestVote] + notifications.NoopFinalizationConsumer[*helper.TestState] + *mocks.CommunicatorConsumer[*helper.TestState, *helper.TestVote] +} + +func NewMockedCommunicatorConsumer() *MockedCommunicatorConsumer { + return &MockedCommunicatorConsumer{ + CommunicatorConsumer: &mocks.CommunicatorConsumer[*helper.TestState, *helper.TestVote]{}, + } +} + +var _ consensus.Consumer[*helper.TestState, *helper.TestVote] = (*MockedCommunicatorConsumer)(nil) +var _ consensus.TimeoutCollectorConsumer[*helper.TestVote] = (*Instance)(nil) + +func NewInstance(t *testing.T, options ...Option) *Instance { + + // generate random default identity + identity := helper.MakeIdentity() + + // initialize the default configuration + cfg := Config{ + Logger: helper.Logger(), + Root: DefaultRoot(), + Participants: []models.WeightedIdentity{&helper.TestWeightedIdentity{ + ID: identity, + }}, + LocalID: identity, + Timeouts: timeout.DefaultConfig, + IncomingVotes: DropNoVotes, + OutgoingVotes: DropNoVotes, + IncomingProposals: DropNoProposals, + OutgoingProposals: DropNoProposals, + IncomingTimeoutStates: DropNoTimeoutStates, + OutgoingTimeoutStates: DropNoTimeoutStates, + StopCondition: RightAway, + } + + // apply the custom options + for _, option := range options { + option(&cfg) + } + + // check the local ID is a participant + takesPart := false + for _, participant := range cfg.Participants { + if participant.Identity() == cfg.LocalID { + takesPart = true + break + } + } + require.True(t, takesPart) + + // initialize the instance + in := Instance{ + + // instance parameters + logger: cfg.Logger, + participants: cfg.Participants, + localID: cfg.LocalID, + dropVoteIn: cfg.IncomingVotes, + dropVoteOut: cfg.OutgoingVotes, + dropPropIn: cfg.IncomingProposals, + dropPropOut: cfg.OutgoingProposals, + dropTimeoutStateIn: cfg.IncomingTimeoutStates, + dropTimeoutStateOut: cfg.OutgoingTimeoutStates, + stop: cfg.StopCondition, + + // instance data + pendings: make(map[models.Identity]*models.SignedProposal[*helper.TestState, *helper.TestVote]), + headers: make(map[models.Identity]*models.State[*helper.TestState]), + queue: make(chan interface{}, 1024), + + // instance mocks + committee: &mocks.DynamicCommittee{}, + builder: &mocks.LeaderProvider[*helper.TestState, *helper.TestPeer, *helper.TestCollected]{}, + persist: &mocks.ConsensusStore[*helper.TestVote]{}, + signer: &mocks.Signer[*helper.TestState, *helper.TestVote]{}, + verifier: &mocks.Verifier[*helper.TestVote]{}, + notifier: NewMockedCommunicatorConsumer(), + finalizer: &mocks.Finalizer{}, + voting: &mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]{}, + } + + // insert root state into headers register + in.headers[cfg.Root.Identifier] = cfg.Root + + // program the hotstuff committee state + in.committee.On("IdentitiesByRank", mock.Anything).Return( + func(_ uint64) []models.WeightedIdentity { + return in.participants + }, + nil, + ) + in.committee.On("IdentitiesByState", mock.Anything).Return( + func(_ models.Identity) []models.WeightedIdentity { + return in.participants + }, + 
nil, + ) + for _, participant := range in.participants { + in.committee.On("IdentityByState", mock.Anything, participant.Identity()).Return(participant, nil) + in.committee.On("IdentityByRank", mock.Anything, participant.Identity()).Return(participant, nil) + } + in.committee.On("Self").Return(in.localID) + in.committee.On("LeaderForRank", mock.Anything).Return( + func(rank uint64) models.Identity { + return in.participants[int(rank)%len(in.participants)].Identity() + }, nil, + ) + in.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(len(in.participants)*2000/3), nil) + in.committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(len(in.participants)*2000/3), nil) + + // program the builder module behaviour + in.builder.On("ProveNextState", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + func(ctx context.Context, rank uint64, filter []byte, parentID models.Identity) **helper.TestState { + in.updatingStates.Lock() + defer in.updatingStates.Unlock() + + _, ok := in.headers[parentID] + if !ok { + return nil + } + s := &helper.TestState{ + Rank: rank, + Signature: []byte{}, + Timestamp: uint64(time.Now().UnixMilli()), + ID: helper.MakeIdentity(), + Prover: in.localID, + } + return &s + }, + func(ctx context.Context, rank uint64, filter []byte, parentID models.Identity) error { + in.updatingStates.RLock() + _, ok := in.headers[parentID] + in.updatingStates.RUnlock() + if !ok { + return fmt.Errorf("parent state not found (parent: %x)", parentID) + } + return nil + }, + ) + + // check on stop condition, stop the tests as soon as entering a certain rank + in.persist.On("PutConsensusState", mock.Anything).Return(nil) + in.persist.On("PutLivenessState", mock.Anything).Return(nil) + + // program the hotstuff signer behaviour + in.signer.On("CreateVote", mock.Anything).Return( + func(state *models.State[*helper.TestState]) **helper.TestVote { + vote := &helper.TestVote{ + Rank: state.Rank, + StateID: state.Identifier, + ID: in.localID, + Signature: make([]byte, 74), + } + return &vote + }, + nil, + ) + in.signer.On("CreateTimeout", mock.Anything, mock.Anything, mock.Anything).Return( + func(curRank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) *models.TimeoutState[*helper.TestVote] { + v := &helper.TestVote{ + Rank: curRank, + Signature: make([]byte, 74), + Timestamp: uint64(time.Now().UnixMilli()), + ID: in.localID, + } + timeoutState := &models.TimeoutState[*helper.TestVote]{ + Rank: curRank, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + Vote: &v, + } + return timeoutState + }, + nil, + ) + in.signer.On("CreateQuorumCertificate", mock.Anything).Return( + func(votes []*helper.TestVote) models.QuorumCertificate { + voterIDs := make([]models.Identity, 0, len(votes)) + bitmask := []byte{0, 0} + for i, vote := range votes { + bitmask[i/8] |= 1 << (i % 8) + voterIDs = append(voterIDs, vote.ID) + } + + qc := &helper.TestQuorumCertificate{ + Rank: votes[0].Rank, + FrameNumber: votes[0].Rank, + Selector: votes[0].StateID, + Timestamp: uint64(time.Now().UnixMilli()), + AggregatedSignature: &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + Bitmask: bitmask, + PublicKey: make([]byte, 585), + }, + } + return qc + }, + nil, + ) + + // program the hotstuff verifier behaviour + in.verifier.On("VerifyVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + in.verifier.On("VerifyQuorumCertificate", mock.Anything, mock.Anything, 
mock.Anything, mock.Anything).Return(nil) + in.verifier.On("VerifyTimeoutCertificate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + // program the hotstuff communicator behaviour + in.notifier.CommunicatorConsumer.On("OnOwnProposal", mock.Anything, mock.Anything).Run( + func(args mock.Arguments) { + proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote]) + require.True(t, ok) + + // sender should always have the parent + in.updatingStates.RLock() + _, exists := in.headers[proposal.State.ParentQuorumCertificate.Identity()] + in.updatingStates.RUnlock() + + if !exists { + t.Fatalf("parent for proposal not found parent: %x", proposal.State.ParentQuorumCertificate.Identity()) + } + + // store locally and loop back to engine for processing + in.ProcessState(proposal) + }, + ) + in.notifier.CommunicatorConsumer.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { + timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote]) + require.True(t, ok) + in.queue <- timeoutState + }, + ) + // in case of single node setup we should just forward vote to our own node + // for multi-node setup this method will be overridden + in.notifier.CommunicatorConsumer.On("OnOwnVote", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + vote, ok := args[0].(**helper.TestVote) + require.True(t, ok) + in.queue <- *vote + }) + + // program the finalizer module behaviour + in.finalizer.On("MakeFinal", mock.Anything).Return( + func(stateID models.Identity) error { + + // as we don't use mocks to assert expectations, but only to + // simulate behaviour, we should drop the call data regularly + in.updatingStates.RLock() + state, found := in.headers[stateID] + in.updatingStates.RUnlock() + if !found { + return fmt.Errorf("can't broadcast with unknown parent") + } + if state.Rank%100 == 0 { + in.committee.Calls = nil + in.builder.Calls = nil + in.signer.Calls = nil + in.verifier.Calls = nil + in.notifier.CommunicatorConsumer.Calls = nil + in.finalizer.Calls = nil + } + + return nil + }, + ) + + // initialize error handling and logging + var err error + + notifier := pubsub.NewDistributor[*helper.TestState, *helper.TestVote]() + notifier.AddConsumer(in.notifier) + logConsumer := notifications.NewLogConsumer[*helper.TestState, *helper.TestVote](in.logger) + notifier.AddConsumer(logConsumer) + + // initialize the finalizer + var rootState *models.State[*helper.TestState] + if cfg.Root.ParentQuorumCertificate != nil { + rootState = models.StateFrom(cfg.Root.State, cfg.Root.ParentQuorumCertificate) + } else { + rootState = models.GenesisStateFrom(cfg.Root.State) + } + + rootQC := &helper.TestQuorumCertificate{ + Rank: rootState.Rank, + FrameNumber: rootState.Rank, + Selector: rootState.Identifier, + Timestamp: uint64(time.Now().UnixMilli()), + AggregatedSignature: &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + Bitmask: []byte{0b11111111, 0b00000000}, + PublicKey: make([]byte, 585), + }, + } + certifiedRootState, err := models.NewCertifiedState(rootState, rootQC) + require.NoError(t, err) + + livenessData := &models.LivenessState{ + CurrentRank: rootQC.Rank + 1, + LatestQuorumCertificate: rootQC, + } + + in.persist.On("GetLivenessState", mock.Anything).Return(livenessData, nil).Once() + + // initialize the pacemaker + controller := timeout.NewController(cfg.Timeouts) + in.pacemaker, err = pacemaker.NewPacemaker[*helper.TestState, *helper.TestVote](nil, controller, pacemaker.NoProposalDelay(), notifier, in.persist, 
in.logger) + require.NoError(t, err) + + // initialize the forks handler + in.forks, err = forks.NewForks(certifiedRootState, in.finalizer, notifier) + require.NoError(t, err) + + // initialize the validator + in.validator = validator.NewValidator[*helper.TestState, *helper.TestVote](in.committee, in.verifier) + + packer := &mocks.Packer{} + packer.On("Pack", mock.Anything, mock.Anything).Return( + func(rank uint64, sig *consensus.StateSignatureData) ([]byte, []byte, error) { + indices := []byte{0, 0} + for i := range sig.Signers { + indices[i/8] |= 1 << (i % 8) + } + + return indices, make([]byte, 74), nil + }, + ).Maybe() + + onQCCreated := func(qc models.QuorumCertificate) { + in.queue <- qc + } + + voteProcessorFactory := mocks.NewVoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer](t) + voteProcessorFactory.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + func(tracer consensus.TraceLogger, filter []byte, proposal *models.SignedProposal[*helper.TestState, *helper.TestVote], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote] { + processor, err := votecollector.NewBootstrapVoteProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer]( + in.logger, + filter, + in.committee, + proposal.State, + onQCCreated, + []byte{}, + aggregator, + in.voting, + ) + require.NoError(t, err) + + vote, err := proposal.ProposerVote() + require.NoError(t, err) + + err = processor.Process(vote) + if err != nil { + t.Fatalf("invalid vote for own proposal: %v", err) + } + return processor + }, nil).Maybe() + in.voting.On("FinalizeQuorumCertificate", mock.Anything, mock.Anything, mock.Anything).Return( + func( + ctx context.Context, + state *models.State[*helper.TestState], + aggregatedSignature models.AggregatedSignature, + ) (models.QuorumCertificate, error) { + return &helper.TestQuorumCertificate{ + Rank: state.Rank, + Timestamp: state.Timestamp, + FrameNumber: state.Rank, + Selector: state.Identifier, + AggregatedSignature: aggregatedSignature, + }, nil + }, + ) + in.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + func(ctx context.Context, rank uint64, latestQuorumCertificate models.QuorumCertificate, latestQuorumCertificateRanks []uint64, aggregatedSignature models.AggregatedSignature) (models.TimeoutCertificate, error) { + return &helper.TestTimeoutCertificate{ + Filter: nil, + Rank: rank, + LatestRanks: latestQuorumCertificateRanks, + LatestQuorumCert: latestQuorumCertificate, + AggregatedSignature: aggregatedSignature, + }, nil + }, + ) + + voteAggregationDistributor := pubsub.NewVoteAggregationDistributor[*helper.TestState, *helper.TestVote]() + sigAgg := mocks.NewSignatureAggregator(t) + sigAgg.On("Aggregate", mock.Anything, mock.Anything).Return( + func(publicKeys [][]byte, signatures [][]byte) (models.AggregatedSignature, error) { + bitmask := []byte{0, 0} + for i := range publicKeys { + bitmask[i/8] |= 1 << (i % 8) + } + return &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + Bitmask: bitmask, + PublicKey: make([]byte, 585), + }, nil + }).Maybe() + sigAgg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + createCollectorFactoryMethod := votecollector.NewStateMachineFactory(in.logger, []byte{}, 
voteAggregationDistributor, voteProcessorFactory.Create, []byte{}, sigAgg, in.voting) + voteCollectors := voteaggregator.NewVoteCollectors[*helper.TestState, *helper.TestVote](in.logger, livenessData.CurrentRank, workerpool.New(2), createCollectorFactoryMethod) + + // initialize the vote aggregator + in.voteAggregator, err = voteaggregator.NewVoteAggregator[*helper.TestState, *helper.TestVote]( + in.logger, + voteAggregationDistributor, + livenessData.CurrentRank, + voteCollectors, + ) + require.NoError(t, err) + + // initialize factories for timeout collector and timeout processor + timeoutAggregationDistributor := pubsub.NewTimeoutAggregationDistributor[*helper.TestVote]() + timeoutProcessorFactory := mocks.NewTimeoutProcessorFactory[*helper.TestVote](t) + timeoutProcessorFactory.On("Create", mock.Anything).Return( + func(rank uint64) consensus.TimeoutProcessor[*helper.TestVote] { + // mock signature aggregator which doesn't perform any crypto operations and just tracks total weight + aggregator := &mocks.TimeoutSignatureAggregator{} + totalWeight := atomic.NewUint64(0) + newestRank := counters.NewMonotonicCounter(0) + bits := counters.NewMonotonicCounter(0) + aggregator.On("Rank").Return(rank).Maybe() + aggregator.On("TotalWeight").Return(func() uint64 { + return totalWeight.Load() + }).Maybe() + aggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).Return( + func(signerID models.Identity, _ []byte, newestQCRank uint64) uint64 { + newestRank.Set(newestQCRank) + var signer models.WeightedIdentity + for _, p := range in.participants { + if p.Identity() == signerID { + signer = p + } + } + require.NotNil(t, signer) + bits.Increment() + return totalWeight.Add(signer.Weight()) + }, nil, + ).Maybe() + aggregator.On("Aggregate").Return( + func() []consensus.TimeoutSignerInfo { + signersData := make([]consensus.TimeoutSignerInfo, 0, len(in.participants)) + newestQCRank := newestRank.Value() + for _, signer := range in.participants { + signersData = append(signersData, consensus.TimeoutSignerInfo{ + NewestQCRank: newestQCRank, + Signer: signer.Identity(), + }) + } + return signersData + }, + func() models.AggregatedSignature { + bitCount := bits.Value() + bitmask := []byte{0, 0} + for i := range bitCount { + pos := i / 8 + bitmask[pos] |= 1 << (i % 8) + } + return &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + Bitmask: bitmask, + PublicKey: make([]byte, 585), + } + }, + nil, + ).Maybe() + + p, err := timeoutcollector.NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer]( + in.logger, + in.committee, + in.validator, + aggregator, + timeoutAggregationDistributor, + in.voting, + ) + require.NoError(t, err) + return p + }, nil).Maybe() + timeoutCollectorFactory := timeoutcollector.NewTimeoutCollectorFactory( + in.logger, + timeoutAggregationDistributor, + timeoutProcessorFactory, + ) + timeoutCollectors := timeoutaggregator.NewTimeoutCollectors( + in.logger, + livenessData.CurrentRank, + timeoutCollectorFactory, + ) + + // initialize the timeout aggregator + in.timeoutAggregator, err = timeoutaggregator.NewTimeoutAggregator( + in.logger, + livenessData.CurrentRank, + timeoutCollectors, + ) + require.NoError(t, err) + + safetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: rootState.Rank, + LatestAcknowledgedRank: rootState.Rank, + } + in.persist.On("GetConsensusState", mock.Anything).Return(safetyData, nil).Once() + + // initialize the safety rules + in.safetyRules, err = safetyrules.NewSafetyRules(nil, in.signer, 
in.persist, in.committee) + require.NoError(t, err) + + // initialize the state producer + in.producer, err = stateproducer.NewStateProducer[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected](in.safetyRules, in.committee, in.builder) + require.NoError(t, err) + + // initialize the event handler + in.handler, err = eventhandler.NewEventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected]( + in.pacemaker, + in.producer, + in.forks, + in.persist, + in.committee, + in.safetyRules, + notifier, + in.logger, + ) + require.NoError(t, err) + + timeoutAggregationDistributor.AddTimeoutCollectorConsumer(logConsumer) + timeoutAggregationDistributor.AddTimeoutCollectorConsumer(&in) + voteAggregationDistributor.AddVoteCollectorConsumer(logConsumer) + + return &in +} + +func (in *Instance) Run(t *testing.T) error { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + <-lifecycle.AllDone(in.voteAggregator, in.timeoutAggregator) + }() + signalerCtx := unittest.NewMockSignalerContext(t, ctx) + in.voteAggregator.Start(signalerCtx) + in.timeoutAggregator.Start(signalerCtx) + <-lifecycle.AllReady(in.voteAggregator, in.timeoutAggregator) + + // start the event handler + err := in.handler.Start(ctx) + if err != nil { + return fmt.Errorf("could not start event handler: %w", err) + } + + // run until an error or stop condition is reached + for { + // check on stop conditions + if in.stop(in) { + return errStopCondition + } + + // we handle timeouts with priority + select { + case <-in.handler.TimeoutChannel(): + err := in.handler.OnLocalTimeout() + if err != nil { + panic(fmt.Errorf("could not process timeout: %w", err)) + } + default: + } + + // check on stop conditions + if in.stop(in) { + return errStopCondition + } + + // otherwise, process first received event + select { + case <-in.handler.TimeoutChannel(): + err := in.handler.OnLocalTimeout() + if err != nil { + return fmt.Errorf("could not process timeout: %w", err) + } + case msg := <-in.queue: + switch m := msg.(type) { + case *models.SignedProposal[*helper.TestState, *helper.TestVote]: + // add state to aggregator + in.voteAggregator.AddState(m) + // then pass to event handler + err := in.handler.OnReceiveProposal(m) + if err != nil { + return fmt.Errorf("could not process proposal: %w", err) + } + case *helper.TestVote: + in.voteAggregator.AddVote(&m) + case *models.TimeoutState[*helper.TestVote]: + in.timeoutAggregator.AddTimeout(m) + case models.QuorumCertificate: + err := in.handler.OnReceiveQuorumCertificate(m) + if err != nil { + return fmt.Errorf("could not process received QC: %w", err) + } + case models.TimeoutCertificate: + err := in.handler.OnReceiveTimeoutCertificate(m) + if err != nil { + return fmt.Errorf("could not process received TC: %w", err) + } + case *consensus.PartialTimeoutCertificateCreated: + err := in.handler.OnPartialTimeoutCertificateCreated(m) + if err != nil { + return fmt.Errorf("could not process partial TC: %w", err) + } + default: + fmt.Printf("unhandled queue event: %s\n", reflect.ValueOf(msg).Type().String()) + } + } + } +} + +func (in *Instance) ProcessState(proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]) { + in.updatingStates.Lock() + defer in.updatingStates.Unlock() + _, parentExists := in.headers[proposal.State.ParentQuorumCertificate.Identity()] + + if parentExists { + next := proposal + for next != nil { + in.headers[next.State.Identifier] = next.State + + in.queue <- next + // keep processing the 
pending states + next = in.pendings[next.State.ParentQuorumCertificate.Identity()] + } + } else { + // cache it in pendings by ParentID + in.pendings[proposal.State.ParentQuorumCertificate.Identity()] = proposal + } +} + +func (in *Instance) OnTimeoutCertificateConstructedFromTimeouts(tc models.TimeoutCertificate) { + in.queue <- tc +} + +func (in *Instance) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) { + in.queue <- &consensus.PartialTimeoutCertificateCreated{ + Rank: rank, + NewestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + } +} + +func (in *Instance) OnNewQuorumCertificateDiscovered(qc models.QuorumCertificate) { + in.queue <- qc +} + +func (in *Instance) OnNewTimeoutCertificateDiscovered(tc models.TimeoutCertificate) { + in.queue <- tc +} + +func (in *Instance) OnTimeoutProcessed(*models.TimeoutState[*helper.TestVote]) { +} diff --git a/consensus/integration/integration_test.go b/consensus/integration/integration_test.go new file mode 100644 index 0000000..b4a8680 --- /dev/null +++ b/consensus/integration/integration_test.go @@ -0,0 +1,153 @@ +package integration + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" +) + +// a pacemaker timeout to wait for proposals. Usually 10 ms is enough, +// but for slow environments like CI a longer one is needed. +const safeTimeout = 2 * time.Second + +// number of failed rounds before the first timeout increase +const happyPathMaxRoundFailures = 6 + +func TestSingleInstance(t *testing.T) { + fmt.Println("starting single instance test") + // set up a single instance to run + finalRank := uint64(10) + in := NewInstance(t, + WithStopCondition(RankFinalized(finalRank)), + ) + + // run the event handler until we reach a stop condition + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition, "should run until stop condition") + + // check if forks and pacemaker are in the expected rank state + assert.Equal(t, finalRank, in.forks.FinalizedRank(), "finalized rank should match the stop-condition rank") + fmt.Println("ending single instance test") +} + +func TestThreeInstances(t *testing.T) { + fmt.Println("starting three instance test") + // test parameters + num := 3 + finalRank := uint64(100) + + // generate three hotstuff participants + participants := helper.WithWeightedIdentityList(num) + root := DefaultRoot() + + // set up three instances that are exactly the same + // since we don't drop any happy-path messages, the instances have enough data to advance; + // for that reason we drop all timeout-related communication.
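 + // (Dropping incoming TimeoutStates is safe here: with every proposal and vote delivered, + // each rank produces a QC on the happy path, so timeouts are never needed for liveness.)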
+ instances := make([]*Instance, 0, num) + for n := 0; n < num; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithStopCondition(RankFinalized(finalRank)), + WithIncomingTimeoutStates(DropAllTimeoutStates), + ) + instances = append(instances, in) + } + + // connect the communicators of the instances together + Connect(t, instances) + + // start the instances and wait for them to finish + var wg sync.WaitGroup + for _, in := range instances { + wg.Add(1) + go func(in *Instance) { + err := in.Run(t) + require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") + wg.Done() + }(in) + } + wg.Wait() + + // check that all instances have the same finalized state + in1 := instances[0] + in2 := instances[1] + in3 := instances[2] + // verify progress has been made + assert.GreaterOrEqual(t, in1.forks.FinalizedState().Rank, finalRank, "the first instance 's finalized rank should be four lower than current rank") + // verify same progresses have been made + assert.Equal(t, in1.forks.FinalizedState(), in2.forks.FinalizedState(), "second instance should have same finalized state as first instance") + assert.Equal(t, in1.forks.FinalizedState(), in3.forks.FinalizedState(), "third instance should have same finalized state as first instance") + assert.Equal(t, FinalizedRanks(in1), FinalizedRanks(in2)) + assert.Equal(t, FinalizedRanks(in1), FinalizedRanks(in3)) + fmt.Println("ending three instance test") +} + +func TestSevenInstances(t *testing.T) { + fmt.Println("starting seven instance test") + // test parameters + numPass := 5 + numFail := 2 + finalRank := uint64(30) + + // generate the seven hotstuff participants + participants := helper.WithWeightedIdentityList(numPass + numFail) + instances := make([]*Instance, 0, numPass+numFail) + root := DefaultRoot() + + // set up five instances that work fully + for n := 0; n < numPass; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithStopCondition(RankFinalized(finalRank)), + ) + instances = append(instances, in) + } + + // set up two instances which can't vote + for n := numPass; n < numPass+numFail; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithStopCondition(RankFinalized(finalRank)), + WithOutgoingVotes(DropAllVotes), + ) + instances = append(instances, in) + } + + // connect the communicators of the instances together + Connect(t, instances) + + // start all seven instances and wait for them to wrap up + var wg sync.WaitGroup + for _, in := range instances { + wg.Add(1) + go func(in *Instance) { + err := in.Run(t) + require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") + wg.Done() + }(in) + } + wg.Wait() + + // check that all instances have the same finalized state + ref := instances[0] + assert.Less(t, finalRank-uint64(2*numPass+numFail), ref.forks.FinalizedState().Rank, "expect instance 0 should made enough progress, but didn't") + finalizedRanks := FinalizedRanks(ref) + for i := 1; i < numPass; i++ { + assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance") + assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance") + } + fmt.Println("ending seven instance test") +} diff --git 
a/consensus/integration/liveness_test.go b/consensus/integration/liveness_test.go new file mode 100644 index 0000000..a196ccb --- /dev/null +++ b/consensus/integration/liveness_test.go @@ -0,0 +1,422 @@ +package integration + +import ( + "encoding/hex" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +// pacemaker timeout +// if your laptop is fast enough, 10 ms is enough +const pmTimeout = 100 * time.Millisecond + +// maxTimeoutRebroadcast specifies how often the PaceMaker rebroadcasts +// its timeout state in case there is no progress. We keep the value +// small so we have smaller latency +const maxTimeoutRebroadcast = 1 * time.Second + +// If 2 nodes are down in a 7 nodes cluster, the rest of 5 nodes can +// still make progress and reach consensus +func Test2TimeoutOutof7Instances(t *testing.T) { + + healthyReplicas := 5 + notVotingReplicas := 2 + finalRank := uint64(30) + + // generate the seven hotstuff participants + participants := helper.WithWeightedIdentityList(healthyReplicas + notVotingReplicas) + instances := make([]*Instance, 0, healthyReplicas+notVotingReplicas) + root := DefaultRoot() + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) + require.NoError(t, err) + + // set up five instances that work fully + for n := 0; n < healthyReplicas; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithTimeouts(timeouts), + WithBufferLogger(), + WithLocalID(participants[n].Identity()), + WithLoggerParams(consensus.StringParam("status", "healthy")), + WithStopCondition(RankFinalized(finalRank)), + ) + instances = append(instances, in) + } + + // set up two instances which can't vote, nor propose + for n := healthyReplicas; n < healthyReplicas+notVotingReplicas; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithTimeouts(timeouts), + WithBufferLogger(), + WithLocalID(participants[n].Identity()), + WithLoggerParams(consensus.StringParam("status", "unhealthy")), + WithStopCondition(RankFinalized(finalRank)), + WithOutgoingVotes(DropAllVotes), + WithOutgoingProposals(DropAllProposals), + ) + instances = append(instances, in) + } + + // connect the communicators of the instances together + Connect(t, instances) + + // start all seven instances and wait for them to wrap up + var wg sync.WaitGroup + for _, in := range instances { + wg.Add(1) + go func(in *Instance) { + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) + wg.Done() + }(in) + } + unittest.AssertReturnsBefore(t, wg.Wait, 20*time.Second, "expect to finish before timeout") + + for i, in := range instances { + fmt.Println("=============================================================================") + fmt.Println("INSTANCE", i, "-", hex.EncodeToString([]byte(in.localID))) + fmt.Println("=============================================================================") + in.logger.(*helper.BufferLog).Flush() + } + + // check that all instances have the same finalized state + ref := instances[0] + assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expect instance 0 should made enough 
progress, but didn't") + finalizedRanks := FinalizedRanks(ref) + for i := 1; i < healthyReplicas; i++ { + assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance") + assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance") + } +} + +// 2 nodes in a 4-node cluster are configured to be able only to send timeout messages (no voting or proposing). +// The other 2 unconstrained nodes should be able to make progress through the recovery path by creating TCs +// for every round, but no state will be finalized, because finalization requires direct 1-chain and QC. +func Test2TimeoutOutof4Instances(t *testing.T) { + + healthyReplicas := 2 + replicasDroppingHappyPathMsgs := 2 + finalRank := uint64(30) + + // generate the 4 hotstuff participants + participants := helper.WithWeightedIdentityList(healthyReplicas + replicasDroppingHappyPathMsgs) + instances := make([]*Instance, 0, healthyReplicas+replicasDroppingHappyPathMsgs) + root := DefaultRoot() + timeouts, err := timeout.NewConfig(10*time.Millisecond, 50*time.Millisecond, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) + require.NoError(t, err) + + // set up two instances that work fully + for n := 0; n < healthyReplicas; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithTimeouts(timeouts), + WithLoggerParams(consensus.StringParam("status", "healthy")), + WithStopCondition(RankReached(finalRank)), + ) + instances = append(instances, in) + } + + // set up instances which can't vote, nor propose + for n := healthyReplicas; n < healthyReplicas+replicasDroppingHappyPathMsgs; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithTimeouts(timeouts), + WithLoggerParams(consensus.StringParam("status", "unhealthy")), + WithStopCondition(RankReached(finalRank)), + WithOutgoingVotes(DropAllVotes), + WithIncomingVotes(DropAllVotes), + WithOutgoingProposals(DropAllProposals), + ) + instances = append(instances, in) + } + + // connect the communicators of the instances together + Connect(t, instances) + + // start the instances and wait for them to finish + var wg sync.WaitGroup + for _, in := range instances { + wg.Add(1) + go func(in *Instance) { + err := in.Run(t) + require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") + wg.Done() + }(in) + } + unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout") + + // check that all instances have the same finalized state + ref := instances[0] + finalizedRanks := FinalizedRanks(ref) + assert.Equal(t, []uint64{0}, finalizedRanks, "no rank was finalized, because finalization requires 2 direct chain plus a QC which never happen in this case") + assert.Equal(t, finalRank, ref.pacemaker.CurrentRank(), "expect instance 0 should made enough progress, but didn't") + for i := 1; i < healthyReplicas; i++ { + assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i) + assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance", i) + assert.Equal(t, finalRank, instances[i].pacemaker.CurrentRank(), "instance %d should have same active rank as first instance", i) + } +} + +// If 1 node is down in a 
5-node cluster, the remaining 4 nodes can +// still make progress and reach consensus +func Test1TimeoutOutof5Instances(t *testing.T) { + + healthyReplicas := 4 + downReplicas := 1 + finalRank := uint64(30) + + // generate the five hotstuff participants + participants := helper.WithWeightedIdentityList(healthyReplicas + downReplicas) + instances := make([]*Instance, 0, healthyReplicas+downReplicas) + root := DefaultRoot() + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) + require.NoError(t, err) + + // set up instances that work fully + for n := 0; n < healthyReplicas; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithTimeouts(timeouts), + WithLoggerParams(consensus.StringParam("status", "healthy")), + WithStopCondition(RankFinalized(finalRank)), + ) + instances = append(instances, in) + } + + // set up one instance which can neither vote nor propose + for n := healthyReplicas; n < healthyReplicas+downReplicas; n++ { + in := NewInstance(t, + WithRoot(root), + WithParticipants(participants), + WithLocalID(participants[n].Identity()), + WithTimeouts(timeouts), + WithLoggerParams(consensus.StringParam("status", "unhealthy")), + WithStopCondition(RankReached(finalRank)), + WithOutgoingVotes(DropAllVotes), + WithOutgoingProposals(DropAllProposals), + ) + instances = append(instances, in) + } + + // connect the communicators of the instances together + Connect(t, instances) + + // start all five instances and wait for them to wrap up + var wg sync.WaitGroup + for _, in := range instances { + wg.Add(1) + go func(in *Instance) { + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) + wg.Done() + }(in) + } + success := unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout") + if !success { + t.Logf("dumping state of system:") + for i, inst := range instances { + t.Logf( + "instance %d: %d %d %d", + i, + inst.pacemaker.CurrentRank(), + inst.pacemaker.LatestQuorumCertificate().GetRank(), + inst.forks.FinalizedState().Rank, + ) + } + } + + // check that all instances have the same finalized state + ref := instances[0] + finalizedRanks := FinalizedRanks(ref) + assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expected instance 0 to have made enough progress, but it didn't") + for i := 1; i < healthyReplicas; i++ { + assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i) + assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance", i) + } +} + +// TestStateDelayIsHigherThanTimeout tests a protocol timing edge case, where +// - The state arrives in time for replicas to vote. +// - The next primary does not respond in time with a follow-up proposal, +// so nodes start sending TimeoutStates. +// - However, eventually, the next primary successfully constructs a QC and a new +// state before a TC leads to the round timing out. +// +// This test verifies that nodes still make progress on the happy path (QC constructed), +// despite already having initiated the timeout.
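 +// (In other words, the vote-driven QC and the timeout-driven TC race for the same round; +// the assertions below rely on the QC winning, so ranks are finalized without gaps.)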
+// TestStateDelayIsHigherThanTimeout tests a protocol edge case where
+// - The state arrives in time for replicas to vote.
+// - The next primary does not respond in time with a follow-up proposal,
+//   so nodes start sending TimeoutStates.
+// - However, eventually, the next primary successfully constructs a QC and a new
+//   state before a TC leads to the round timing out.
+//
+// This test verifies that nodes still make progress on the happy path (QC constructed),
+// despite already having initiated the timeout.
+//
+// Example scenarios in which this timing edge case could manifest:
+// - state delay is very close to (or larger than) the round duration
+// - delayed message transmission (specifically votes) within the network
+// - overwhelmed / slowed-down primary
+// - byzantine primary
+//
+// Implementation:
+// - We have 4 nodes in total where the TimeoutStates from two of them are always
+//   discarded. Therefore, no TC can be constructed.
+// - To force nodes to initiate the timeout (i.e. send TimeoutStates), we set
+//   the `stateRateDelay` to _twice_ the PaceMaker Timeout. Furthermore, we configure
+//   the PaceMaker to only increase timeout duration after 6 successive round failures.
+func TestStateDelayIsHigherThanTimeout(t *testing.T) {
+	healthyReplicas := 2
+	replicasNotGeneratingTimeouts := 2
+	finalRank := uint64(20)
+
+	// generate the 4 hotstuff participants
+	participants := helper.WithWeightedIdentityList(healthyReplicas + replicasNotGeneratingTimeouts)
+	instances := make([]*Instance, 0, healthyReplicas+replicasNotGeneratingTimeouts)
+	root := DefaultRoot()
+	timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast)
+	require.NoError(t, err)
+
+	// set up 2 instances that fully work (incl. sending TimeoutStates)
+	for n := 0; n < healthyReplicas; n++ {
+		in := NewInstance(t,
+			WithRoot(root),
+			WithParticipants(participants),
+			WithLocalID(participants[n].Identity()),
+			WithTimeouts(timeouts),
+			WithStopCondition(RankFinalized(finalRank)),
+		)
+		instances = append(instances, in)
+	}
+
+	// set up two instances which neither generate nor receive timeout states
+	for n := healthyReplicas; n < healthyReplicas+replicasNotGeneratingTimeouts; n++ {
+		in := NewInstance(t,
+			WithRoot(root),
+			WithParticipants(participants),
+			WithLocalID(participants[n].Identity()),
+			WithTimeouts(timeouts),
+			WithStopCondition(RankFinalized(finalRank)),
+			WithIncomingTimeoutStates(DropAllTimeoutStates),
+			WithOutgoingTimeoutStates(DropAllTimeoutStates),
+		)
+		instances = append(instances, in)
+	}
+
+	// connect the communicators of the instances together
+	Connect(t, instances)
+
+	// start all 4 instances and wait for them to wrap up
+	var wg sync.WaitGroup
+	for _, in := range instances {
+		wg.Add(1)
+		go func(in *Instance) {
+			err := in.Run(t)
+			require.ErrorIs(t, err, errStopCondition)
+			wg.Done()
+		}(in)
+	}
+	unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout")
+
+	// check that all instances have the same finalized state
+	ref := instances[0]
+	assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expect instance 0 to have made enough progress, but it didn't")
+	finalizedRanks := FinalizedRanks(ref)
+	// in this test we rely on a QC being produced in each rank,
+	// so the finalized ranks must form a contiguous sequence with no gaps
+	for i := 1; i < len(finalizedRanks); i++ {
+		// finalized ranks are sorted in descending order
+		if finalizedRanks[i-1] != finalizedRanks[i]+1 {
+			t.Fatalf("finalized ranks series has gap, this is not expected: %v", finalizedRanks)
+			return
+		}
+	}
+	for i := 1; i < healthyReplicas; i++ {
+		assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
+		assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance", i)
+	}
+}
+
+// TestAsyncClusterStartup tests a realistic scenario where nodes are started asynchronously:
+// - Replicas are started in sequential order.
+// - Each replica skips voting for the first state (emulating message omission).
+// - Each replica skips its first TimeoutState (emulating message omission).
+// - At this point the protocol loses liveness unless a timeout rebroadcast happens from a super-majority of replicas.
+//
+// This test verifies that nodes still make progress despite the first TimeoutState messages being lost.
+// Implementation:
+// - We have 4 replicas in total; each of them skips voting for the first rank to force a timeout.
+// - TimeoutStates are dropped for the whole committee until each replica has generated its first one.
+// - After each replica has generated a timeout, subsequent timeout rebroadcasts are allowed through so the protocol can make progress.
+func TestAsyncClusterStartup(t *testing.T) {
+	replicas := 4
+	finalRank := uint64(20)
+
+	// generate the four hotstuff participants
+	participants := helper.WithWeightedIdentityList(replicas)
+	instances := make([]*Instance, 0, replicas)
+	root := DefaultRoot()
+	timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, 6, maxTimeoutRebroadcast)
+	require.NoError(t, err)
+
+	// set up instances that work fully
+	var lock sync.Mutex
+	timeoutStateGenerated := make(map[models.Identity]struct{})
+	for n := 0; n < replicas; n++ {
+		in := NewInstance(t,
+			WithRoot(root),
+			WithParticipants(participants),
+			WithLocalID(participants[n].Identity()),
+			WithTimeouts(timeouts),
+			WithStopCondition(RankFinalized(finalRank)),
+			WithOutgoingVotes(func(vote *helper.TestVote) bool {
+				// skip voting for the first rank to force a timeout
+				return vote.Rank == 1
+			}),
+			WithOutgoingTimeoutStates(func(object *models.TimeoutState[*helper.TestVote]) bool {
+				lock.Lock()
+				defer lock.Unlock()
+				timeoutStateGenerated[(*object.Vote).ID] = struct{}{}
+				// start allowing timeouts once every node has generated one;
+				// when nodes rebroadcast, the messages will go through
+				return len(timeoutStateGenerated) != replicas
+			}),
+		)
+		instances = append(instances, in)
+	}
+
+	// connect the communicators of the instances together
+	Connect(t, instances)
+
+	// start the instances and wait for them to finish
+	var wg sync.WaitGroup
+	for _, in := range instances {
+		wg.Add(1)
+		go func(in *Instance) {
+			err := in.Run(t)
+			require.ErrorIs(t, err, errStopCondition)
+			wg.Done()
+		}(in)
+	}
+	unittest.AssertReturnsBefore(t, wg.Wait, 20*time.Second, "expect to finish before timeout")
+
+	// check that all instances have the same finalized state
+	ref := instances[0]
+	assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expect instance 0 to have made enough progress, but it didn't")
+	finalizedRanks := FinalizedRanks(ref)
+	for i := 1; i < replicas; i++ {
+		assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
+		assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance", i)
+	}
+}
diff --git a/consensus/integration/options_test.go b/consensus/integration/options_test.go
new file mode 100644
index 0000000..db994ce
--- /dev/null
+++ b/consensus/integration/options_test.go
@@ -0,0 +1,109 @@
+package integration
+
+import (
+	"errors"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout"
+)
+
+var errStopCondition = errors.New("stop condition reached")
+
+type Option func(*Config)
+
+type Config struct {
+	Logger consensus.TraceLogger
+	Root 
*models.State[*helper.TestState] + Participants []models.WeightedIdentity + LocalID models.Identity + Timeouts timeout.Config + IncomingVotes VoteFilter + OutgoingVotes VoteFilter + IncomingTimeoutStates TimeoutStateFilter + OutgoingTimeoutStates TimeoutStateFilter + IncomingProposals ProposalFilter + OutgoingProposals ProposalFilter + + StopCondition Condition +} + +func WithRoot(root *models.State[*helper.TestState]) Option { + return func(cfg *Config) { + cfg.Root = root + } +} + +func WithParticipants(participants []models.WeightedIdentity) Option { + return func(cfg *Config) { + cfg.Participants = participants + } +} + +func WithLocalID(localID models.Identity) Option { + return func(cfg *Config) { + cfg.LocalID = localID + cfg.Logger = cfg.Logger.With(consensus.IdentityParam("self", localID)) + } +} + +func WithTimeouts(timeouts timeout.Config) Option { + return func(cfg *Config) { + cfg.Timeouts = timeouts + } +} + +func WithBufferLogger() Option { + return func(cfg *Config) { + cfg.Logger = helper.BufferLogger() + } +} + +func WithLoggerParams(params ...consensus.LogParam) Option { + return func(cfg *Config) { + cfg.Logger = cfg.Logger.With(params...) + } +} + +func WithIncomingVotes(Filter VoteFilter) Option { + return func(cfg *Config) { + cfg.IncomingVotes = Filter + } +} + +func WithOutgoingVotes(Filter VoteFilter) Option { + return func(cfg *Config) { + cfg.OutgoingVotes = Filter + } +} + +func WithIncomingProposals(Filter ProposalFilter) Option { + return func(cfg *Config) { + cfg.IncomingProposals = Filter + } +} + +func WithOutgoingProposals(Filter ProposalFilter) Option { + return func(cfg *Config) { + cfg.OutgoingProposals = Filter + } +} + +func WithIncomingTimeoutStates(Filter TimeoutStateFilter) Option { + return func(cfg *Config) { + cfg.IncomingTimeoutStates = Filter + } +} + +func WithOutgoingTimeoutStates(Filter TimeoutStateFilter) Option { + return func(cfg *Config) { + cfg.OutgoingTimeoutStates = Filter + } +} + +func WithStopCondition(stop Condition) Option { + return func(cfg *Config) { + cfg.StopCondition = stop + } +} diff --git a/consensus/mocks/communicator_consumer.go b/consensus/mocks/communicator_consumer.go new file mode 100644 index 0000000..f10469d --- /dev/null +++ b/consensus/mocks/communicator_consumer.go @@ -0,0 +1,48 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + time "time" + + "github.com/stretchr/testify/mock" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// CommunicatorConsumer is an autogenerated mock type for the CommunicatorConsumer type +type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) { + _m.Called(proposal, targetPublicationTime) +} + +// OnOwnTimeout provides a mock function with given fields: timeout +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) { + _m.Called(vote, recipientID) +} + +// NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommunicatorConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *CommunicatorConsumer[StateT, VoteT] { + mock := &CommunicatorConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +var _ consensus.CommunicatorConsumer[*helper.TestState, *helper.TestVote] = (*CommunicatorConsumer[*helper.TestState, *helper.TestVote])(nil) diff --git a/consensus/mocks/consensus_store.go b/consensus/mocks/consensus_store.go new file mode 100644 index 0000000..6727653 --- /dev/null +++ b/consensus/mocks/consensus_store.go @@ -0,0 +1,123 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ConsensusStore is an autogenerated mock type for the ConsensusStore type +type ConsensusStore[VoteT models.Unique] struct { + mock.Mock +} + +// GetConsensusState provides a mock function with no fields +func (_m *ConsensusStore[VoteT]) GetConsensusState(filter []byte) (*models.ConsensusState[VoteT], error) { + ret := _m.Called(filter) + + if len(ret) == 0 { + panic("no return value specified for GetConsensusState") + } + + var r0 *models.ConsensusState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(filter []byte) (*models.ConsensusState[VoteT], error)); ok { + return rf(filter) + } + if rf, ok := ret.Get(0).(func(filter []byte) *models.ConsensusState[VoteT]); ok { + r0 = rf(filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.ConsensusState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(filter []byte) error); ok { + r1 = rf(filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLivenessState provides a mock function with no fields +func (_m *ConsensusStore[VoteT]) GetLivenessState(filter []byte) (*models.LivenessState, error) { + ret := _m.Called(filter) + + if len(ret) == 0 { + panic("no return value specified for GetLivenessState") + } + + var r0 *models.LivenessState + var r1 error + if rf, ok := ret.Get(0).(func(filter []byte) (*models.LivenessState, error)); ok { + return rf(filter) + } + if rf, ok := ret.Get(0).(func(filter []byte) *models.LivenessState); ok { + r0 = rf(filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.LivenessState) + } + } + + if rf, ok := ret.Get(1).(func(filter []byte) error); ok { + r1 = rf(filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PutConsensusState provides a mock function with given fields: state +func (_m *ConsensusStore[VoteT]) PutConsensusState(state *models.ConsensusState[VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for PutConsensusState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.ConsensusState[VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PutLivenessState provides a mock function with given fields: state +func (_m *ConsensusStore[VoteT]) PutLivenessState(state *models.LivenessState) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for PutLivenessState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.LivenessState) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConsensusStore creates a new instance of ConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsensusStore[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ConsensusStore[VoteT] { + mock := &ConsensusStore[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/consumer.go b/consensus/mocks/consumer.go new file mode 100644 index 0000000..c04ba10 --- /dev/null +++ b/consensus/mocks/consumer.go @@ -0,0 +1,126 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// Consumer is an autogenerated mock type for the Consumer type +type Consumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader +func (_m *Consumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) { + _m.Called(currentRank, finalizedRank, currentLeader) +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *Consumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnEventProcessed provides a mock function with no fields +func (_m *Consumer[StateT, VoteT]) OnEventProcessed() { + _m.Called() +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *Consumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *Consumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// OnLocalTimeout provides a mock function with given fields: currentRank +func (_m *Consumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) { + _m.Called(currentRank) +} + +// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime +func (_m *Consumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) { + _m.Called(proposal, targetPublicationTime) +} + +// OnOwnTimeout provides a mock function with given fields: timeout +func (_m *Consumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *Consumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) { + _m.Called(vote, recipientID) +} + +// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate +func (_m *Consumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) { + _m.Called(currentRank, partialTimeoutCertificate) +} + +// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc +func (_m *Consumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) { + _m.Called(oldRank, newRank, qc) +} + +// OnRankChange provides a mock function with given fields: oldRank, newRank +func (_m *Consumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) { + _m.Called(oldRank, newRank) +} + +// OnReceiveProposal provides a mock function with given fields: currentRank, proposal +func (_m *Consumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(currentRank, proposal) +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc +func (_m *Consumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) { + _m.Called(currentRank, 
qc) +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc +func (_m *Consumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) { + _m.Called(currentRank, tc) +} + +// OnStart provides a mock function with given fields: currentRank +func (_m *Consumer[StateT, VoteT]) OnStart(currentRank uint64) { + _m.Called(currentRank) +} + +// OnStartingTimeout provides a mock function with given fields: startTime, endTime +func (_m *Consumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) { + _m.Called(startTime, endTime) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *Consumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc +func (_m *Consumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) { + _m.Called(oldRank, newRank, tc) +} + +// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Consumer[StateT, VoteT] { + mock := &Consumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/dynamic_committee.go b/consensus/mocks/dynamic_committee.go new file mode 100644 index 0000000..468182c --- /dev/null +++ b/consensus/mocks/dynamic_committee.go @@ -0,0 +1,249 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// DynamicCommittee is an autogenerated mock type for the DynamicCommittee type +type DynamicCommittee struct { + mock.Mock +} + +// IdentitiesByRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByRank") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentitiesByState provides a mock function with given fields: stateID +func (_m *DynamicCommittee) IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error) { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByState") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity) ([]models.WeightedIdentity, error)); ok { + return rf(stateID) + } + if rf, ok := ret.Get(0).(func(models.Identity) []models.WeightedIdentity); ok { + r0 = rf(stateID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity) error); ok { + r1 = rf(stateID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByRank provides a mock function with given fields: rank, participantID +func (_m *DynamicCommittee) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) { + ret := _m.Called(rank, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByRank") + } + + var r0 models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok { + return rf(rank, participantID) + } + if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok { + r0 = rf(rank, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok { + r1 = rf(rank, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByState provides a mock function with given fields: stateID, participantID +func (_m *DynamicCommittee) IdentityByState(stateID models.Identity, participantID models.Identity) (models.WeightedIdentity, error) { + ret := _m.Called(stateID, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByState") + } + + var r0 models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) (models.WeightedIdentity, error)); ok { + return rf(stateID, participantID) + } + if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) models.WeightedIdentity); ok { + r0 = rf(stateID, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity, models.Identity) error); ok { + r1 = 
rf(stateID, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LeaderForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) LeaderForRank(rank uint64) (models.Identity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for LeaderForRank") + } + + var r0 models.Identity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(models.Identity) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuorumThresholdForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) QuorumThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Self provides a mock function with no fields +func (_m *DynamicCommittee) Self() models.Identity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Self") + } + + var r0 models.Identity + if rf, ok := ret.Get(0).(func() models.Identity); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(models.Identity) + } + + return r0 +} + +// TimeoutThresholdForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) TimeoutThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDynamicCommittee creates a new instance of DynamicCommittee. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDynamicCommittee(t interface { + mock.TestingT + Cleanup(func()) +}) *DynamicCommittee { + mock := &DynamicCommittee{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/event_handler.go b/consensus/mocks/event_handler.go new file mode 100644 index 0000000..459ba0f --- /dev/null +++ b/consensus/mocks/event_handler.go @@ -0,0 +1,162 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + mock "github.com/stretchr/testify/mock" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// EventHandler is an autogenerated mock type for the EventHandler type +type EventHandler[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnLocalTimeout provides a mock function with no fields +func (_m *EventHandler[StateT, VoteT]) OnLocalTimeout() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OnLocalTimeout") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: partialTimeoutCertificate +func (_m *EventHandler[StateT, VoteT]) OnPartialTimeoutCertificateCreated(partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) error { + ret := _m.Called(partialTimeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnPartialTimeoutCertificateCreated") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*consensus.PartialTimeoutCertificateCreated) error); ok { + r0 = rf(partialTimeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveProposal provides a mock function with given fields: proposal +func (_m *EventHandler[StateT, VoteT]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *EventHandler[StateT, VoteT]) OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(quorumCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *EventHandler[StateT, VoteT]) OnReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(timeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *EventHandler[StateT, VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TimeoutChannel provides a mock function with no fields +func (_m *EventHandler[StateT, VoteT]) TimeoutChannel() <-chan time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeoutChannel") + } + + var r0 <-chan time.Time + if 
rf, ok := ret.Get(0).(func() <-chan time.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan time.Time) + } + } + + return r0 +} + +// NewEventHandler creates a new instance of EventHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventHandler[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *EventHandler[StateT, VoteT] { + mock := &EventHandler[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/event_loop.go b/consensus/mocks/event_loop.go new file mode 100644 index 0000000..cbee098 --- /dev/null +++ b/consensus/mocks/event_loop.go @@ -0,0 +1,67 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// EventLoop is an autogenerated mock type for the EventLoop type +type EventLoop[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *EventLoop[StateT, VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// SubmitProposal provides a mock function with given fields: proposal +func (_m *EventLoop[StateT, VoteT]) SubmitProposal(proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(proposal) +} + +// NewEventLoop creates a new instance of EventLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEventLoop[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *EventLoop[StateT, VoteT] { + mock := &EventLoop[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/finalization_consumer.go b/consensus/mocks/finalization_consumer.go new file mode 100644 index 0000000..9e9a330 --- /dev/null +++ b/consensus/mocks/finalization_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FinalizationConsumer is an autogenerated mock type for the FinalizationConsumer type +type FinalizationConsumer[StateT models.Unique] struct { + mock.Mock +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *FinalizationConsumer[StateT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *FinalizationConsumer[StateT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// NewFinalizationConsumer creates a new instance of FinalizationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizationConsumer[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FinalizationConsumer[StateT] { + mock := &FinalizationConsumer[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/finalizer.go b/consensus/mocks/finalizer.go new file mode 100644 index 0000000..9e74b8c --- /dev/null +++ b/consensus/mocks/finalizer.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Finalizer is an autogenerated mock type for the Finalizer type +type Finalizer struct { + mock.Mock +} + +// MakeFinal provides a mock function with given fields: stateID +func (_m *Finalizer) MakeFinal(stateID models.Identity) error { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for MakeFinal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.Identity) error); ok { + r0 = rf(stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewFinalizer creates a new instance of Finalizer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizer(t interface { + mock.TestingT + Cleanup(func()) +}) *Finalizer { + mock := &Finalizer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/follower_consumer.go b/consensus/mocks/follower_consumer.go new file mode 100644 index 0000000..abc97c9 --- /dev/null +++ b/consensus/mocks/follower_consumer.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FollowerConsumer is an autogenerated mock type for the FollowerConsumer type +type FollowerConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *FollowerConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *FollowerConsumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *FollowerConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *FollowerConsumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// NewFollowerConsumer creates a new instance of FollowerConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FollowerConsumer[StateT, VoteT] { + mock := &FollowerConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/follower_loop.go b/consensus/mocks/follower_loop.go new file mode 100644 index 0000000..8360ce5 --- /dev/null +++ b/consensus/mocks/follower_loop.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FollowerLoop is an autogenerated mock type for the FollowerLoop type +type FollowerLoop[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddCertifiedState provides a mock function with given fields: certifiedState +func (_m *FollowerLoop[StateT, VoteT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) { + _m.Called(certifiedState) +} + +// NewFollowerLoop creates a new instance of FollowerLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerLoop[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FollowerLoop[StateT, VoteT] { + mock := &FollowerLoop[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/forks.go b/consensus/mocks/forks.go new file mode 100644 index 0000000..6c00114 --- /dev/null +++ b/consensus/mocks/forks.go @@ -0,0 +1,183 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Forks is an autogenerated mock type for the Forks type +type Forks[StateT models.Unique] struct { + mock.Mock +} + +// AddCertifiedState provides a mock function with given fields: certifiedState +func (_m *Forks[StateT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error { + ret := _m.Called(certifiedState) + + if len(ret) == 0 { + panic("no return value specified for AddCertifiedState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.CertifiedState[StateT]) error); ok { + r0 = rf(certifiedState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddValidatedState provides a mock function with given fields: proposal +func (_m *Forks[StateT]) AddValidatedState(proposal *models.State[StateT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for AddValidatedState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FinalityProof provides a mock function with no fields +func (_m *Forks[StateT]) FinalityProof() (*consensus.FinalityProof[StateT], bool) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityProof") + } + + var r0 *consensus.FinalityProof[StateT] + var r1 bool + if rf, ok := ret.Get(0).(func() (*consensus.FinalityProof[StateT], bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *consensus.FinalityProof[StateT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*consensus.FinalityProof[StateT]) + } + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// FinalizedRank provides a mock function with no fields +func (_m *Forks[StateT]) FinalizedRank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalizedRank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// FinalizedState provides a mock function with no fields +func (_m *Forks[StateT]) FinalizedState() *models.State[StateT] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalizedState") + } + + var r0 *models.State[StateT] + if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.State[StateT]) + } + } + + return r0 +} + +// GetState provides a mock function with given fields: stateID +func (_m *Forks[StateT]) GetState(stateID models.Identity) (*models.State[StateT], bool) { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for GetState") + } + + var r0 *models.State[StateT] + var r1 bool + if rf, ok := ret.Get(0).(func(models.Identity) (*models.State[StateT], bool)); ok { + return rf(stateID) + } + if rf, ok := ret.Get(0).(func(models.Identity) *models.State[StateT]); ok { + r0 = rf(stateID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.State[StateT]) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity) bool); ok { + r1 = rf(stateID) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// GetStatesForRank provides a mock function with given 
fields: rank +func (_m *Forks[StateT]) GetStatesForRank(rank uint64) []*models.State[StateT] { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetStatesForRank") + } + + var r0 []*models.State[StateT] + if rf, ok := ret.Get(0).(func(uint64) []*models.State[StateT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.State[StateT]) + } + } + + return r0 +} + +// NewForks creates a new instance of Forks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewForks[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Forks[StateT] { + mock := &Forks[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/leader_provider.go b/consensus/mocks/leader_provider.go new file mode 100644 index 0000000..0ea46c7 --- /dev/null +++ b/consensus/mocks/leader_provider.go @@ -0,0 +1,89 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LeaderProvider is an autogenerated mock type for the LeaderProvider type +type LeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct { + mock.Mock +} + +// GetNextLeaders provides a mock function with given fields: ctx, prior +func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error) { + ret := _m.Called(ctx, prior) + + if len(ret) == 0 { + panic("no return value specified for GetNextLeaders") + } + + var r0 []PeerIDT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) ([]PeerIDT, error)); ok { + return rf(ctx, prior) + } + if rf, ok := ret.Get(0).(func(context.Context, *StateT) []PeerIDT); ok { + r0 = rf(ctx, prior) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]PeerIDT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *StateT) error); ok { + r1 = rf(ctx, prior) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProveNextState provides a mock function with given fields: ctx, filter, priorState +func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) ProveNextState(ctx context.Context, rank uint64, filter []byte, priorState models.Identity) (*StateT, error) { + ret := _m.Called(ctx, rank, filter, priorState) + + if len(ret) == 0 { + panic("no return value specified for ProveNextState") + } + + var r0 *StateT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, models.Identity) (*StateT, error)); ok { + return rf(ctx, rank, filter, priorState) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, models.Identity) *StateT); ok { + r0 = rf(ctx, rank, filter, priorState) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, []byte, models.Identity) error); ok { + r1 = rf(ctx, rank, filter, priorState) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLeaderProvider creates a new instance of LeaderProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *LeaderProvider[StateT, PeerIDT, CollectedT] { + mock := &LeaderProvider[StateT, PeerIDT, CollectedT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/liveness_provider.go b/consensus/mocks/liveness_provider.go new file mode 100644 index 0000000..5510dd3 --- /dev/null +++ b/consensus/mocks/liveness_provider.go @@ -0,0 +1,77 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LivenessProvider is an autogenerated mock type for the LivenessProvider type +type LivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct { + mock.Mock +} + +// Collect provides a mock function with given fields: ctx +func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) Collect(ctx context.Context) (CollectedT, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Collect") + } + + var r0 CollectedT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CollectedT, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CollectedT); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(CollectedT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendLiveness provides a mock function with given fields: ctx, prior, collected +func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error { + ret := _m.Called(ctx, prior, collected) + + if len(ret) == 0 { + panic("no return value specified for SendLiveness") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT, CollectedT) error); ok { + r0 = rf(ctx, prior, collected) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewLivenessProvider creates a new instance of LivenessProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *LivenessProvider[StateT, PeerIDT, CollectedT] { + mock := &LivenessProvider[StateT, PeerIDT, CollectedT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/pacemaker.go b/consensus/mocks/pacemaker.go new file mode 100644 index 0000000..74815e9 --- /dev/null +++ b/consensus/mocks/pacemaker.go @@ -0,0 +1,205 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// Pacemaker is an autogenerated mock type for the Pacemaker type +type Pacemaker struct { + mock.Mock +} + +// CurrentRank provides a mock function with no fields +func (_m *Pacemaker) CurrentRank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CurrentRank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// LatestQuorumCertificate provides a mock function with no fields +func (_m *Pacemaker) LatestQuorumCertificate() models.QuorumCertificate { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestQuorumCertificate") + } + + var r0 models.QuorumCertificate + if rf, ok := ret.Get(0).(func() models.QuorumCertificate); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.QuorumCertificate) + } + } + + return r0 +} + +// PriorRankTimeoutCertificate provides a mock function with no fields +func (_m *Pacemaker) PriorRankTimeoutCertificate() models.TimeoutCertificate { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PriorRankTimeoutCertificate") + } + + var r0 models.TimeoutCertificate + if rf, ok := ret.Get(0).(func() models.TimeoutCertificate); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.TimeoutCertificate) + } + } + + return r0 +} + +// ReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *Pacemaker) ReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) (*models.NextRank, error) { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for ReceiveQuorumCertificate") + } + + var r0 *models.NextRank + var r1 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) (*models.NextRank, error)); ok { + return rf(quorumCertificate) + } + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) *models.NextRank); ok { + r0 = rf(quorumCertificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.NextRank) + } + } + + if rf, ok := ret.Get(1).(func(models.QuorumCertificate) error); ok { + r1 = rf(quorumCertificate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *Pacemaker) ReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) (*models.NextRank, error) { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for ReceiveTimeoutCertificate") + } + + var r0 *models.NextRank + var r1 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) (*models.NextRank, error)); ok { + return rf(timeoutCertificate) + } + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) *models.NextRank); ok { + r0 = rf(timeoutCertificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.NextRank) + } + } + + if rf, ok := ret.Get(1).(func(models.TimeoutCertificate) error); ok { + r1 = rf(timeoutCertificate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: ctx +func (_m *Pacemaker) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + 
} + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId +func (_m *Pacemaker) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time { + ret := _m.Called(proposalRank, timeRankEntered, parentStateId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok { + r0 = rf(proposalRank, timeRankEntered, parentStateId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// TimeoutCh provides a mock function with no fields +func (_m *Pacemaker) TimeoutCh() <-chan time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeoutCh") + } + + var r0 <-chan time.Time + if rf, ok := ret.Get(0).(func() <-chan time.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan time.Time) + } + } + + return r0 +} + +// NewPacemaker creates a new instance of Pacemaker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPacemaker(t interface { + mock.TestingT + Cleanup(func()) +}) *Pacemaker { + mock := &Pacemaker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/packer.go b/consensus/mocks/packer.go new file mode 100644 index 0000000..6c525a6 --- /dev/null +++ b/consensus/mocks/packer.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Packer is an autogenerated mock type for the Packer type +type Packer struct { + mock.Mock +} + +// Pack provides a mock function with given fields: rank, sig +func (_m *Packer) Pack(rank uint64, sig *consensus.StateSignatureData) ([]byte, []byte, error) { + ret := _m.Called(rank, sig) + + if len(ret) == 0 { + panic("no return value specified for Pack") + } + + var r0 []byte + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) ([]byte, []byte, error)); ok { + return rf(rank, sig) + } + if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) []byte); ok { + r0 = rf(rank, sig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(uint64, *consensus.StateSignatureData) []byte); ok { + r1 = rf(rank, sig) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(uint64, *consensus.StateSignatureData) error); ok { + r2 = rf(rank, sig) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Unpack provides a mock function with given fields: signerIdentities, sigData +func (_m *Packer) Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) (*consensus.StateSignatureData, error) { + ret := _m.Called(signerIdentities, sigData) + + if len(ret) == 0 { + panic("no return value specified for Unpack") + } + + var r0 *consensus.StateSignatureData + var r1 error + if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) (*consensus.StateSignatureData, error)); ok { + return rf(signerIdentities, sigData) + } + if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) *consensus.StateSignatureData); ok { + r0 = rf(signerIdentities, sigData) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*consensus.StateSignatureData) + } + } + + if rf, ok := ret.Get(1).(func([]models.WeightedIdentity, []byte) error); ok { + r1 = rf(signerIdentities, sigData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewPacker creates a new instance of Packer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPacker(t interface { + mock.TestingT + Cleanup(func()) +}) *Packer { + mock := &Packer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/participant_consumer.go b/consensus/mocks/participant_consumer.go new file mode 100644 index 0000000..1c81732 --- /dev/null +++ b/consensus/mocks/participant_consumer.go @@ -0,0 +1,91 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// ParticipantConsumer is an autogenerated mock type for the ParticipantConsumer type +type ParticipantConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader +func (_m *ParticipantConsumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) { + _m.Called(currentRank, finalizedRank, currentLeader) +} + +// OnEventProcessed provides a mock function with no fields +func (_m *ParticipantConsumer[StateT, VoteT]) OnEventProcessed() { + _m.Called() +} + +// OnLocalTimeout provides a mock function with given fields: currentRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) { + _m.Called(currentRank) +} + +// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate +func (_m *ParticipantConsumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) { + _m.Called(currentRank, partialTimeoutCertificate) +} + +// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc +func (_m *ParticipantConsumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) { + _m.Called(oldRank, newRank, qc) +} + +// OnRankChange provides a mock function with given fields: oldRank, newRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) { + _m.Called(oldRank, newRank) +} + +// OnReceiveProposal provides a mock function with given fields: currentRank, proposal +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(currentRank, proposal) +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) { + _m.Called(currentRank, qc) +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) { + _m.Called(currentRank, tc) +} + +// OnStart provides a mock function with given fields: currentRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnStart(currentRank uint64) { + _m.Called(currentRank) +} + +// OnStartingTimeout provides a mock function with given fields: startTime, endTime +func (_m *ParticipantConsumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) { + _m.Called(startTime, endTime) +} + +// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc +func (_m *ParticipantConsumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) { + _m.Called(oldRank, newRank, tc) +} + +// NewParticipantConsumer creates a new instance of ParticipantConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParticipantConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ParticipantConsumer[StateT, VoteT] { + mock := &ParticipantConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/proposal_duration_provider.go b/consensus/mocks/proposal_duration_provider.go new file mode 100644 index 0000000..35de879 --- /dev/null +++ b/consensus/mocks/proposal_duration_provider.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalDurationProvider is an autogenerated mock type for the ProposalDurationProvider type +type ProposalDurationProvider struct { + mock.Mock +} + +// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId +func (_m *ProposalDurationProvider) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time { + ret := _m.Called(proposalRank, timeRankEntered, parentStateId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok { + r0 = rf(proposalRank, timeRankEntered, parentStateId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// NewProposalDurationProvider creates a new instance of ProposalDurationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProposalDurationProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *ProposalDurationProvider { + mock := &ProposalDurationProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/proposal_violation_consumer.go b/consensus/mocks/proposal_violation_consumer.go new file mode 100644 index 0000000..2cb61e4 --- /dev/null +++ b/consensus/mocks/proposal_violation_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationConsumer is an autogenerated mock type for the ProposalViolationConsumer type +type ProposalViolationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *ProposalViolationConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *ProposalViolationConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// NewProposalViolationConsumer creates a new instance of ProposalViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewProposalViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ProposalViolationConsumer[StateT, VoteT] { + mock := &ProposalViolationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/read_only_consensus_store.go b/consensus/mocks/read_only_consensus_store.go new file mode 100644 index 0000000..7cf8fbd --- /dev/null +++ b/consensus/mocks/read_only_consensus_store.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ReadOnlyConsensusStore is an autogenerated mock type for the ReadOnlyConsensusStore type +type ReadOnlyConsensusStore[VoteT models.Unique] struct { + mock.Mock +} + +// GetConsensusState provides a mock function with no fields +func (_m *ReadOnlyConsensusStore[VoteT]) GetConsensusState() (*models.ConsensusState[VoteT], error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConsensusState") + } + + var r0 *models.ConsensusState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func() (*models.ConsensusState[VoteT], error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.ConsensusState[VoteT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.ConsensusState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLivenessState provides a mock function with no fields +func (_m *ReadOnlyConsensusStore[VoteT]) GetLivenessState() (*models.LivenessState, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLivenessState") + } + + var r0 *models.LivenessState + var r1 error + if rf, ok := ret.Get(0).(func() (*models.LivenessState, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.LivenessState); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.LivenessState) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewReadOnlyConsensusStore creates a new instance of ReadOnlyConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReadOnlyConsensusStore[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ReadOnlyConsensusStore[VoteT] { + mock := &ReadOnlyConsensusStore[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/replicas.go b/consensus/mocks/replicas.go new file mode 100644 index 0000000..1daee9d --- /dev/null +++ b/consensus/mocks/replicas.go @@ -0,0 +1,189 @@ +// Code generated by mockery. DO NOT EDIT. 
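// Illustrative sketch (not part of the generated mocks or this patch's hunks):
// a generic helper that pre-programs the ReadOnlyConsensusStore mock above. It
// stays generic over VoteT so no concrete vote type satisfying models.Unique is
// assumed; the helper name and import path are hypothetical.
package mocks_test

import (
	"testing"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// newStubConsensusStore returns a mock store whose getters hand back the given
// consensus and liveness state with no error. The cleanup registered by the
// constructor fails the test if the expectations are never exercised.
func newStubConsensusStore[VoteT models.Unique](
	t *testing.T,
	cs *models.ConsensusState[VoteT],
	ls *models.LivenessState,
) *mocks.ReadOnlyConsensusStore[VoteT] {
	store := mocks.NewReadOnlyConsensusStore[VoteT](t)
	store.On("GetConsensusState").Return(cs, nil)
	store.On("GetLivenessState").Return(ls, nil)
	return store
}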
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Replicas is an autogenerated mock type for the Replicas type +type Replicas struct { + mock.Mock +} + +// IdentitiesByRank provides a mock function with given fields: rank +func (_m *Replicas) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByRank") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByRank provides a mock function with given fields: rank, participantID +func (_m *Replicas) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) { + ret := _m.Called(rank, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByRank") + } + + var r0 models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok { + return rf(rank, participantID) + } + if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok { + r0 = rf(rank, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok { + r1 = rf(rank, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LeaderForRank provides a mock function with given fields: rank +func (_m *Replicas) LeaderForRank(rank uint64) (models.Identity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for LeaderForRank") + } + + var r0 models.Identity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(models.Identity) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuorumThresholdForRank provides a mock function with given fields: rank +func (_m *Replicas) QuorumThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Self provides a mock function with no fields +func (_m *Replicas) Self() models.Identity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Self") + } + + var r0 models.Identity + if rf, ok := ret.Get(0).(func() models.Identity); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(models.Identity) + } + + return r0 +} + +// TimeoutThresholdForRank provides a mock function with given fields: 
rank +func (_m *Replicas) TimeoutThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewReplicas creates a new instance of Replicas. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReplicas(t interface { + mock.TestingT + Cleanup(func()) +}) *Replicas { + mock := &Replicas{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/safety_rules.go b/consensus/mocks/safety_rules.go new file mode 100644 index 0000000..04b72b2 --- /dev/null +++ b/consensus/mocks/safety_rules.go @@ -0,0 +1,117 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SafetyRules is an autogenerated mock type for the SafetyRules type +type SafetyRules[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// ProduceTimeout provides a mock function with given fields: curRank, newestQC, lastRankTC +func (_m *SafetyRules[StateT, VoteT]) ProduceTimeout(curRank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) { + ret := _m.Called(curRank, newestQC, lastRankTC) + + if len(ret) == 0 { + panic("no return value specified for ProduceTimeout") + } + + var r0 *models.TimeoutState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok { + return rf(curRank, newestQC, lastRankTC) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok { + r0 = rf(curRank, newestQC, lastRankTC) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.TimeoutState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(curRank, newestQC, lastRankTC) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProduceVote provides a mock function with given fields: proposal, curRank +func (_m *SafetyRules[StateT, VoteT]) ProduceVote(proposal *models.SignedProposal[StateT, VoteT], curRank uint64) (*VoteT, error) { + ret := _m.Called(proposal, curRank) + + if len(ret) == 0 { + panic("no return value specified for ProduceVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) (*VoteT, error)); ok { + return rf(proposal, curRank) + } + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) *VoteT); ok { + r0 = rf(proposal, curRank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.SignedProposal[StateT, VoteT], uint64) error); ok { + r1 = rf(proposal, curRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignOwnProposal provides a mock function with given 
fields: unsignedProposal +func (_m *SafetyRules[StateT, VoteT]) SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error) { + ret := _m.Called(unsignedProposal) + + if len(ret) == 0 { + panic("no return value specified for SignOwnProposal") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) (*VoteT, error)); ok { + return rf(unsignedProposal) + } + if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) *VoteT); ok { + r0 = rf(unsignedProposal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.Proposal[StateT]) error); ok { + r1 = rf(unsignedProposal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSafetyRules creates a new instance of SafetyRules. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSafetyRules[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *SafetyRules[StateT, VoteT] { + mock := &SafetyRules[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/signature_aggregator.go b/consensus/mocks/signature_aggregator.go new file mode 100644 index 0000000..97d4c5f --- /dev/null +++ b/consensus/mocks/signature_aggregator.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SignatureAggregator is an autogenerated mock type for the SignatureAggregator type +type SignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with given fields: publicKeys, signatures +func (_m *SignatureAggregator) Aggregate(publicKeys [][]byte, signatures [][]byte) (models.AggregatedSignature, error) { + ret := _m.Called(publicKeys, signatures) + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 models.AggregatedSignature + var r1 error + if rf, ok := ret.Get(0).(func([][]byte, [][]byte) (models.AggregatedSignature, error)); ok { + return rf(publicKeys, signatures) + } + if rf, ok := ret.Get(0).(func([][]byte, [][]byte) models.AggregatedSignature); ok { + r0 = rf(publicKeys, signatures) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(1).(func([][]byte, [][]byte) error); ok { + r1 = rf(publicKeys, signatures) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifySignatureMultiMessage provides a mock function with given fields: publicKeys, signature, messages, context +func (_m *SignatureAggregator) VerifySignatureMultiMessage(publicKeys [][]byte, signature []byte, messages [][]byte, context []byte) bool { + ret := _m.Called(publicKeys, signature, messages, context) + + if len(ret) == 0 { + panic("no return value specified for VerifySignatureMultiMessage") + } + + var r0 bool + if rf, ok := ret.Get(0).(func([][]byte, []byte, [][]byte, []byte) bool); ok { + r0 = rf(publicKeys, signature, messages, context) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// VerifySignatureRaw provides a mock function with given fields: publicKey, signature, message, context +func (_m *SignatureAggregator) VerifySignatureRaw(publicKey []byte, signature []byte, message []byte, context []byte) bool { + ret := _m.Called(publicKey, 
signature, message, context) + + if len(ret) == 0 { + panic("no return value specified for VerifySignatureRaw") + } + + var r0 bool + if rf, ok := ret.Get(0).(func([]byte, []byte, []byte, []byte) bool); ok { + r0 = rf(publicKey, signature, message, context) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewSignatureAggregator creates a new instance of SignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *SignatureAggregator { + mock := &SignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/signer.go b/consensus/mocks/signer.go new file mode 100644 index 0000000..b29893c --- /dev/null +++ b/consensus/mocks/signer.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer is an autogenerated mock type for the Signer type +type Signer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// CreateTimeout provides a mock function with given fields: curRank, newestQC, previousRankTimeoutCert +func (_m *Signer[StateT, VoteT]) CreateTimeout(curRank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) { + ret := _m.Called(curRank, newestQC, previousRankTimeoutCert) + + if len(ret) == 0 { + panic("no return value specified for CreateTimeout") + } + + var r0 *models.TimeoutState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok { + return rf(curRank, newestQC, previousRankTimeoutCert) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok { + r0 = rf(curRank, newestQC, previousRankTimeoutCert) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.TimeoutState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(curRank, newestQC, previousRankTimeoutCert) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateVote provides a mock function with given fields: state +func (_m *Signer[StateT, VoteT]) CreateVote(state *models.State[StateT]) (*VoteT, error) { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for CreateVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) (*VoteT, error)); ok { + return rf(state) + } + if rf, ok := ret.Get(0).(func(*models.State[StateT]) *VoteT); ok { + r0 = rf(state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok { + r1 = rf(state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSigner creates a new instance of Signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSigner[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Signer[StateT, VoteT] { + mock := &Signer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/state_producer.go b/consensus/mocks/state_producer.go new file mode 100644 index 0000000..6a396c1 --- /dev/null +++ b/consensus/mocks/state_producer.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateProducer is an autogenerated mock type for the StateProducer type +type StateProducer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// MakeStateProposal provides a mock function with given fields: rank, qc, lastRankTC +func (_m *StateProducer[StateT, VoteT]) MakeStateProposal(rank uint64, qc models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error) { + ret := _m.Called(rank, qc, lastRankTC) + + if len(ret) == 0 { + panic("no return value specified for MakeStateProposal") + } + + var r0 *models.SignedProposal[StateT, VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error)); ok { + return rf(rank, qc, lastRankTC) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.SignedProposal[StateT, VoteT]); ok { + r0 = rf(rank, qc, lastRankTC) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.SignedProposal[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(rank, qc, lastRankTC) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStateProducer creates a new instance of StateProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateProducer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *StateProducer[StateT, VoteT] { + mock := &StateProducer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/state_signer_decoder.go b/consensus/mocks/state_signer_decoder.go new file mode 100644 index 0000000..8690501 --- /dev/null +++ b/consensus/mocks/state_signer_decoder.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. 
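// Illustrative sketch (not part of the generated mocks or this patch's hunks):
// pre-programming the StateProducer mock above so MakeStateProposal for a given
// rank yields a prepared proposal. Generic over StateT/VoteT so no concrete
// models.Unique implementation is assumed; the helper name is hypothetical.
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func expectProposalAtRank[StateT models.Unique, VoteT models.Unique](
	t *testing.T,
	rank uint64,
	proposal *models.SignedProposal[StateT, VoteT],
) *mocks.StateProducer[StateT, VoteT] {
	producer := mocks.NewStateProducer[StateT, VoteT](t)
	// Match the exact rank but accept any QC and any last-rank TC. The cleanup
	// registered by the constructor fails the test if this is never called.
	producer.On("MakeStateProposal", rank, mock.Anything, mock.Anything).
		Return(proposal, nil)
	return producer
}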
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateSignerDecoder is an autogenerated mock type for the StateSignerDecoder type +type StateSignerDecoder[StateT models.Unique] struct { + mock.Mock +} + +// DecodeSignerIDs provides a mock function with given fields: state +func (_m *StateSignerDecoder[StateT]) DecodeSignerIDs(state *models.State[StateT]) ([]models.WeightedIdentity, error) { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for DecodeSignerIDs") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) ([]models.WeightedIdentity, error)); ok { + return rf(state) + } + if rf, ok := ret.Get(0).(func(*models.State[StateT]) []models.WeightedIdentity); ok { + r0 = rf(state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok { + r1 = rf(state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStateSignerDecoder creates a new instance of StateSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateSignerDecoder[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *StateSignerDecoder[StateT] { + mock := &StateSignerDecoder[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/sync_provider.go b/consensus/mocks/sync_provider.go new file mode 100644 index 0000000..4ab1fcb --- /dev/null +++ b/consensus/mocks/sync_provider.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SyncProvider is an autogenerated mock type for the SyncProvider type +type SyncProvider[StateT models.Unique] struct { + mock.Mock +} + +// Synchronize provides a mock function with given fields: ctx, existing +func (_m *SyncProvider[StateT]) Synchronize(ctx context.Context, existing *StateT) (<-chan *StateT, <-chan error) { + ret := _m.Called(ctx, existing) + + if len(ret) == 0 { + panic("no return value specified for Synchronize") + } + + var r0 <-chan *StateT + var r1 <-chan error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) (<-chan *StateT, <-chan error)); ok { + return rf(ctx, existing) + } + if rf, ok := ret.Get(0).(func(context.Context, *StateT) <-chan *StateT); ok { + r0 = rf(ctx, existing) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *StateT) <-chan error); ok { + r1 = rf(ctx, existing) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + return r0, r1 +} + +// NewSyncProvider creates a new instance of SyncProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSyncProvider[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *SyncProvider[StateT] { + mock := &SyncProvider[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregation_consumer.go b/consensus/mocks/timeout_aggregation_consumer.go new file mode 100644 index 0000000..50bfa2e --- /dev/null +++ b/consensus/mocks/timeout_aggregation_consumer.go @@ -0,0 +1,62 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationConsumer is an autogenerated mock type for the TimeoutAggregationConsumer type +type TimeoutAggregationConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1 +func (_m *TimeoutAggregationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidTimeoutDetected provides a mock function with given fields: err +func (_m *TimeoutAggregationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) { + _m.Called(err) +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *TimeoutAggregationConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// NewTimeoutAggregationConsumer creates a new instance of TimeoutAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregationConsumer[VoteT] { + mock := &TimeoutAggregationConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregation_violation_consumer.go b/consensus/mocks/timeout_aggregation_violation_consumer.go new file mode 100644 index 0000000..ce724ac --- /dev/null +++ b/consensus/mocks/timeout_aggregation_violation_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. 
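// Illustrative sketch (not part of the generated mocks or this patch's hunks):
// stubbing Synchronize on the SyncProvider mock above. The explicit receive-only
// conversions matter: the generated code type-asserts the stored values to
// <-chan types, so passing a plain bidirectional channel would panic.
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func newStubSyncProvider[StateT models.Unique](t *testing.T) (*mocks.SyncProvider[StateT], chan *StateT) {
	states := make(chan *StateT, 1)
	errs := make(chan error, 1)

	// Store the channels as receive-only values, matching the declared return types.
	var statesOut <-chan *StateT = states
	var errsOut <-chan error = errs

	provider := mocks.NewSyncProvider[StateT](t)
	provider.On("Synchronize", mock.Anything, mock.Anything).Return(statesOut, errsOut)

	// The test keeps the writable state side to feed synchronized states back to
	// the caller; the error channel is left empty (happy path).
	return provider, states
}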
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationViolationConsumer is an autogenerated mock type for the TimeoutAggregationViolationConsumer type +type TimeoutAggregationViolationConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1 +func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidTimeoutDetected provides a mock function with given fields: err +func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) { + _m.Called(err) +} + +// NewTimeoutAggregationViolationConsumer creates a new instance of TimeoutAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationViolationConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregationViolationConsumer[VoteT] { + mock := &TimeoutAggregationViolationConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregator.go b/consensus/mocks/timeout_aggregator.go new file mode 100644 index 0000000..b2953ed --- /dev/null +++ b/consensus/mocks/timeout_aggregator.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregator is an autogenerated mock type for the TimeoutAggregator type +type TimeoutAggregator[VoteT models.Unique] struct { + mock.Mock +} + +// AddTimeout provides a mock function with given fields: timeoutState +func (_m *TimeoutAggregator[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) { + _m.Called(timeoutState) +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *TimeoutAggregator[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// Start provides a mock function with given fields: ctx +func (_m *TimeoutAggregator[VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTimeoutAggregator creates a new instance of TimeoutAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregator[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregator[VoteT] { + mock := &TimeoutAggregator[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector.go b/consensus/mocks/timeout_collector.go new file mode 100644 index 0000000..53d84d3 --- /dev/null +++ b/consensus/mocks/timeout_collector.go @@ -0,0 +1,63 @@ +// Code generated by mockery. DO NOT EDIT. 
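// Illustrative sketch (not part of the generated mocks or this patch's hunks):
// the TimeoutAggregator mock above mixes a value-returning Start with
// fire-and-forget methods, so the latter only need bare expectations marked
// optional via Maybe(). Helper name and import path are hypothetical.
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func newStartedTimeoutAggregator[VoteT models.Unique](t *testing.T) *mocks.TimeoutAggregator[VoteT] {
	agg := mocks.NewTimeoutAggregator[VoteT](t)

	agg.On("Start", mock.Anything).Return(nil)     // Start succeeds
	agg.On("AddTimeout", mock.Anything).Maybe()    // may or may not be called
	agg.On("PruneUpToRank", mock.Anything).Maybe() // may or may not be called

	if err := agg.Start(context.Background()); err != nil {
		t.Fatalf("Start: %v", err)
	}
	return agg
}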
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollector is an autogenerated mock type for the TimeoutCollector type +type TimeoutCollector[VoteT models.Unique] struct { + mock.Mock +} + +// AddTimeout provides a mock function with given fields: timeoutState +func (_m *TimeoutCollector[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) error { + ret := _m.Called(timeoutState) + + if len(ret) == 0 { + panic("no return value specified for AddTimeout") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok { + r0 = rf(timeoutState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Rank provides a mock function with no fields +func (_m *TimeoutCollector[VoteT]) Rank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewTimeoutCollector creates a new instance of TimeoutCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollector[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollector[VoteT] { + mock := &TimeoutCollector[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector_consumer.go b/consensus/mocks/timeout_collector_consumer.go new file mode 100644 index 0000000..07708b3 --- /dev/null +++ b/consensus/mocks/timeout_collector_consumer.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorConsumer is an autogenerated mock type for the TimeoutCollectorConsumer type +type TimeoutCollectorConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *TimeoutCollectorConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// NewTimeoutCollectorConsumer creates a new instance of TimeoutCollectorConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectorConsumer[VoteT] { + mock := &TimeoutCollectorConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector_factory.go b/consensus/mocks/timeout_collector_factory.go new file mode 100644 index 0000000..a6843a4 --- /dev/null +++ b/consensus/mocks/timeout_collector_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorFactory is an autogenerated mock type for the TimeoutCollectorFactory type +type TimeoutCollectorFactory[VoteT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: rank +func (_m *TimeoutCollectorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutCollector[VoteT], error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.TimeoutCollector[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutCollectorFactory creates a new instance of TimeoutCollectorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorFactory[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectorFactory[VoteT] { + mock := &TimeoutCollectorFactory[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collectors.go b/consensus/mocks/timeout_collectors.go new file mode 100644 index 0000000..cb5189f --- /dev/null +++ b/consensus/mocks/timeout_collectors.go @@ -0,0 +1,71 @@ +// Code generated by mockery. DO NOT EDIT. 
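// Illustrative sketch (not part of the generated mocks or this patch's hunks):
// wiring the TimeoutCollectorFactory mock above to hand out a generated
// TimeoutCollector mock for every requested rank. This assumes, as is usual for
// mockery output, that the collector mock satisfies the consensus.TimeoutCollector
// interface it mocks; the helper name is hypothetical.
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func newStubCollectorFactory[VoteT models.Unique](t *testing.T) *mocks.TimeoutCollectorFactory[VoteT] {
	collector := mocks.NewTimeoutCollector[VoteT](t)
	collector.On("AddTimeout", mock.Anything).Return(nil).Maybe()
	collector.On("Rank").Return(uint64(0)).Maybe()

	factory := mocks.NewTimeoutCollectorFactory[VoteT](t)
	// Create is expected to be called at least once; cleanup asserts it.
	factory.On("Create", mock.Anything).Return(collector, nil)
	return factory
}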
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectors is an autogenerated mock type for the TimeoutCollectors type +type TimeoutCollectors[VoteT models.Unique] struct { + mock.Mock +} + +// GetOrCreateCollector provides a mock function with given fields: rank +func (_m *TimeoutCollectors[VoteT]) GetOrCreateCollector(rank uint64) (consensus.TimeoutCollector[VoteT], bool, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + + var r0 consensus.TimeoutCollector[VoteT] + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], bool, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(rank) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(rank) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *TimeoutCollectors[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// NewTimeoutCollectors creates a new instance of TimeoutCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectors[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectors[VoteT] { + mock := &TimeoutCollectors[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_processor.go b/consensus/mocks/timeout_processor.go new file mode 100644 index 0000000..596ec3f --- /dev/null +++ b/consensus/mocks/timeout_processor.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutProcessor is an autogenerated mock type for the TimeoutProcessor type +type TimeoutProcessor[VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: timeout +func (_m *TimeoutProcessor[VoteT]) Process(timeout *models.TimeoutState[VoteT]) error { + ret := _m.Called(timeout) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok { + r0 = rf(timeout) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTimeoutProcessor creates a new instance of TimeoutProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTimeoutProcessor[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutProcessor[VoteT] { + mock := &TimeoutProcessor[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_processor_factory.go b/consensus/mocks/timeout_processor_factory.go new file mode 100644 index 0000000..774b005 --- /dev/null +++ b/consensus/mocks/timeout_processor_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutProcessorFactory is an autogenerated mock type for the TimeoutProcessorFactory type +type TimeoutProcessorFactory[VoteT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: rank +func (_m *TimeoutProcessorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutProcessor[VoteT], error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.TimeoutProcessor[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutProcessor[VoteT], error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutProcessor[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutProcessor[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutProcessorFactory creates a new instance of TimeoutProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutProcessorFactory[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutProcessorFactory[VoteT] { + mock := &TimeoutProcessorFactory[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_signature_aggregator.go b/consensus/mocks/timeout_signature_aggregator.go new file mode 100644 index 0000000..aeca0b4 --- /dev/null +++ b/consensus/mocks/timeout_signature_aggregator.go @@ -0,0 +1,132 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutSignatureAggregator is an autogenerated mock type for the TimeoutSignatureAggregator type +type TimeoutSignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) Aggregate() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 []consensus.TimeoutSignerInfo + var r1 models.AggregatedSignature + var r2 error + if rf, ok := ret.Get(0).(func() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []consensus.TimeoutSignerInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]consensus.TimeoutSignerInfo) + } + } + + if rf, ok := ret.Get(1).(func() models.AggregatedSignature); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Rank provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) Rank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// TotalWeight provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) TotalWeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// VerifyAndAdd provides a mock function with given fields: signerID, sig, newestQCRank +func (_m *TimeoutSignatureAggregator) VerifyAndAdd(signerID models.Identity, sig []byte, newestQCRank uint64) (uint64, error) { + ret := _m.Called(signerID, sig, newestQCRank) + + if len(ret) == 0 { + panic("no return value specified for VerifyAndAdd") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) (uint64, error)); ok { + return rf(signerID, sig, newestQCRank) + } + if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) uint64); ok { + r0 = rf(signerID, sig, newestQCRank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(models.Identity, []byte, uint64) error); ok { + r1 = rf(signerID, sig, newestQCRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutSignatureAggregator creates a new instance of TimeoutSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTimeoutSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutSignatureAggregator { + mock := &TimeoutSignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/trace_logger.go b/consensus/mocks/trace_logger.go new file mode 100644 index 0000000..a2b4677 --- /dev/null +++ b/consensus/mocks/trace_logger.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// TraceLogger is an autogenerated mock type for the TraceLogger type +type TraceLogger struct { + mock.Mock +} + +// Error provides a mock function with given fields: message, err +func (_m *TraceLogger) Error(message string, err error) { + _m.Called(message, err) +} + +// Trace provides a mock function with given fields: message +func (_m *TraceLogger) Trace(message string) { + _m.Called(message) +} + +// NewTraceLogger creates a new instance of TraceLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTraceLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *TraceLogger { + mock := &TraceLogger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/validator.go b/consensus/mocks/validator.go new file mode 100644 index 0000000..9c0d317 --- /dev/null +++ b/consensus/mocks/validator.go @@ -0,0 +1,111 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator is an autogenerated mock type for the Validator type +type Validator[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// ValidateProposal provides a mock function with given fields: proposal +func (_m *Validator[StateT, VoteT]) ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for ValidateProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateQuorumCertificate provides a mock function with given fields: qc +func (_m *Validator[StateT, VoteT]) ValidateQuorumCertificate(qc models.QuorumCertificate) error { + ret := _m.Called(qc) + + if len(ret) == 0 { + panic("no return value specified for ValidateQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(qc) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateTimeoutCertificate provides a mock function with given fields: tc +func (_m *Validator[StateT, VoteT]) ValidateTimeoutCertificate(tc models.TimeoutCertificate) error { + ret := _m.Called(tc) + + if len(ret) == 0 { + panic("no return value specified for ValidateTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(tc) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateVote provides a mock function with given fields: vote +func (_m *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) (*models.WeightedIdentity, error) { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for 
ValidateVote") + } + + var r0 *models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(*VoteT) (*models.WeightedIdentity, error)); ok { + return rf(vote) + } + if rf, ok := ret.Get(0).(func(*VoteT) *models.WeightedIdentity); ok { + r0 = rf(vote) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(*VoteT) error); ok { + r1 = rf(vote) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewValidator creates a new instance of Validator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewValidator[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Validator[StateT, VoteT] { + mock := &Validator[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/verifier.go b/consensus/mocks/verifier.go new file mode 100644 index 0000000..14e4d3f --- /dev/null +++ b/consensus/mocks/verifier.go @@ -0,0 +1,81 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Verifier is an autogenerated mock type for the Verifier type +type Verifier[VoteT models.Unique] struct { + mock.Mock +} + +// VerifyQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *Verifier[VoteT]) VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for VerifyQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(quorumCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *Verifier[VoteT]) VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for VerifyTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(timeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyVote provides a mock function with given fields: vote +func (_m *Verifier[VoteT]) VerifyVote(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for VerifyVote") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifier[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Verifier[VoteT] { + mock := &Verifier[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/verifying_vote_processor.go b/consensus/mocks/verifying_vote_processor.go new file mode 100644 index 0000000..46852c1 --- /dev/null +++ b/consensus/mocks/verifying_vote_processor.go @@ -0,0 +1,85 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VerifyingVoteProcessor is an autogenerated mock type for the VerifyingVoteProcessor type +type VerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: vote +func (_m *VerifyingVoteProcessor[StateT, VoteT]) Process(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with no fields +func (_m *VerifyingVoteProcessor[StateT, VoteT]) State() *models.State[StateT] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 *models.State[StateT] + if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.State[StateT]) + } + } + + return r0 +} + +// Status provides a mock function with no fields +func (_m *VerifyingVoteProcessor[StateT, VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// NewVerifyingVoteProcessor creates a new instance of VerifyingVoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VerifyingVoteProcessor[StateT, VoteT] { + mock := &VerifyingVoteProcessor[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregation_consumer.go b/consensus/mocks/vote_aggregation_consumer.go new file mode 100644 index 0000000..bf582da --- /dev/null +++ b/consensus/mocks/vote_aggregation_consumer.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. 
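testify's argument matchers also work with the generated methods above, which is handy when an expectation should only match a particular vote. A hypothetical sketch, reusing the testVote fixture and imports from the previous example:

func TestProcessMatcherSketch(t *testing.T) {
	processor := mocks.NewVerifyingVoteProcessor[testVote, testVote](t)

	// Only votes for rank 7 satisfy this expectation; any other argument
	// would fail the test when the call is made.
	processor.On("Process", mock.MatchedBy(func(v *testVote) bool {
		return v.GetRank() == 7
	})).Return(nil)

	vote := testVote{id: "vote-7", rank: 7}
	if err := processor.Process(&vote); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}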
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationConsumer is an autogenerated mock type for the VoteAggregationConsumer type +type VoteAggregationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1 +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) { + _m.Called(_a0, _a1) +} + +// OnInvalidVoteDetected provides a mock function with given fields: err +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + _m.Called(err) +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(vote, invalidProposal) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// NewVoteAggregationConsumer creates a new instance of VoteAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregationConsumer[StateT, VoteT] { + mock := &VoteAggregationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregation_violation_consumer.go b/consensus/mocks/vote_aggregation_violation_consumer.go new file mode 100644 index 0000000..f7f3c6a --- /dev/null +++ b/consensus/mocks/vote_aggregation_violation_consumer.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationViolationConsumer is an autogenerated mock type for the VoteAggregationViolationConsumer type +type VoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1 +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) { + _m.Called(_a0, _a1) +} + +// OnInvalidVoteDetected provides a mock function with given fields: err +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + _m.Called(err) +} + +// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(vote, invalidProposal) +} + +// NewVoteAggregationViolationConsumer creates a new instance of VoteAggregationViolationConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregationViolationConsumer[StateT, VoteT] { + mock := &VoteAggregationViolationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregator.go b/consensus/mocks/vote_aggregator.go new file mode 100644 index 0000000..385d136 --- /dev/null +++ b/consensus/mocks/vote_aggregator.go @@ -0,0 +1,80 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregator is an autogenerated mock type for the VoteAggregator type +type VoteAggregator[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddState provides a mock function with given fields: state +func (_m *VoteAggregator[StateT, VoteT]) AddState(state *models.SignedProposal[StateT, VoteT]) { + _m.Called(state) +} + +// AddVote provides a mock function with given fields: vote +func (_m *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) { + _m.Called(vote) +} + +// InvalidState provides a mock function with given fields: state +func (_m *VoteAggregator[StateT, VoteT]) InvalidState(state *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for InvalidState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PruneUpToRank provides a mock function with given fields: rank +func (_m *VoteAggregator[StateT, VoteT]) PruneUpToRank(rank uint64) { + _m.Called(rank) +} + +// Start provides a mock function with given fields: ctx +func (_m *VoteAggregator[StateT, VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVoteAggregator creates a new instance of VoteAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregator[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregator[StateT, VoteT] { + mock := &VoteAggregator[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collector.go b/consensus/mocks/vote_collector.go new file mode 100644 index 0000000..c5eef93 --- /dev/null +++ b/consensus/mocks/vote_collector.go @@ -0,0 +1,106 @@ +// Code generated by mockery. DO NOT EDIT. 
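The generated methods accept either literal return values or a function with the method's signature; in the latter case the stored function is invoked with the actual call arguments (the `if rf, ok := ret.Get(0).(func(...) ...)` branches above). Void methods such as AddVote need no Return at all. A hypothetical sketch, reusing the fixtures from the first example (plus a context import):

func TestVoteAggregatorMockSketch(t *testing.T) {
	agg := mocks.NewVoteAggregator[testVote, testVote](t)

	// A func return value is executed per call instead of being handed back as-is.
	agg.On("Start", mock.Anything).Return(func(ctx context.Context) error {
		return ctx.Err() // nil while the context is still live
	})

	// Void methods only need the expectation itself.
	agg.On("AddVote", mock.Anything).Once()

	if err := agg.Start(context.Background()); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	agg.AddVote(&testVote{id: "vote-1", rank: 1})
}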
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollector is an autogenerated mock type for the VoteCollector type +type VoteCollector[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddVote provides a mock function with given fields: vote +func (_m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for AddVote") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessState provides a mock function with given fields: state +func (_m *VoteCollector[StateT, VoteT]) ProcessState(state *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for ProcessState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Rank provides a mock function with no fields +func (_m *VoteCollector[StateT, VoteT]) Rank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// RegisterVoteConsumer provides a mock function with given fields: consumer +func (_m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer(consumer consensus.VoteConsumer[VoteT]) { + _m.Called(consumer) +} + +// Status provides a mock function with no fields +func (_m *VoteCollector[StateT, VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollector[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollector[StateT, VoteT] { + mock := &VoteCollector[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collector_consumer.go b/consensus/mocks/vote_collector_consumer.go new file mode 100644 index 0000000..7e0cdc5 --- /dev/null +++ b/consensus/mocks/vote_collector_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. 
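Because the generated accessors use plain type assertions (for example ret.Get(0).(uint64) in Rank above), stubbed return values must carry the exact Go type; an untyped constant such as 7 is stored as int and panics at call time. A hypothetical sketch:

func TestVoteCollectorRankSketch(t *testing.T) {
	collector := mocks.NewVoteCollector[testVote, testVote](t)

	collector.On("Rank").Return(uint64(7)) // uint64(7), not a bare 7

	if r := collector.Rank(); r != 7 {
		t.Fatalf("unexpected rank: %d", r)
	}
}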
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectorConsumer is an autogenerated mock type for the VoteCollectorConsumer type +type VoteCollectorConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *VoteCollectorConsumer[VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *VoteCollectorConsumer[VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// NewVoteCollectorConsumer creates a new instance of VoteCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectorConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollectorConsumer[VoteT] { + mock := &VoteCollectorConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collectors.go b/consensus/mocks/vote_collectors.go new file mode 100644 index 0000000..ce5fb43 --- /dev/null +++ b/consensus/mocks/vote_collectors.go @@ -0,0 +1,131 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + + mock "github.com/stretchr/testify/mock" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectors is an autogenerated mock type for the VoteCollectors type +type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *VoteCollectors[StateT, VoteT]) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// GetOrCreateCollector provides a mock function with given fields: rank +func (_m *VoteCollectors[StateT, VoteT]) GetOrCreateCollector(rank uint64) (consensus.VoteCollector[StateT, VoteT], bool, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + + var r0 consensus.VoteCollector[StateT, VoteT] + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.VoteCollector[StateT, VoteT], bool, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.VoteCollector[StateT, VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.VoteCollector[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(rank) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(rank) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *VoteCollectors[StateT, VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// Ready provides a mock function with no fields +func (_m 
*VoteCollectors[StateT, VoteT]) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *VoteCollectors[StateT, VoteT]) Start(ctx lifecycle.SignalerContext) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lifecycle.SignalerContext) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVoteCollectors creates a new instance of VoteCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectors[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollectors[StateT, VoteT] { + mock := &VoteCollectors[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_processor.go b/consensus/mocks/vote_processor.go new file mode 100644 index 0000000..757a97e --- /dev/null +++ b/consensus/mocks/vote_processor.go @@ -0,0 +1,65 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteProcessor is an autogenerated mock type for the VoteProcessor type +type VoteProcessor[VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: vote +func (_m *VoteProcessor[VoteT]) Process(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with no fields +func (_m *VoteProcessor[VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// NewVoteProcessor creates a new instance of VoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessor[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteProcessor[VoteT] { + mock := &VoteProcessor[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_processor_factory.go b/consensus/mocks/vote_processor_factory.go new file mode 100644 index 0000000..9c3a48a --- /dev/null +++ b/consensus/mocks/vote_processor_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. 
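Ready and Done are declared as <-chan struct{}, and the generated code type-asserts exactly that type, so a stubbed channel has to be converted to its receive-only form before being handed to Return. A hypothetical sketch:

func TestVoteCollectorsReadySketch(t *testing.T) {
	collectors := mocks.NewVoteCollectors[testVote, testVote](t)

	ready := make(chan struct{})
	close(ready) // report the component as immediately ready

	// Passing a bidirectional chan struct{} would fail the
	// ret.Get(0).(<-chan struct{}) assertion inside the generated method.
	collectors.On("Ready").Return((<-chan struct{})(ready))

	<-collectors.Ready() // does not block
}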
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteProcessorFactory is an autogenerated mock type for the VoteProcessorFactory type +type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: tracer, proposal, dsTag, aggregator +func (_m *VoteProcessorFactory[StateT, VoteT, PeerIDT]) Create(tracer consensus.TraceLogger, filter []byte, proposal *models.SignedProposal[StateT, VoteT], dsTag []byte, aggregator consensus.SignatureAggregator, voter consensus.VotingProvider[StateT, VoteT, PeerIDT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + ret := _m.Called(tracer, filter, proposal, dsTag, aggregator, voter) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.VerifyingVoteProcessor[StateT, VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)); ok { + return rf(tracer, filter, proposal, dsTag, aggregator, voter) + } + if rf, ok := ret.Get(0).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) consensus.VerifyingVoteProcessor[StateT, VoteT]); ok { + r0 = rf(tracer, filter, proposal, dsTag, aggregator, voter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.VerifyingVoteProcessor[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) error); ok { + r1 = rf(tracer, filter, proposal, dsTag, aggregator, voter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVoteProcessorFactory creates a new instance of VoteProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessorFactory[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteProcessorFactory[StateT, VoteT, PeerIDT] { + mock := &VoteProcessorFactory[StateT, VoteT, PeerIDT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/voting_provider.go b/consensus/mocks/voting_provider.go new file mode 100644 index 0000000..11e2688 --- /dev/null +++ b/consensus/mocks/voting_provider.go @@ -0,0 +1,282 @@ +// Code generated by mockery. DO NOT EDIT. 
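For factory mocks with long parameter lists, the usual approach is one matcher per argument; the expectation only matches when the matcher count lines up with Create's six parameters. A hypothetical sketch that reuses the fixtures above and assumes, as is standard for mockery output, that the generated mock satisfies the consensus interface it mocks; the nil arguments stand in for collaborators a real test would construct:

func TestVoteProcessorFactorySketch(t *testing.T) {
	factory := mocks.NewVoteProcessorFactory[testVote, testVote, testVote](t)
	processor := mocks.NewVerifyingVoteProcessor[testVote, testVote](t)

	// One matcher per parameter of Create(tracer, filter, proposal, dsTag, aggregator, voter).
	factory.On("Create",
		mock.Anything, mock.Anything, mock.Anything,
		mock.Anything, mock.Anything, mock.Anything,
	).Return(processor, nil)

	vvp, err := factory.Create(nil, nil, nil, nil, nil, nil)
	if err != nil || vvp == nil {
		t.Fatal("expected the stubbed processor")
	}
}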
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VotingProvider is an autogenerated mock type for the VotingProvider type +type VotingProvider[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique] struct { + mock.Mock +} + +// FinalizeQuorumCertificate provides a mock function with given fields: ctx, state, aggregatedSignature +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeQuorumCertificate(ctx context.Context, state *models.State[StateT], aggregatedSignature models.AggregatedSignature) (models.QuorumCertificate, error) { + ret := _m.Called(ctx, state, aggregatedSignature) + + if len(ret) == 0 { + panic("no return value specified for FinalizeQuorumCertificate") + } + + var r0 models.QuorumCertificate + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT], models.AggregatedSignature) (models.QuorumCertificate, error)); ok { + return rf(ctx, state, aggregatedSignature) + } + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT], models.AggregatedSignature) models.QuorumCertificate); ok { + r0 = rf(ctx, state, aggregatedSignature) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.QuorumCertificate) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *models.State[StateT], models.AggregatedSignature) error); ok { + r1 = rf(ctx, state, aggregatedSignature) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalizeTimeout provides a mock function with given fields: ctx, rank, latestQuorumCertificate, latestQuorumCertificateRanks, aggregatedSignature +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeTimeout(ctx context.Context, rank uint64, latestQuorumCertificate models.QuorumCertificate, latestQuorumCertificateRanks []uint64, aggregatedSignature models.AggregatedSignature) (models.TimeoutCertificate, error) { + ret := _m.Called(ctx, rank, latestQuorumCertificate, latestQuorumCertificateRanks, aggregatedSignature) + + if len(ret) == 0 { + panic("no return value specified for FinalizeTimeout") + } + + var r0 models.TimeoutCertificate + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, models.QuorumCertificate, []uint64, models.AggregatedSignature) (models.TimeoutCertificate, error)); ok { + return rf(ctx, rank, latestQuorumCertificate, latestQuorumCertificateRanks, aggregatedSignature) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, models.QuorumCertificate, []uint64, models.AggregatedSignature) models.TimeoutCertificate); ok { + r0 = rf(ctx, rank, latestQuorumCertificate, latestQuorumCertificateRanks, aggregatedSignature) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.TimeoutCertificate) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, models.QuorumCertificate, []uint64, models.AggregatedSignature) error); ok { + r1 = rf(ctx, rank, latestQuorumCertificate, latestQuorumCertificateRanks, aggregatedSignature) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalizeVotes provides a mock function with given fields: ctx, proposals, proposalVotes +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeVotes(ctx context.Context, proposals map[models.Identity]*StateT, proposalVotes map[models.Identity]*VoteT) (*StateT, PeerIDT, error) { + ret := _m.Called(ctx, proposals, proposalVotes) + + if len(ret) == 0 { + panic("no return value specified for FinalizeVotes") + } + + var r0 *StateT + var r1 
PeerIDT + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) (*StateT, PeerIDT, error)); ok { + return rf(ctx, proposals, proposalVotes) + } + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) *StateT); ok { + r0 = rf(ctx, proposals, proposalVotes) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) PeerIDT); ok { + r1 = rf(ctx, proposals, proposalVotes) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(PeerIDT) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) error); ok { + r2 = rf(ctx, proposals, proposalVotes) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// IsQuorum provides a mock function with given fields: ctx, proposalVotes +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) IsQuorum(ctx context.Context, proposalVotes map[models.Identity]*VoteT) (bool, error) { + ret := _m.Called(ctx, proposalVotes) + + if len(ret) == 0 { + panic("no return value specified for IsQuorum") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*VoteT) (bool, error)); ok { + return rf(ctx, proposalVotes) + } + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*VoteT) bool); ok { + r0 = rf(ctx, proposalVotes) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, map[models.Identity]*VoteT) error); ok { + r1 = rf(ctx, proposalVotes) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendConfirmation provides a mock function with given fields: ctx, finalized +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendConfirmation(ctx context.Context, finalized *StateT) error { + ret := _m.Called(ctx, finalized) + + if len(ret) == 0 { + panic("no return value specified for SendConfirmation") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) error); ok { + r0 = rf(ctx, finalized) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendProposal provides a mock function with given fields: ctx, proposal +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendProposal(ctx context.Context, proposal *StateT) error { + ret := _m.Called(ctx, proposal) + + if len(ret) == 0 { + panic("no return value specified for SendProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) error); ok { + r0 = rf(ctx, proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendVote provides a mock function with given fields: ctx, vote +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendVote(ctx context.Context, vote *VoteT) (PeerIDT, error) { + ret := _m.Called(ctx, vote) + + if len(ret) == 0 { + panic("no return value specified for SendVote") + } + + var r0 PeerIDT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *VoteT) (PeerIDT, error)); ok { + return rf(ctx, vote) + } + if rf, ok := ret.Get(0).(func(context.Context, *VoteT) PeerIDT); ok { + r0 = rf(ctx, vote) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(PeerIDT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *VoteT) error); ok { + r1 = rf(ctx, vote) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignTimeoutVote provides a mock function with given fields: ctx, filter, currentRank, newestQuorumCertificateRank 
+func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SignTimeoutVote(ctx context.Context, filter []byte, currentRank uint64, newestQuorumCertificateRank uint64) (*VoteT, error) { + ret := _m.Called(ctx, filter, currentRank, newestQuorumCertificateRank) + + if len(ret) == 0 { + panic("no return value specified for SignTimeoutVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, uint64) (*VoteT, error)); ok { + return rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, uint64) *VoteT); ok { + r0 = rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint64, uint64) error); ok { + r1 = rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignVote provides a mock function with given fields: ctx, state +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SignVote(ctx context.Context, state *models.State[StateT]) (*VoteT, error) { + ret := _m.Called(ctx, state) + + if len(ret) == 0 { + panic("no return value specified for SignVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT]) (*VoteT, error)); ok { + return rf(ctx, state) + } + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT]) *VoteT); ok { + r0 = rf(ctx, state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *models.State[StateT]) error); ok { + r1 = rf(ctx, state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVotingProvider creates a new instance of VotingProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVotingProvider[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VotingProvider[StateT, VoteT, PeerIDT] { + mock := &VotingProvider[StateT, VoteT, PeerIDT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/weight_provider.go b/consensus/mocks/weight_provider.go new file mode 100644 index 0000000..5e2440c --- /dev/null +++ b/consensus/mocks/weight_provider.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// WeightProvider is an autogenerated mock type for the WeightProvider type +type WeightProvider struct { + mock.Mock +} + +// GetWeightForBitmask provides a mock function with given fields: filter, bitmask +func (_m *WeightProvider) GetWeightForBitmask(filter []byte, bitmask []byte) uint64 { + ret := _m.Called(filter, bitmask) + + if len(ret) == 0 { + panic("no return value specified for GetWeightForBitmask") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func([]byte, []byte) uint64); ok { + r0 = rf(filter, bitmask) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewWeightProvider creates a new instance of WeightProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWeightProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *WeightProvider { + mock := &WeightProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/weighted_signature_aggregator.go b/consensus/mocks/weighted_signature_aggregator.go new file mode 100644 index 0000000..bf2920b --- /dev/null +++ b/consensus/mocks/weighted_signature_aggregator.go @@ -0,0 +1,130 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// WeightedSignatureAggregator is an autogenerated mock type for the WeightedSignatureAggregator type +type WeightedSignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with no fields +func (_m *WeightedSignatureAggregator) Aggregate() ([]models.WeightedIdentity, models.AggregatedSignature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 []models.WeightedIdentity + var r1 models.AggregatedSignature + var r2 error + if rf, ok := ret.Get(0).(func() ([]models.WeightedIdentity, models.AggregatedSignature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []models.WeightedIdentity); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func() models.AggregatedSignature); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// TotalWeight provides a mock function with no fields +func (_m *WeightedSignatureAggregator) TotalWeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// TrustedAdd provides a mock function with given fields: signerID, sig +func (_m *WeightedSignatureAggregator) TrustedAdd(signerID models.Identity, sig []byte) (uint64, error) { + ret := _m.Called(signerID, sig) + + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte) (uint64, error)); ok { + return rf(signerID, sig) + } + if rf, ok := ret.Get(0).(func(models.Identity, []byte) uint64); ok { + r0 = rf(signerID, sig) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(models.Identity, []byte) error); ok { + r1 = rf(signerID, sig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Verify provides a mock function with given fields: signerID, sig +func (_m *WeightedSignatureAggregator) Verify(signerID models.Identity, sig []byte) error { + ret := _m.Called(signerID, sig) + + if len(ret) == 0 { + panic("no return value specified for Verify") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte) error); ok { + r0 = rf(signerID, sig) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewWeightedSignatureAggregator creates a new instance of WeightedSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWeightedSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *WeightedSignatureAggregator { + mock := &WeightedSignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/workerpool.go b/consensus/mocks/workerpool.go new file mode 100644 index 0000000..447fc39 --- /dev/null +++ b/consensus/mocks/workerpool.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Workerpool is an autogenerated mock type for the Workerpool type +type Workerpool struct { + mock.Mock +} + +// StopWait provides a mock function with no fields +func (_m *Workerpool) StopWait() { + _m.Called() +} + +// Submit provides a mock function with given fields: task +func (_m *Workerpool) Submit(task func()) { + _m.Called(task) +} + +// NewWorkerpool creates a new instance of Workerpool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkerpool(t interface { + mock.TestingT + Cleanup(func()) +}) *Workerpool { + mock := &Workerpool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/workers.go b/consensus/mocks/workers.go new file mode 100644 index 0000000..3d0a4b9 --- /dev/null +++ b/consensus/mocks/workers.go @@ -0,0 +1,29 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Workers is an autogenerated mock type for the Workers type +type Workers struct { + mock.Mock +} + +// Submit provides a mock function with given fields: task +func (_m *Workers) Submit(task func()) { + _m.Called(task) +} + +// NewWorkers creates a new instance of Workers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkers(t interface { + mock.TestingT + Cleanup(func()) +}) *Workers { + mock := &Workers{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/models/aggregated_signature.go b/consensus/models/aggregated_signature.go new file mode 100644 index 0000000..63d8ee9 --- /dev/null +++ b/consensus/models/aggregated_signature.go @@ -0,0 +1,48 @@ +package models + +// AggregatedSignature provides a generic interface over an aggregatable +// signature type +type AggregatedSignature interface { + // GetSignature returns the aggregated signature in raw canonical bytes + GetSignature() []byte + // GetPubKey returns the public key in raw canonical bytes + GetPubKey() []byte + // GetBitmask returns the bitmask of the signers in the signature, in matching + // order to the clique's prover set (in ascending ring order). + GetBitmask() []byte +} + +// AggregatedSigner provides a generic interface over an aggregatable signature +// scheme. Embeds the validation-only methods. +type AggregatedSigner interface { + AggregatedSignatureValidator + // AggregateSignatures produces an AggregatedSignature object, expecting + // public keys and signatures to be in matching order, with nil slices for + // bitmask entries that are not present. The order should be aligned to the + // clique's prover set (in ascending ring order). 
+ AggregateSignatures( + publicKeys [][]byte, + signatures [][]byte, + ) (AggregatedSignature, error) + // SignWithContext produces an AggregatedSignature object, optionally taking + // an existing AggregatedSignature and building on top of it. + SignWithContext( + aggregatedSignature AggregatedSignature, + bitmaskIndex int, + privateKey []byte, + message []byte, + context []byte, + ) (AggregatedSignature, error) +} + +// AggregatedSignatureValidator provides a generic interface over aggregated +// signature validation. +type AggregatedSignatureValidator interface { + // VerifySignature validates the AggregatedSignature, with a binary pass/fail + // result. + VerifySignature( + aggregatedSignature AggregatedSignature, + message []byte, + context []byte, + ) bool +} diff --git a/consensus/models/consensus_state.go b/consensus/models/consensus_state.go new file mode 100644 index 0000000..50b6d66 --- /dev/null +++ b/consensus/models/consensus_state.go @@ -0,0 +1,15 @@ +package models + +// ConsensusState defines the core minimum data required to maintain consensus +// safety between the core consensus state machine and the users deriving +// from it, and is distinct from StateT (the object being built by the user). +type ConsensusState[VoteT Unique] struct { + // The filter scope of the consensus state. + Filter []byte + // The latest rank that has been finalized (e.g. cannot be forked below). + FinalizedRank uint64 + // The latest rank voted on in a quorum certificate or timeout certificate. + LatestAcknowledgedRank uint64 + // The latest timeout data produced by this instance. + LatestTimeout *TimeoutState[VoteT] +} diff --git a/consensus/models/control_flows.go b/consensus/models/control_flows.go new file mode 100644 index 0000000..c1485cb --- /dev/null +++ b/consensus/models/control_flows.go @@ -0,0 +1,24 @@ +package models + +import "time" + +// NextRank is the control flow event for when the next rank should be entered. +type NextRank struct { + // Rank is the next rank value. + Rank uint64 + // Start is the time the next rank was entered. + Start time.Time + // End is the time the next rank ends (i.e. times out). + End time.Time +} + +// TimerInfo is the control flow event for when the timeout controller +// initiates. +type TimerInfo struct { + // Rank is the next rank value. + Rank uint64 + // StartTime is the time the next timeout was started. + StartTime time.Time + // Duration is the time span from the start.
+ Duration time.Duration +} diff --git a/consensus/models/errors.go b/consensus/models/errors.go new file mode 100644 index 0000000..53ee53f --- /dev/null +++ b/consensus/models/errors.go @@ -0,0 +1,588 @@ +package models + +import ( + "errors" + "fmt" +) + +var ( + ErrUnverifiableState = errors.New("state proposal can't be verified") + ErrInvalidSignature = errors.New("invalid signature") + ErrRankUnknown = errors.New("rank is unknown") +) + +type NoVoteError struct { + Err error +} + +func (e NoVoteError) Error() string { + return fmt.Sprintf("not voting - %s", e.Err.Error()) +} + +func (e NoVoteError) Unwrap() error { + return e.Err +} + +// IsNoVoteError returns whether an error is NoVoteError +func IsNoVoteError(err error) bool { + var e NoVoteError + return errors.As(err, &e) +} + +func NewNoVoteErrorf(msg string, args ...interface{}) error { + return NoVoteError{Err: fmt.Errorf(msg, args...)} +} + +type NoTimeoutError struct { + Err error +} + +func (e NoTimeoutError) Error() string { + return fmt.Sprintf( + "conditions not satisfied to generate valid TimeoutState: %s", + e.Err.Error(), + ) +} + +func (e NoTimeoutError) Unwrap() error { + return e.Err +} + +func IsNoTimeoutError(err error) bool { + var e NoTimeoutError + return errors.As(err, &e) +} + +func NewNoTimeoutErrorf(msg string, args ...interface{}) error { + return NoTimeoutError{Err: fmt.Errorf(msg, args...)} +} + +type InvalidFormatError struct { + err error +} + +func NewInvalidFormatError(err error) error { + return InvalidFormatError{err} +} + +func NewInvalidFormatErrorf(msg string, args ...interface{}) error { + return InvalidFormatError{fmt.Errorf(msg, args...)} +} + +func (e InvalidFormatError) Error() string { return e.err.Error() } +func (e InvalidFormatError) Unwrap() error { return e.err } + +func IsInvalidFormatError(err error) bool { + var e InvalidFormatError + return errors.As(err, &e) +} + +type ConfigurationError struct { + err error +} + +func NewConfigurationError(err error) error { + return ConfigurationError{err} +} + +func NewConfigurationErrorf(msg string, args ...interface{}) error { + return ConfigurationError{fmt.Errorf(msg, args...)} +} + +func (e ConfigurationError) Error() string { return e.err.Error() } +func (e ConfigurationError) Unwrap() error { return e.err } + +func IsConfigurationError(err error) bool { + var e ConfigurationError + return errors.As(err, &e) +} + +type MissingStateError struct { + Rank uint64 + Identifier Identity +} + +func (e MissingStateError) Error() string { + return fmt.Sprintf( + "missing state at rank %d with ID %x", + e.Rank, + e.Identifier, + ) +} + +func IsMissingStateError(err error) bool { + var e MissingStateError + return errors.As(err, &e) +} + +type InvalidQuorumCertificateError struct { + Identifier Identity + Rank uint64 + Err error +} + +func (e InvalidQuorumCertificateError) Error() string { + return fmt.Sprintf( + "invalid QuorumCertificate for state %x at rank %d: %s", + e.Identifier, + e.Rank, + e.Err.Error(), + ) +} + +func IsInvalidQuorumCertificateError(err error) bool { + var e InvalidQuorumCertificateError + return errors.As(err, &e) +} + +func (e InvalidQuorumCertificateError) Unwrap() error { + return e.Err +} + +type InvalidTimeoutCertificateError struct { + Rank uint64 + Err error +} + +func (e InvalidTimeoutCertificateError) Error() string { + return fmt.Sprintf( + "invalid TimeoutCertificate at rank %d: %s", + e.Rank, + e.Err.Error(), + ) +} + +func IsInvalidTimeoutCertificateError(err error) bool { + var e 
InvalidTimeoutCertificateError + return errors.As(err, &e) +} + +func (e InvalidTimeoutCertificateError) Unwrap() error { + return e.Err +} + +type InvalidProposalError[StateT Unique, VoteT Unique] struct { + InvalidProposal *SignedProposal[StateT, VoteT] + Err error +} + +func NewInvalidProposalErrorf[StateT Unique, VoteT Unique]( + proposal *SignedProposal[StateT, VoteT], + msg string, + args ...interface{}, +) error { + return InvalidProposalError[StateT, VoteT]{ + InvalidProposal: proposal, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidProposalError[StateT, VoteT]) Error() string { + return fmt.Sprintf( + "invalid proposal %x at rank %d: %s", + e.InvalidProposal.State.Identifier, + e.InvalidProposal.State.Rank, + e.Err.Error(), + ) +} + +func (e InvalidProposalError[StateT, VoteT]) Unwrap() error { + return e.Err +} + +func IsInvalidProposalError[StateT Unique, VoteT Unique](err error) bool { + var e InvalidProposalError[StateT, VoteT] + return errors.As(err, &e) +} + +func AsInvalidProposalError[StateT Unique, VoteT Unique]( + err error, +) (*InvalidProposalError[StateT, VoteT], bool) { + var e InvalidProposalError[StateT, VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +type InvalidStateError[StateT Unique] struct { + InvalidState *State[StateT] + Err error +} + +func NewInvalidStateErrorf[StateT Unique]( + state *State[StateT], + msg string, + args ...interface{}, +) error { + return InvalidStateError[StateT]{ + InvalidState: state, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidStateError[StateT]) Error() string { + return fmt.Sprintf( + "invalid state %x at rank %d: %s", + e.InvalidState.Identifier, + e.InvalidState.Rank, + e.Err.Error(), + ) +} + +func IsInvalidStateError[StateT Unique](err error) bool { + var e InvalidStateError[StateT] + return errors.As(err, &e) +} + +func AsInvalidStateError[StateT Unique](err error) ( + *InvalidStateError[StateT], + bool, +) { + var e InvalidStateError[StateT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidStateError[StateT]) Unwrap() error { + return e.Err +} + +type InvalidVoteError[VoteT Unique] struct { + Vote *VoteT + Err error +} + +func NewInvalidVoteErrorf[VoteT Unique]( + vote *VoteT, + msg string, + args ...interface{}, +) error { + return InvalidVoteError[VoteT]{ + Vote: vote, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidVoteError[VoteT]) Error() string { + return fmt.Sprintf( + "invalid vote at rank %d for state %x: %s", + (*e.Vote).GetRank(), + (*e.Vote).Identity(), + e.Err.Error(), + ) +} + +func IsInvalidVoteError[VoteT Unique](err error) bool { + var e InvalidVoteError[VoteT] + return errors.As(err, &e) +} + +func AsInvalidVoteError[VoteT Unique](err error) ( + *InvalidVoteError[VoteT], + bool, +) { + var e InvalidVoteError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidVoteError[VoteT]) Unwrap() error { + return e.Err +} + +type ByzantineThresholdExceededError struct { + Evidence string +} + +func (e ByzantineThresholdExceededError) Error() string { + return e.Evidence +} + +func IsByzantineThresholdExceededError(err error) bool { + var target ByzantineThresholdExceededError + return errors.As(err, &target) +} + +type DoubleVoteError[VoteT Unique] struct { + FirstVote *VoteT + ConflictingVote *VoteT + err error +} + +func (e DoubleVoteError[VoteT]) Error() string { + return e.err.Error() +} + +func IsDoubleVoteError[VoteT Unique](err 
error) bool { + var e DoubleVoteError[VoteT] + return errors.As(err, &e) +} + +func AsDoubleVoteError[VoteT Unique](err error) ( + *DoubleVoteError[VoteT], + bool, +) { + var e DoubleVoteError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e DoubleVoteError[VoteT]) Unwrap() error { + return e.err +} + +func NewDoubleVoteErrorf[VoteT Unique]( + firstVote, conflictingVote *VoteT, + msg string, + args ...interface{}, +) error { + return DoubleVoteError[VoteT]{ + FirstVote: firstVote, + ConflictingVote: conflictingVote, + err: fmt.Errorf(msg, args...), + } +} + +type DuplicatedSignerError struct { + err error +} + +func NewDuplicatedSignerError(err error) error { + return DuplicatedSignerError{err} +} + +func NewDuplicatedSignerErrorf(msg string, args ...interface{}) error { + return DuplicatedSignerError{err: fmt.Errorf(msg, args...)} +} + +func (e DuplicatedSignerError) Error() string { return e.err.Error() } +func (e DuplicatedSignerError) Unwrap() error { return e.err } + +func IsDuplicatedSignerError(err error) bool { + var e DuplicatedSignerError + return errors.As(err, &e) +} + +type InvalidSignatureIncludedError struct { + err error +} + +func NewInvalidSignatureIncludedError(err error) error { + return InvalidSignatureIncludedError{err} +} + +func NewInvalidSignatureIncludedErrorf(msg string, args ...interface{}) error { + return InvalidSignatureIncludedError{fmt.Errorf(msg, args...)} +} + +func (e InvalidSignatureIncludedError) Error() string { return e.err.Error() } +func (e InvalidSignatureIncludedError) Unwrap() error { return e.err } + +func IsInvalidSignatureIncludedError(err error) bool { + var e InvalidSignatureIncludedError + return errors.As(err, &e) +} + +type InvalidAggregatedKeyError struct { + error +} + +func NewInvalidAggregatedKeyError(err error) error { + return InvalidAggregatedKeyError{err} +} + +func NewInvalidAggregatedKeyErrorf(msg string, args ...interface{}) error { + return InvalidAggregatedKeyError{fmt.Errorf(msg, args...)} +} + +func (e InvalidAggregatedKeyError) Unwrap() error { return e.error } + +func IsInvalidAggregatedKeyError(err error) bool { + var e InvalidAggregatedKeyError + return errors.As(err, &e) +} + +type InsufficientSignaturesError struct { + err error +} + +func NewInsufficientSignaturesError(err error) error { + return InsufficientSignaturesError{err} +} + +func NewInsufficientSignaturesErrorf(msg string, args ...interface{}) error { + return InsufficientSignaturesError{fmt.Errorf(msg, args...)} +} + +func (e InsufficientSignaturesError) Error() string { return e.err.Error() } +func (e InsufficientSignaturesError) Unwrap() error { return e.err } + +func IsInsufficientSignaturesError(err error) bool { + var e InsufficientSignaturesError + return errors.As(err, &e) +} + +type InvalidSignerError struct { + err error +} + +func NewInvalidSignerError(err error) error { + return InvalidSignerError{err} +} + +func NewInvalidSignerErrorf(msg string, args ...interface{}) error { + return InvalidSignerError{fmt.Errorf(msg, args...)} +} + +func (e InvalidSignerError) Error() string { return e.err.Error() } +func (e InvalidSignerError) Unwrap() error { return e.err } + +func IsInvalidSignerError(err error) bool { + var e InvalidSignerError + return errors.As(err, &e) +} + +type DoubleTimeoutError[VoteT Unique] struct { + FirstTimeout *TimeoutState[VoteT] + ConflictingTimeout *TimeoutState[VoteT] + err error +} + +func (e DoubleTimeoutError[VoteT]) Error() string { + return e.err.Error() +} + +func 
IsDoubleTimeoutError[VoteT Unique](err error) bool { + var e DoubleTimeoutError[VoteT] + return errors.As(err, &e) +} + +func AsDoubleTimeoutError[VoteT Unique](err error) ( + *DoubleTimeoutError[VoteT], + bool, +) { + var e DoubleTimeoutError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e DoubleTimeoutError[VoteT]) Unwrap() error { + return e.err +} + +func NewDoubleTimeoutErrorf[VoteT Unique]( + firstTimeout, conflictingTimeout *TimeoutState[VoteT], + msg string, + args ...interface{}, +) error { + return DoubleTimeoutError[VoteT]{ + FirstTimeout: firstTimeout, + ConflictingTimeout: conflictingTimeout, + err: fmt.Errorf(msg, args...), + } +} + +type InvalidTimeoutError[VoteT Unique] struct { + Timeout *TimeoutState[VoteT] + Err error +} + +func NewInvalidTimeoutErrorf[VoteT Unique]( + timeout *TimeoutState[VoteT], + msg string, + args ...interface{}, +) error { + return InvalidTimeoutError[VoteT]{ + Timeout: timeout, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidTimeoutError[VoteT]) Error() string { + return fmt.Sprintf("invalid timeout: %d: %s", + e.Timeout.Rank, + e.Err.Error(), + ) +} + +func IsInvalidTimeoutError[VoteT Unique](err error) bool { + var e InvalidTimeoutError[VoteT] + return errors.As(err, &e) +} + +func AsInvalidTimeoutError[VoteT Unique](err error) ( + *InvalidTimeoutError[VoteT], + bool, +) { + var e InvalidTimeoutError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidTimeoutError[VoteT]) Unwrap() error { + return e.Err +} + +// UnknownExecutionResultError indicates that the Execution Result is unknown +type UnknownExecutionResultError struct { + err error +} + +func NewUnknownExecutionResultErrorf(msg string, args ...interface{}) error { + return UnknownExecutionResultError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownExecutionResultError) Unwrap() error { + return e.err +} + +func (e UnknownExecutionResultError) Error() string { + return e.err.Error() +} + +func IsUnknownExecutionResultError(err error) bool { + var unknownExecutionResultError UnknownExecutionResultError + return errors.As(err, &unknownExecutionResultError) +} + +type BelowPrunedThresholdError struct { + err error +} + +func NewBelowPrunedThresholdErrorf(msg string, args ...interface{}) error { + return BelowPrunedThresholdError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e BelowPrunedThresholdError) Unwrap() error { + return e.err +} + +func (e BelowPrunedThresholdError) Error() string { + return e.err.Error() +} + +func IsBelowPrunedThresholdError(err error) bool { + var newIsBelowPrunedThresholdError BelowPrunedThresholdError + return errors.As(err, &newIsBelowPrunedThresholdError) +} diff --git a/consensus/models/liveness_state.go b/consensus/models/liveness_state.go new file mode 100644 index 0000000..e7b3495 --- /dev/null +++ b/consensus/models/liveness_state.go @@ -0,0 +1,14 @@ +package models + +// LivenessState defines the core minimum data required to maintain liveness +// of the pacemaker of the consensus state machine. +type LivenessState struct { + // The filter scope of the consensus state. + Filter []byte + // The current rank of the pacemaker. + CurrentRank uint64 + // The latest quorum certificate seen by the pacemaker. + LatestQuorumCertificate QuorumCertificate + // The previous rank's timeout certificate, if applicable. 
+ PriorRankTimeoutCertificate TimeoutCertificate +} diff --git a/consensus/models/proposal.go b/consensus/models/proposal.go new file mode 100644 index 0000000..a21bf86 --- /dev/null +++ b/consensus/models/proposal.go @@ -0,0 +1,45 @@ +package models + +import ( + "errors" +) + +type Proposal[StateT Unique] struct { + State *State[StateT] + PreviousRankTimeoutCertificate TimeoutCertificate +} + +func ProposalFrom[StateT Unique]( + state *State[StateT], + prevTC TimeoutCertificate, +) *Proposal[StateT] { + return &Proposal[StateT]{ + State: state, + PreviousRankTimeoutCertificate: prevTC, + } +} + +type SignedProposal[StateT Unique, VoteT Unique] struct { + Proposal[StateT] + Vote *VoteT +} + +func (p *SignedProposal[StateT, VoteT]) ProposerVote() (*VoteT, error) { + if p.Vote == nil { + return nil, errors.New("missing vote") + } + return p.Vote, nil +} + +func SignedProposalFromState[StateT Unique, VoteT Unique]( + p *Proposal[StateT], + v *VoteT, +) *SignedProposal[StateT, VoteT] { + return &SignedProposal[StateT, VoteT]{ + Proposal: Proposal[StateT]{ + State: p.State, + PreviousRankTimeoutCertificate: p.PreviousRankTimeoutCertificate, + }, + Vote: v, + } +} diff --git a/consensus/models/quorum_certificate.go b/consensus/models/quorum_certificate.go new file mode 100644 index 0000000..bee4d96 --- /dev/null +++ b/consensus/models/quorum_certificate.go @@ -0,0 +1,20 @@ +package models + +// QuorumCertificate defines the minimum properties required of a consensus +// clique's validating set of data for a frame. +type QuorumCertificate interface { + // GetFilter returns the applicable filter for the consensus clique. + GetFilter() []byte + // GetRank returns the rank of the consensus loop. + GetRank() uint64 + // GetFrameNumber returns the frame number applied to the round. + GetFrameNumber() uint64 + // Identity returns the selector of the frame. + Identity() Identity + // GetTimestamp returns the timestamp of the certificate. + GetTimestamp() uint64 + // GetAggregatedSignature returns the set of signers who voted on the round. + GetAggregatedSignature() AggregatedSignature + // Equals compares inner equality with another quorum certificate. + Equals(other QuorumCertificate) bool +} diff --git a/consensus/models/state.go b/consensus/models/state.go new file mode 100644 index 0000000..0b12a98 --- /dev/null +++ b/consensus/models/state.go @@ -0,0 +1,101 @@ +package models + +import ( + "fmt" +) + +// State is the HotStuff algorithm's concept of a state, which - in the bigger +// picture - corresponds to the state header. +type State[StateT Unique] struct { + Rank uint64 + Identifier Identity + ProposerID Identity + ParentQuorumCertificate QuorumCertificate + Timestamp uint64 // Unix milliseconds + State *StateT +} + +// StateFrom combines external state with source parent quorum certificate. +func StateFrom[StateT Unique]( + t *StateT, + parentCert QuorumCertificate, +) *State[StateT] { + state := State[StateT]{ + Identifier: (*t).Identity(), + Rank: (*t).GetRank(), + ParentQuorumCertificate: parentCert, + ProposerID: (*t).Source(), + Timestamp: (*t).GetTimestamp(), + State: t, + } + + return &state +} + +// GenesisStateFrom returns a generic consensus model of genesis state. 
+func GenesisStateFrom[StateT Unique](internal *StateT) *State[StateT] { + genesis := &State[StateT]{ + Identifier: (*internal).Identity(), + Rank: (*internal).GetRank(), + ProposerID: (*internal).Source(), + ParentQuorumCertificate: nil, + Timestamp: (*internal).GetTimestamp(), + State: internal, + } + return genesis +} + +// CertifiedState holds a certified state, which is a state and a +// QuorumCertificate that is pointing to the state. A QuorumCertificate is the +// aggregated form of votes from a supermajority of HotStuff and +// therefore proves validity of the state. A certified state satisfies: +// State.Rank == QuorumCertificate.Rank and +// State.Identifier == QuorumCertificate.Identifier +type CertifiedState[StateT Unique] struct { + State *State[StateT] + CertifyingQuorumCertificate QuorumCertificate +} + +// NewCertifiedState constructs a new certified state. It checks the consistency +// requirements and returns an exception otherwise: +// +// State.Rank == QuorumCertificate.Rank and State.Identifier == +// +// QuorumCertificate.Identifier +func NewCertifiedState[StateT Unique]( + state *State[StateT], + quorumCertificate QuorumCertificate, +) (*CertifiedState[StateT], error) { + if state.Rank != quorumCertificate.GetRank() { + return &CertifiedState[StateT]{}, + fmt.Errorf( + "state's rank (%d) should equal the qc's rank (%d)", + state.Rank, + quorumCertificate.GetRank(), + ) + } + if state.Identifier != quorumCertificate.Identity() { + return &CertifiedState[StateT]{}, + fmt.Errorf( + "state's ID (%x) should equal the state referenced by the qc (%x)", + state.Identifier, + quorumCertificate.Identity(), + ) + } + return &CertifiedState[StateT]{ + State: state, + CertifyingQuorumCertificate: quorumCertificate, + }, nil +} + +// Identifier returns a unique identifier for the state (the ID signed to +// produce a state vote). To avoid repeated computation, we use value from the +// QuorumCertificate. +func (b *CertifiedState[StateT]) Identifier() Identity { + return b.CertifyingQuorumCertificate.Identity() +} + +// Rank returns rank where the state was proposed. +func (b *CertifiedState[StateT]) Rank() uint64 { + return b.State.Rank +} diff --git a/consensus/models/timeout_certificate.go b/consensus/models/timeout_certificate.go new file mode 100644 index 0000000..288d886 --- /dev/null +++ b/consensus/models/timeout_certificate.go @@ -0,0 +1,19 @@ +package models + +// TimeoutCertificate defines the minimum properties required of a consensus +// clique's invalidating set of data for a frame. +type TimeoutCertificate interface { + // GetFilter returns the applicable filter for the consensus clique. + GetFilter() []byte + // GetRank returns the rank of the consensus loop. + GetRank() uint64 + // GetLatestRanks returns the latest ranks seen by members of clique, in + // matching order to the clique's prover set (in ascending ring order). + GetLatestRanks() []uint64 + // GetLatestQuorumCert returns the latest quorum certificate accepted. + GetLatestQuorumCert() QuorumCertificate + // GetAggregatedSignature returns the set of signers who voted on the round. + GetAggregatedSignature() AggregatedSignature + // Equals compares inner equality with another timeout certificate. 
+ Equals(other TimeoutCertificate) bool +} diff --git a/consensus/models/timeout_state.go b/consensus/models/timeout_state.go new file mode 100644 index 0000000..66f13b5 --- /dev/null +++ b/consensus/models/timeout_state.go @@ -0,0 +1,65 @@ +package models + +import ( + "bytes" +) + +// TimeoutState represents the stored state change step relevant to the point of +// rank of a given instance of the consensus state machine. +type TimeoutState[VoteT Unique] struct { + // The rank of the timeout data. + Rank uint64 + // The latest quorum certificate seen by the pacemaker. + LatestQuorumCertificate QuorumCertificate + // The previous rank's timeout certificate, if applicable. + PriorRankTimeoutCertificate TimeoutCertificate + // The signed payload which will become part of the new timeout certificate. + Vote *VoteT + // TimeoutTick is the number of times the `timeout.Controller` has + // (re-)emitted the timeout for this rank. When the timer for the rank's + // original duration expires, a `TimeoutState` with `TimeoutTick = 0` is + // broadcast. Subsequently, `timeout.Controller` re-broadcasts the + // `TimeoutState` periodically based on some internal heuristic. Each time + // we attempt a re-broadcast, the `TimeoutTick` is incremented. Incrementing + // the field prevents de-duplicated within the network layer, which in turn + // guarantees quick delivery of the `TimeoutState` after GST and facilitates + // recovery. + TimeoutTick uint64 +} + +func (t *TimeoutState[VoteT]) Equals(other *TimeoutState[VoteT]) bool { + // Shortcut if `t` and `other` point to the same object; covers case where + // both are nil. + if t == other { + return true + } + if t == nil || other == nil { + // only one is nil, the other not (otherwise we would have returned above) + return false + } + + if t.Vote != other.Vote && (other.Vote == nil || t.Vote == nil) { + return false + } + + if (t.PriorRankTimeoutCertificate != nil && + other.PriorRankTimeoutCertificate == nil) || + (t.PriorRankTimeoutCertificate == nil && + other.PriorRankTimeoutCertificate != nil) { + return false + } + + // both are not nil, so we can compare the fields + return t.Rank == other.Rank && + ((t.LatestQuorumCertificate == nil && + other.LatestQuorumCertificate == nil) || + t.LatestQuorumCertificate.Equals(other.LatestQuorumCertificate)) && + ((t.PriorRankTimeoutCertificate == nil && + other.PriorRankTimeoutCertificate == nil) || + t.PriorRankTimeoutCertificate.Equals( + other.PriorRankTimeoutCertificate, + )) && + ((t.Vote == other.Vote) || + ((*t.Vote).Source() == (*other.Vote).Source()) && + bytes.Equal((*t.Vote).GetSignature(), (*other.Vote).GetSignature())) +} diff --git a/consensus/models/unique.go b/consensus/models/unique.go new file mode 100644 index 0000000..de158c0 --- /dev/null +++ b/consensus/models/unique.go @@ -0,0 +1,26 @@ +package models + +type Identity = string + +// Unique defines important attributes for distinguishing relative basis of +// items. +type Unique interface { + // Identity provides the relevant identity of the given Unique. + Identity() Identity + // Clone should provide a shallow clone of the Unique. + Clone() Unique + // GetRank indicates the ordinal basis of comparison. + GetRank() uint64 + // Source provides the relevant identity of who issued the given Unique. + Source() Identity + // GetTimestamp provides the relevant timestamp of the given Unique. + GetTimestamp() uint64 + // GetSignature provides the signature of the given Unique (if present). 
+ GetSignature() []byte +} + +type WeightedIdentity interface { + PublicKey() []byte + Identity() Identity + Weight() uint64 +} diff --git a/consensus/notifications/log_consumer.go b/consensus/notifications/log_consumer.go new file mode 100644 index 0000000..e84c255 --- /dev/null +++ b/consensus/notifications/log_consumer.go @@ -0,0 +1,486 @@ +package notifications + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LogConsumer is an implementation of the notifications consumer that logs a +// message for each event. +type LogConsumer[StateT models.Unique, VoteT models.Unique] struct { + log consensus.TraceLogger +} + +var _ consensus.Consumer[*nilUnique, *nilUnique] = (*LogConsumer[*nilUnique, *nilUnique])(nil) +var _ consensus.TimeoutAggregationConsumer[*nilUnique] = (*LogConsumer[*nilUnique, *nilUnique])(nil) +var _ consensus.VoteAggregationConsumer[*nilUnique, *nilUnique] = (*LogConsumer[*nilUnique, *nilUnique])(nil) + +func NewLogConsumer[StateT models.Unique, VoteT models.Unique]( + log consensus.TraceLogger, +) *LogConsumer[StateT, VoteT] { + lc := &LogConsumer[StateT, VoteT]{ + log: log, + } + return lc +} + +func (lc *LogConsumer[StateT, VoteT]) OnEventProcessed() { + lc.log.Trace("event processed") +} + +func (lc *LogConsumer[StateT, VoteT]) OnStart(currentRank uint64) { + lc.log.With( + consensus.Uint64Param("cur_rank", currentRank), + ).Trace("starting event handler") +} + +func (lc *LogConsumer[StateT, VoteT]) OnStateIncorporated( + state *models.State[StateT], +) { + lc.logBasicStateData(lc.log, state). + Trace("state incorporated") +} + +func (lc *LogConsumer[StateT, VoteT]) OnFinalizedState( + state *models.State[StateT], +) { + lc.logBasicStateData(lc.log, state).Trace("state finalized") +} + +func (lc *LogConsumer[StateT, VoteT]) OnInvalidStateDetected( + err *models.InvalidProposalError[StateT, VoteT], +) { + invalidState := err.InvalidProposal.State + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.IdentityParam( + "origin_id", + err.InvalidProposal.Proposal.State.ProposerID, + ), + consensus.Uint64Param("state_rank", invalidState.Rank), + consensus.IdentityParam("proposer_id", invalidState.ProposerID), + consensus.IdentityParam("state_id", invalidState.Identifier), + consensus.Uint64Param( + "qc_state_rank", + invalidState.ParentQuorumCertificate.GetRank(), + ), + consensus.IdentityParam( + "qc_state_id", + invalidState.ParentQuorumCertificate.Identity(), + ), + ).Error("invalid state detected", err) +} + +func (lc *LogConsumer[StateT, VoteT]) OnDoubleProposeDetected( + state *models.State[StateT], + alt *models.State[StateT], +) { + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.Uint64Param("state_rank", state.Rank), + consensus.IdentityParam("state_id", state.Identifier), + consensus.IdentityParam("alt_id", (*alt.State).Identity()), + consensus.IdentityParam("proposer_id", state.ProposerID), + ).Trace("double proposal detected") +} + +func (lc *LogConsumer[StateT, VoteT]) OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[StateT, VoteT], +) { + logger := lc.logBasicStateData(lc.log, proposal.State).With( + consensus.Uint64Param("cur_rank", currentRank), + ) + lastRankTC := proposal.PreviousRankTimeoutCertificate + if lastRankTC != nil { + logger = logger.With( + consensus.Uint64Param("last_rank_tc_rank", lastRankTC.GetRank()), + consensus.Uint64Param( + "last_rank_tc_newest_qc_rank", + 
lastRankTC.GetLatestQuorumCert().GetRank(), + ), + consensus.IdentityParam( + "last_rank_tc_newest_qc_state_id", + lastRankTC.GetLatestQuorumCert().Identity(), + ), + ) + } + + logger.Trace("processing proposal") +} + +func (lc *LogConsumer[StateT, VoteT]) OnReceiveQuorumCertificate( + currentRank uint64, + qc models.QuorumCertificate, +) { + lc.log.With( + consensus.Uint64Param("cur_rank", currentRank), + consensus.Uint64Param("qc_rank", qc.GetRank()), + consensus.IdentityParam("qc_state_id", qc.Identity()), + ).Trace("processing QC") +} + +func (lc *LogConsumer[StateT, VoteT]) OnReceiveTimeoutCertificate( + currentRank uint64, + tc models.TimeoutCertificate, +) { + lc.log.With( + consensus.Uint64Param("cur_rank", currentRank), + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param("newest_qc_rank", tc.GetLatestQuorumCert().GetRank()), + consensus.IdentityParam( + "newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + ), + ).Trace("processing TC") +} + +func (lc *LogConsumer[StateT, VoteT]) OnPartialTimeoutCertificate( + currentRank uint64, + partialTc *consensus.PartialTimeoutCertificateCreated, +) { + logger := lc.log.With( + consensus.Uint64Param("cur_rank", currentRank), + consensus.Uint64Param("rank", partialTc.Rank), + consensus.Uint64Param( + "qc_rank", + partialTc.NewestQuorumCertificate.GetRank(), + ), + consensus.IdentityParam( + "qc_state_id", + partialTc.NewestQuorumCertificate.Identity(), + ), + ) + lastRankTC := partialTc.PriorRankTimeoutCertificate + if lastRankTC != nil { + logger = logger.With( + consensus.Uint64Param("last_rank_tc_rank", lastRankTC.GetRank()), + consensus.Uint64Param( + "last_rank_tc_newest_qc_rank", + lastRankTC.GetLatestQuorumCert().GetRank(), + ), + consensus.IdentityParam( + "last_rank_tc_newest_qc_state_id", + lastRankTC.GetLatestQuorumCert().Identity(), + ), + ) + } + + logger.Trace("processing partial TC") +} + +func (lc *LogConsumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) { + lc.log.With( + consensus.Uint64Param("cur_rank", currentRank), + ).Trace("processing local timeout") +} + +func (lc *LogConsumer[StateT, VoteT]) OnRankChange(oldRank, newRank uint64) { + lc.log.With( + consensus.Uint64Param("old_rank", oldRank), + consensus.Uint64Param("new_rank", newRank), + ).Trace("entered new rank") +} + +func (lc *LogConsumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + qc models.QuorumCertificate, +) { + lc.log.With( + consensus.Uint64Param("qc_rank", qc.GetRank()), + consensus.IdentityParam("qc_state_id", qc.Identity()), + consensus.Uint64Param("old_rank", oldRank), + consensus.Uint64Param("new_rank", newRank), + ).Trace("QC triggered rank change") +} + +func (lc *LogConsumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, +) { + lc.log.With( + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param( + "tc_newest_qc_rank", + tc.GetLatestQuorumCert().GetRank(), + ), + consensus.Uint64Param("new_rank", newRank), + consensus.Uint64Param("old_rank", oldRank), + ).Trace("TC triggered rank change") +} + +func (lc *LogConsumer[StateT, VoteT]) OnStartingTimeout( + startTime time.Time, + endTime time.Time, +) { + lc.log.With( + consensus.TimeParam("timeout_start", startTime), + consensus.TimeParam("timeout_cutoff", endTime), + ).Trace("timeout started") +} + +func (lc *LogConsumer[StateT, VoteT]) OnVoteProcessed(vote *VoteT) { + lc.log.With( + consensus.IdentityParam("state_id", 
(*vote).Source()), + consensus.Uint64Param("state_rank", (*vote).GetRank()), + consensus.IdentityParam("recipient_id", (*vote).Identity()), + ).Trace("processed valid HotStuff vote") +} + +func (lc *LogConsumer[StateT, VoteT]) OnTimeoutProcessed( + timeout *models.TimeoutState[VoteT], +) { + logger := lc.log.With( + consensus.Uint64Param("timeout_rank", timeout.Rank), + consensus.Uint64Param( + "timeout_newest_qc_rank", + timeout.LatestQuorumCertificate.GetRank(), + ), + consensus.IdentityParam("timeout_vote_id", (*timeout.Vote).Identity()), + consensus.Uint64Param("timeout_tick", timeout.TimeoutTick), + ) + if timeout.PriorRankTimeoutCertificate != nil { + logger = logger.With( + consensus.Uint64Param( + "timeout_last_tc_rank", + timeout.PriorRankTimeoutCertificate.GetRank(), + ), + ) + } + logger.Trace("processed valid timeout object") +} + +func (lc *LogConsumer[StateT, VoteT]) OnCurrentRankDetails( + currentRank, finalizedRank uint64, + currentLeader models.Identity, +) { + lc.log.With( + consensus.Uint64Param("rank", currentRank), + consensus.Uint64Param("finalized_rank", finalizedRank), + consensus.IdentityParam("current_leader", currentLeader), + ).Trace("current rank details") +} + +func (lc *LogConsumer[StateT, VoteT]) OnDoubleVotingDetected( + vote *VoteT, + alt *VoteT, +) { + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.Uint64Param("vote_rank", (*vote).GetRank()), + consensus.IdentityParam("voted_state_id", (*vote).Source()), + consensus.IdentityParam("alt_id", (*vote).Source()), + consensus.IdentityParam("voter_id", (*vote).Identity()), + ).Trace("double vote detected") +} + +func (lc *LogConsumer[StateT, VoteT]) OnInvalidVoteDetected( + err models.InvalidVoteError[VoteT], +) { + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.Uint64Param("vote_rank", (*err.Vote).GetRank()), + consensus.IdentityParam("voted_state_id", (*err.Vote).Source()), + consensus.IdentityParam("voter_id", (*err.Vote).Identity()), + ).Error("invalid vote detected", err) +} + +func (lc *LogConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected( + vote *VoteT, + proposal *models.SignedProposal[StateT, VoteT], +) { + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.Uint64Param("vote_rank", (*vote).GetRank()), + consensus.IdentityParam("voted_state_id", (*vote).Source()), + consensus.IdentityParam("voter_id", (*vote).Identity()), + consensus.IdentityParam("proposer_id", proposal.State.ProposerID), + ).Trace("vote for invalid proposal detected") +} + +func (lc *LogConsumer[StateT, VoteT]) OnDoubleTimeoutDetected( + timeout *models.TimeoutState[VoteT], + alt *models.TimeoutState[VoteT], +) { + lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.IdentityParam("timeout_signer_id", (*timeout.Vote).Identity()), + consensus.Uint64Param("timeout_rank", timeout.Rank), + consensus.Uint64Param( + "timeout_newest_qc_rank", + timeout.LatestQuorumCertificate.GetRank(), + ), + consensus.IdentityParam("alt_signer_id", (*alt.Vote).Identity()), + consensus.Uint64Param("alt_rank", alt.Rank), + consensus.Uint64Param( + "alt_newest_qc_rank", + alt.LatestQuorumCertificate.GetRank(), + ), + ).Trace("double timeout detected") +} + +func (lc *LogConsumer[StateT, VoteT]) OnInvalidTimeoutDetected( + err models.InvalidTimeoutError[VoteT], +) { + timeout := err.Timeout + logger := lc.log.With( + consensus.StringParam("suspicious", "true"), + consensus.Uint64Param("timeout_rank", timeout.Rank), + consensus.Uint64Param( + "timeout_newest_qc_rank", 
+ timeout.LatestQuorumCertificate.GetRank(), + ), + consensus.IdentityParam("timeout_vote_id", (*timeout.Vote).Identity()), + consensus.Uint64Param("timeout_tick", timeout.TimeoutTick), + ) + if timeout.PriorRankTimeoutCertificate != nil { + logger = logger.With( + consensus.Uint64Param( + "timeout_last_tc_rank", + timeout.PriorRankTimeoutCertificate.GetRank(), + ), + ) + } + logger.Error("invalid timeout detected", err) +} + +func (lc *LogConsumer[StateT, VoteT]) logBasicStateData( + logger consensus.TraceLogger, + state *models.State[StateT], +) consensus.TraceLogger { + return logger.With( + consensus.Uint64Param("state_rank", state.Rank), + consensus.IdentityParam("state_id", state.Identifier), + consensus.IdentityParam("proposer_id", state.ProposerID), + consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()), + consensus.IdentityParam( + "qc_state_id", + state.ParentQuorumCertificate.Identity(), + ), + ) +} + +func ( + lc *LogConsumer[StateT, VoteT], +) OnTimeoutCertificateConstructedFromTimeouts( + tc models.TimeoutCertificate, +) { + lc.log.With( + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param("newest_qc_rank", tc.GetLatestQuorumCert().GetRank()), + consensus.IdentityParam( + "newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + ), + ).Trace("TC constructed") +} + +func (lc *LogConsumer[StateT, VoteT]) OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, +) { + has := "false" + if lastRankTC != nil { + has = "true" + } + lc.log.With( + consensus.Uint64Param("rank", rank), + consensus.Uint64Param("newest_qc_rank", newestQC.GetRank()), + consensus.IdentityParam("newest_qc_state_id", newestQC.Identity()), + consensus.StringParam("has_last_rank_tc", has), + ).Trace("partial TC constructed") +} + +func (lc *LogConsumer[StateT, VoteT]) OnNewQuorumCertificateDiscovered( + qc models.QuorumCertificate, +) { + lc.log.With( + consensus.Uint64Param("qc_rank", qc.GetRank()), + consensus.IdentityParam("qc_state_id", qc.Identity()), + ).Trace("new QC discovered") +} + +func (lc *LogConsumer[StateT, VoteT]) OnNewTimeoutCertificateDiscovered( + tc models.TimeoutCertificate, +) { + lc.log.With( + consensus.Uint64Param("tc_rank", tc.GetRank()), + consensus.Uint64Param("newest_qc_rank", tc.GetLatestQuorumCert().GetRank()), + consensus.IdentityParam( + "newest_qc_state_id", + tc.GetLatestQuorumCert().Identity(), + ), + ).Trace("new TC discovered") +} + +func (lc *LogConsumer[StateT, VoteT]) OnOwnVote( + vote *VoteT, + recipientID models.Identity, +) { + lc.log.With( + consensus.IdentityParam("state_id", (*vote).Source()), + consensus.Uint64Param("state_rank", (*vote).GetRank()), + consensus.IdentityParam("recipient_id", recipientID), + ).Trace("publishing HotStuff vote") +} + +func (lc *LogConsumer[StateT, VoteT]) OnOwnTimeout( + timeout *models.TimeoutState[VoteT], +) { + logger := lc.log.With( + consensus.Uint64Param("timeout_rank", timeout.Rank), + consensus.Uint64Param( + "timeout_newest_qc_rank", + timeout.LatestQuorumCertificate.GetRank(), + ), + consensus.IdentityParam("timeout_vote_id", (*timeout.Vote).Identity()), + consensus.Uint64Param("timeout_tick", timeout.TimeoutTick), + ) + if timeout.PriorRankTimeoutCertificate != nil { + logger = logger.With( + consensus.Uint64Param( + "timeout_last_tc_rank", + timeout.PriorRankTimeoutCertificate.GetRank(), + ), + ) + } + logger.Trace("publishing HotStuff timeout object") +} + +func (lc *LogConsumer[StateT, VoteT]) 
OnOwnProposal( + proposal *models.SignedProposal[StateT, VoteT], + targetPublicationTime time.Time, +) { + header := proposal.Proposal + lc.log.With( + consensus.Uint64Param( + "state_frame", + header.State.ParentQuorumCertificate.GetFrameNumber()+1, + ), + consensus.Uint64Param("state_rank", header.State.Rank), + consensus.IdentityParam("state_id", header.State.Identifier), + consensus.IdentityParam( + "parent_qc_id", + header.State.ParentQuorumCertificate.Identity(), + ), + consensus.TimeParam( + "timestamp", + time.UnixMilli(int64(header.State.Timestamp)), + ), + consensus.TimeParam("target_publication_time", targetPublicationTime), + ).Trace("publishing HotStuff state proposal") +} + +func (lc *LogConsumer[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes( + qc models.QuorumCertificate, +) { + lc.log.With( + consensus.Uint64Param("rank", qc.GetRank()), + consensus.IdentityParam("state_id", qc.Identity()), + ).Trace("QC constructed from votes") +} diff --git a/consensus/notifications/noop_consumer.go b/consensus/notifications/noop_consumer.go new file mode 100644 index 0000000..2f7d3b2 --- /dev/null +++ b/consensus/notifications/noop_consumer.go @@ -0,0 +1,177 @@ +package notifications + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// NoopConsumer is an implementation of the notifications consumer that +// doesn't do anything. +type NoopConsumer[StateT models.Unique, VoteT models.Unique] struct { + NoopProposalViolationConsumer[StateT, VoteT] + NoopFinalizationConsumer[StateT] + NoopParticipantConsumer[StateT, VoteT] + NoopCommunicatorConsumer[StateT, VoteT] +} + +var _ consensus.Consumer[*nilUnique, *nilUnique] = (*NoopConsumer[*nilUnique, *nilUnique])(nil) + +func NewNoopConsumer[ + StateT models.Unique, + VoteT models.Unique, +]() *NoopConsumer[StateT, VoteT] { + nc := &NoopConsumer[StateT, VoteT]{} + return nc +} + +// no-op implementation of consensus.Consumer(but not nested interfaces) + +type NoopParticipantConsumer[StateT models.Unique, VoteT models.Unique] struct{} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnEventProcessed() {} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnStart(uint64) {} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnReceiveProposal(uint64, *models.SignedProposal[StateT, VoteT]) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnReceiveQuorumCertificate(uint64, models.QuorumCertificate) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnReceiveTimeoutCertificate(uint64, models.TimeoutCertificate) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnPartialTimeoutCertificate(uint64, *consensus.PartialTimeoutCertificateCreated) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnLocalTimeout(uint64) {} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnRankChange(uint64, uint64) {} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(uint64, uint64, models.QuorumCertificate) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(uint64, uint64, models.TimeoutCertificate) { +} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnStartingTimeout(time.Time, time.Time) {} + +func (*NoopParticipantConsumer[StateT, VoteT]) OnCurrentRankDetails(uint64, uint64, models.Identity) { +} + +// no-op implementation of consensus.FinalizationConsumer + +type NoopFinalizationConsumer[StateT models.Unique] struct{} + +var _ 
consensus.FinalizationConsumer[*nilUnique] = (*NoopFinalizationConsumer[*nilUnique])(nil) + +func (*NoopFinalizationConsumer[StateT]) OnStateIncorporated(*models.State[StateT]) {} + +func (*NoopFinalizationConsumer[StateT]) OnFinalizedState(*models.State[StateT]) {} + +// no-op implementation of consensus.TimeoutCollectorConsumer + +type NoopTimeoutCollectorConsumer[VoteT models.Unique] struct{} + +var _ consensus.TimeoutCollectorConsumer[*nilUnique] = (*NoopTimeoutCollectorConsumer[*nilUnique])(nil) + +func (*NoopTimeoutCollectorConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(models.TimeoutCertificate) { +} + +func (*NoopTimeoutCollectorConsumer[VoteT]) OnPartialTimeoutCertificateCreated(uint64, models.QuorumCertificate, models.TimeoutCertificate) { +} + +func (*NoopTimeoutCollectorConsumer[VoteT]) OnNewQuorumCertificateDiscovered(models.QuorumCertificate) { +} + +func (*NoopTimeoutCollectorConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(models.TimeoutCertificate) { +} + +func (*NoopTimeoutCollectorConsumer[VoteT]) OnTimeoutProcessed(*models.TimeoutState[VoteT]) {} + +// no-op implementation of consensus.CommunicatorConsumer + +type NoopCommunicatorConsumer[StateT models.Unique, VoteT models.Unique] struct{} + +var _ consensus.CommunicatorConsumer[*nilUnique, *nilUnique] = (*NoopCommunicatorConsumer[*nilUnique, *nilUnique])(nil) + +func (*NoopCommunicatorConsumer[StateT, VoteT]) OnOwnVote(*VoteT, models.Identity) {} + +func (*NoopCommunicatorConsumer[StateT, VoteT]) OnOwnTimeout(*models.TimeoutState[VoteT]) {} + +func (*NoopCommunicatorConsumer[StateT, VoteT]) OnOwnProposal(*models.SignedProposal[StateT, VoteT], time.Time) { +} + +// no-op implementation of consensus.VoteCollectorConsumer + +type NoopVoteCollectorConsumer[VoteT models.Unique] struct{} + +var _ consensus.VoteCollectorConsumer[*nilUnique] = (*NoopVoteCollectorConsumer[*nilUnique])(nil) + +func (*NoopVoteCollectorConsumer[VoteT]) OnQuorumCertificateConstructedFromVotes(models.QuorumCertificate) { +} + +func (*NoopVoteCollectorConsumer[VoteT]) OnVoteProcessed(*VoteT) {} + +// no-op implementation of consensus.ProposalViolationConsumer + +type NoopProposalViolationConsumer[StateT models.Unique, VoteT models.Unique] struct{} + +var _ consensus.ProposalViolationConsumer[*nilUnique, *nilUnique] = (*NoopProposalViolationConsumer[*nilUnique, *nilUnique])(nil) + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnInvalidStateDetected(*models.InvalidProposalError[StateT, VoteT]) { +} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnDoubleProposeDetected(*models.State[StateT], *models.State[StateT]) { +} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnDoubleVotingDetected(*VoteT, *VoteT) {} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnInvalidVoteDetected(models.InvalidVoteError[VoteT]) { +} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(*VoteT, *models.SignedProposal[StateT, VoteT]) { +} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnDoubleTimeoutDetected(*models.TimeoutState[VoteT], *models.TimeoutState[VoteT]) { +} + +func (*NoopProposalViolationConsumer[StateT, VoteT]) OnInvalidTimeoutDetected(models.InvalidTimeoutError[VoteT]) { +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. 
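Because the Noop* consumers above provide no-op implementations of every callback, an application consumer can embed them and override only the notifications it cares about. A hypothetical sketch, assuming nothing beyond the interfaces in this patch (finalityLogger is an illustrative name):

package example

import (
	"log"

	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications"
)

// finalityLogger inherits no-op behaviour for every callback and overrides
// only the two notifications it is interested in.
type finalityLogger[StateT models.Unique, VoteT models.Unique] struct {
	notifications.NoopConsumer[StateT, VoteT]
}

func (f *finalityLogger[StateT, VoteT]) OnFinalizedState(
	state *models.State[StateT],
) {
	log.Printf("finalized state %s at rank %d", state.Identifier, state.Rank)
}

func (f *finalityLogger[StateT, VoteT]) OnRankChange(oldRank, newRank uint64) {
	log.Printf("entered rank %d (from %d)", newRank, oldRank)
}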
+func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/notifications/pubsub/communicator_distributor.go b/consensus/notifications/pubsub/communicator_distributor.go new file mode 100644 index 0000000..80047ee --- /dev/null +++ b/consensus/notifications/pubsub/communicator_distributor.go @@ -0,0 +1,104 @@ +package pubsub + +import ( + "sync" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// CommunicatorDistributor ingests outbound consensus messages from HotStuff's +// core logic and distributes them to consumers. This logic only runs inside +// active consensus participants proposing states, voting, collecting + +// aggregating votes to QCs, and participating in the pacemaker (sending +// timeouts, collecting + aggregating timeouts to TCs). +// Concurrency safe. +type CommunicatorDistributor[StateT models.Unique, VoteT models.Unique] struct { + consumers []consensus.CommunicatorConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.CommunicatorConsumer[*nilUnique, *nilUnique] = (*CommunicatorDistributor[*nilUnique, *nilUnique])(nil) + +func NewCommunicatorDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *CommunicatorDistributor[StateT, VoteT] { + return &CommunicatorDistributor[StateT, VoteT]{} +} + +func (d *CommunicatorDistributor[StateT, VoteT]) AddCommunicatorConsumer( + consumer consensus.CommunicatorConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnVote( + vote *VoteT, + recipientID models.Identity, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnVote(vote, recipientID) + } +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnTimeout( + timeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnTimeout(timeout) + } +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnProposal( + proposal *models.SignedProposal[StateT, VoteT], + targetPublicationTime time.Time, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnProposal(proposal, targetPublicationTime) + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. 
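nilUnique above exists only to satisfy compile-time interface assertions; every method panics. For orientation, a hypothetical application type satisfying models.Unique might look like the sketch below (ExampleVote and its fields are illustrative only, not part of the patch):

package example

import "source.quilibrium.com/quilibrium/monorepo/consensus/models"

// ExampleVote is a toy vote type; a real implementation would carry the
// application's actual identifiers and signature material.
type ExampleVote struct {
	ID        models.Identity
	Voter     models.Identity
	Rank      uint64
	Timestamp uint64
	Signature []byte
}

func (v *ExampleVote) Identity() models.Identity { return v.ID }
func (v *ExampleVote) Source() models.Identity   { return v.Voter }
func (v *ExampleVote) GetRank() uint64           { return v.Rank }
func (v *ExampleVote) GetTimestamp() uint64      { return v.Timestamp }
func (v *ExampleVote) GetSignature() []byte      { return v.Signature }

// Clone returns a shallow copy, as the interface contract asks for.
func (v *ExampleVote) Clone() models.Unique {
	c := *v
	return &c
}

var _ models.Unique = (*ExampleVote)(nil)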
+func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/notifications/pubsub/distributor.go b/consensus/notifications/pubsub/distributor.go new file mode 100644 index 0000000..322369c --- /dev/null +++ b/consensus/notifications/pubsub/distributor.go @@ -0,0 +1,127 @@ +package pubsub + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Distributor distributes notifications to a list of consumers (event +// consumers). +// +// It allows thread-safe subscription of multiple consumers to events. +type Distributor[StateT models.Unique, VoteT models.Unique] struct { + *FollowerDistributor[StateT, VoteT] + *CommunicatorDistributor[StateT, VoteT] + *ParticipantDistributor[StateT, VoteT] +} + +var _ consensus.Consumer[*nilUnique, *nilUnique] = (*Distributor[*nilUnique, *nilUnique])(nil) + +func NewDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *Distributor[StateT, VoteT] { + return &Distributor[StateT, VoteT]{ + FollowerDistributor: NewFollowerDistributor[StateT, VoteT](), + CommunicatorDistributor: NewCommunicatorDistributor[StateT, VoteT](), + ParticipantDistributor: NewParticipantDistributor[StateT, VoteT](), + } +} + +// AddConsumer adds an event consumer to the Distributor +func (p *Distributor[StateT, VoteT]) AddConsumer( + consumer consensus.Consumer[StateT, VoteT], +) { + p.FollowerDistributor.AddFollowerConsumer(consumer) + p.CommunicatorDistributor.AddCommunicatorConsumer(consumer) + p.ParticipantDistributor.AddParticipantConsumer(consumer) +} + +// FollowerDistributor ingests consensus follower events and distributes them to +// consumers. It allows thread-safe subscription of multiple consumers to +// events. +type FollowerDistributor[StateT models.Unique, VoteT models.Unique] struct { + *ProposalViolationDistributor[StateT, VoteT] + *FinalizationDistributor[StateT] +} + +var _ consensus.FollowerConsumer[*nilUnique, *nilUnique] = (*FollowerDistributor[*nilUnique, *nilUnique])(nil) + +func NewFollowerDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *FollowerDistributor[StateT, VoteT] { + return &FollowerDistributor[StateT, VoteT]{ + ProposalViolationDistributor: NewProposalViolationDistributor[StateT, VoteT](), + FinalizationDistributor: NewFinalizationDistributor[StateT](), + } +} + +// AddFollowerConsumer registers the input `consumer` to be notified on +// `consensus.ConsensusFollowerConsumer` events. +func (d *FollowerDistributor[StateT, VoteT]) AddFollowerConsumer( + consumer consensus.FollowerConsumer[StateT, VoteT], +) { + d.FinalizationDistributor.AddFinalizationConsumer(consumer) + d.ProposalViolationDistributor.AddProposalViolationConsumer(consumer) +} + +// TimeoutAggregationDistributor ingests timeout aggregation events and +// distributes them to consumers. It allows thread-safe subscription of multiple +// consumers to events.
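The composed Distributor gives the core logic a single consensus.Consumer that fans notifications out to follower, communicator, and participant consumers. Below is a hedged wiring sketch; newNotifier is an illustrative helper, not part of the patch, and the tracer is assumed to be supplied by the application.

package example

import (
	"source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub"
)

// newNotifier builds one Distributor, registers a trace-logging consumer plus
// any application consumers, and returns it as the single consensus.Consumer
// handed to the core logic.
func newNotifier[StateT models.Unique, VoteT models.Unique](
	tracer consensus.TraceLogger,
	extra ...consensus.Consumer[StateT, VoteT],
) consensus.Consumer[StateT, VoteT] {
	dist := pubsub.NewDistributor[StateT, VoteT]()
	dist.AddConsumer(notifications.NewLogConsumer[StateT, VoteT](tracer))
	for _, consumer := range extra {
		dist.AddConsumer(consumer)
	}
	return dist
}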
+type TimeoutAggregationDistributor[VoteT models.Unique] struct { + *TimeoutAggregationViolationDistributor[VoteT] + *TimeoutCollectorDistributor[VoteT] +} + +var _ consensus.TimeoutAggregationConsumer[*nilUnique] = (*TimeoutAggregationDistributor[*nilUnique])(nil) + +func NewTimeoutAggregationDistributor[ + VoteT models.Unique, +]() *TimeoutAggregationDistributor[VoteT] { + return &TimeoutAggregationDistributor[VoteT]{ + TimeoutAggregationViolationDistributor: NewTimeoutAggregationViolationDistributor[VoteT](), + TimeoutCollectorDistributor: NewTimeoutCollectorDistributor[VoteT](), + } +} + +func (d *TimeoutAggregationDistributor[VoteT]) AddTimeoutAggregationConsumer( + consumer consensus.TimeoutAggregationConsumer[VoteT], +) { + d.TimeoutAggregationViolationDistributor. + AddTimeoutAggregationViolationConsumer(consumer) + d.TimeoutCollectorDistributor.AddTimeoutCollectorConsumer(consumer) +} + +// VoteAggregationDistributor ingests vote aggregation events and distributes them +// to consumers. It allows thread-safe subscription of multiple consumers to +// events. +type VoteAggregationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + *VoteAggregationViolationDistributor[StateT, VoteT] + *VoteCollectorDistributor[VoteT] +} + +var _ consensus.VoteAggregationConsumer[*nilUnique, *nilUnique] = (*VoteAggregationDistributor[*nilUnique, *nilUnique])(nil) + +func NewVoteAggregationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *VoteAggregationDistributor[StateT, VoteT] { + return &VoteAggregationDistributor[StateT, VoteT]{ + VoteAggregationViolationDistributor: NewVoteAggregationViolationDistributor[StateT, VoteT](), + VoteCollectorDistributor: NewQCCreatedDistributor[VoteT](), + } +} + +func ( + d *VoteAggregationDistributor[StateT, VoteT], +) AddVoteAggregationConsumer( + consumer consensus.VoteAggregationConsumer[StateT, VoteT], +) { + d.VoteAggregationViolationDistributor. + AddVoteAggregationViolationConsumer(consumer) + d.VoteCollectorDistributor.AddVoteCollectorConsumer(consumer) +} diff --git a/consensus/notifications/pubsub/finalization_distributor.go b/consensus/notifications/pubsub/finalization_distributor.go new file mode 100644 index 0000000..87aaf30 --- /dev/null +++ b/consensus/notifications/pubsub/finalization_distributor.go @@ -0,0 +1,83 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type OnStateFinalizedConsumer[StateT models.Unique] = func( + state *models.State[StateT], +) + +type OnStateIncorporatedConsumer[StateT models.Unique] = func( + state *models.State[StateT], +) + +// FinalizationDistributor ingests events from HotStuff's logic for tracking +// forks + finalization and distributes them to consumers. This logic generally +// runs inside all nodes (irrespective of whether they are active consensus +// participants or only consensus followers). Concurrency safe.
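TimeoutAggregationDistributor and VoteAggregationDistributor compose their violation and collector halves, so one Add*Consumer call subscribes a consumer to both streams of notifications. A small illustrative sketch under that assumption (the helper name is hypothetical):

package example

import (
	"source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub"
)

// newAggregationDistributors subscribes each consumer once and thereby wires
// it to both the violation and the collector notifications of its distributor.
func newAggregationDistributors[StateT models.Unique, VoteT models.Unique](
	timeouts consensus.TimeoutAggregationConsumer[VoteT],
	votes consensus.VoteAggregationConsumer[StateT, VoteT],
) (
	*pubsub.TimeoutAggregationDistributor[VoteT],
	*pubsub.VoteAggregationDistributor[StateT, VoteT],
) {
	td := pubsub.NewTimeoutAggregationDistributor[VoteT]()
	td.AddTimeoutAggregationConsumer(timeouts)

	vd := pubsub.NewVoteAggregationDistributor[StateT, VoteT]()
	vd.AddVoteAggregationConsumer(votes)
	return td, vd
}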
+type FinalizationDistributor[StateT models.Unique] struct { + stateFinalizedConsumers []OnStateFinalizedConsumer[StateT] + stateIncorporatedConsumers []OnStateIncorporatedConsumer[StateT] + consumers []consensus.FinalizationConsumer[StateT] + lock sync.RWMutex +} + +var _ consensus.FinalizationConsumer[*nilUnique] = (*FinalizationDistributor[*nilUnique])(nil) + +func NewFinalizationDistributor[StateT models.Unique]() *FinalizationDistributor[StateT] { + return &FinalizationDistributor[StateT]{} +} + +func (d *FinalizationDistributor[StateT]) AddOnStateFinalizedConsumer( + consumer OnStateFinalizedConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.stateFinalizedConsumers = append(d.stateFinalizedConsumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) AddOnStateIncorporatedConsumer( + consumer OnStateIncorporatedConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.stateIncorporatedConsumers = append(d.stateIncorporatedConsumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) AddFinalizationConsumer( + consumer consensus.FinalizationConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) OnStateIncorporated( + state *models.State[StateT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.stateIncorporatedConsumers { + consumer(state) + } + for _, consumer := range d.consumers { + consumer.OnStateIncorporated(state) + } +} + +func (d *FinalizationDistributor[StateT]) OnFinalizedState( + state *models.State[StateT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.stateFinalizedConsumers { + consumer(state) + } + for _, consumer := range d.consumers { + consumer.OnFinalizedState(state) + } +} diff --git a/consensus/notifications/pubsub/participant_distributor.go b/consensus/notifications/pubsub/participant_distributor.go new file mode 100644 index 0000000..8069df5 --- /dev/null +++ b/consensus/notifications/pubsub/participant_distributor.go @@ -0,0 +1,181 @@ +package pubsub + +import ( + "sync" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ParticipantDistributor ingests events from HotStuff's core logic and +// distributes them to consumers. This logic only runs inside active consensus +// participants proposing states, voting, collecting + aggregating votes to QCs, +// and participating in the pacemaker (sending timeouts, collecting + +// aggregating timeouts to TCs). Concurrency safe. 
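FinalizationDistributor also accepts plain callback functions, which avoids implementing the full FinalizationConsumer interface for simple hooks. An illustrative sketch (subscribeFinality is a hypothetical helper, not part of the patch):

package example

import (
	"log"

	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub"
)

// subscribeFinality registers plain functions via the callback-style hooks
// instead of a full FinalizationConsumer implementation.
func subscribeFinality[StateT models.Unique](
	d *pubsub.FinalizationDistributor[StateT],
) {
	d.AddOnStateFinalizedConsumer(func(state *models.State[StateT]) {
		log.Printf("finalized state %s at rank %d", state.Identifier, state.Rank)
	})
	d.AddOnStateIncorporatedConsumer(func(state *models.State[StateT]) {
		log.Printf("incorporated state %s at rank %d", state.Identifier, state.Rank)
	})
}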
+type ParticipantDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.ParticipantConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.ParticipantConsumer[*nilUnique, *nilUnique] = (*ParticipantDistributor[*nilUnique, *nilUnique])(nil) + +func NewParticipantDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *ParticipantDistributor[StateT, VoteT] { + return &ParticipantDistributor[StateT, VoteT]{} +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) AddParticipantConsumer( + consumer consensus.ParticipantConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnEventProcessed() { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnEventProcessed() + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnStart(currentRank uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnStart(currentRank) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[StateT, VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveProposal(currentRank, proposal) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveQuorumCertificate(currentRank, qc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveTimeoutCertificate( + currentRank uint64, + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveTimeoutCertificate(currentRank, tc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnPartialTimeoutCertificate( + currentRank uint64, + partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnPartialTimeoutCertificate(currentRank, partialTimeoutCertificate) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnLocalTimeout(currentRank uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnLocalTimeout(currentRank) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnRankChange(oldRank, newRank uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnRankChange(oldRank, newRank) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnQuorumCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnQuorumCertificateTriggeredRankChange(oldRank, newRank, qc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnTimeoutCertificateTriggeredRankChange(oldRank, newRank, tc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) 
OnStartingTimeout(start time.Time, end time.Time) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnStartingTimeout(start, end) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnCurrentRankDetails( + currentRank, finalizedRank uint64, + currentLeader models.Identity, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnCurrentRankDetails(currentRank, finalizedRank, currentLeader) + } +} diff --git a/consensus/notifications/pubsub/proposal_violation_distributor.go b/consensus/notifications/pubsub/proposal_violation_distributor.go new file mode 100644 index 0000000..40b6502 --- /dev/null +++ b/consensus/notifications/pubsub/proposal_violation_distributor.go @@ -0,0 +1,59 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationDistributor ingests notifications about HotStuff-protocol +// violations and distributes them to consumers. Such notifications are produced +// by the active consensus participants and the consensus follower. Concurrently +// safe. +type ProposalViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.ProposalViolationConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.ProposalViolationConsumer[*nilUnique, *nilUnique] = (*ProposalViolationDistributor[*nilUnique, *nilUnique])(nil) + +func NewProposalViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *ProposalViolationDistributor[StateT, VoteT] { + return &ProposalViolationDistributor[StateT, VoteT]{} +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) AddProposalViolationConsumer( + consumer consensus.ProposalViolationConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidStateDetected(err) + } +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) OnDoubleProposeDetected(state1, state2 *models.State[StateT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleProposeDetected(state1, state2) + } +} diff --git a/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go b/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go new file mode 100644 index 0000000..8e7a1a7 --- /dev/null +++ b/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go @@ -0,0 +1,59 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationViolationDistributor ingests notifications about timeout +// aggregation violations and distributes them to consumers. Such notifications +// are produced by the timeout aggregation logic. Concurrency safe. 
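The violation distributors are a natural place to hang alerting or slashing-style logic. A hedged sketch that embeds NoopProposalViolationConsumer and reacts only to double proposals; equivocationAlerter and watchViolations are illustrative names, not part of the patch:

package example

import (
	"log"

	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications"
	"source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub"
)

// equivocationAlerter reacts only to double proposals and inherits no-op
// behaviour for the remaining violation callbacks.
type equivocationAlerter[StateT models.Unique, VoteT models.Unique] struct {
	notifications.NoopProposalViolationConsumer[StateT, VoteT]
}

func (a *equivocationAlerter[StateT, VoteT]) OnDoubleProposeDetected(
	first, second *models.State[StateT],
) {
	log.Printf("double proposal by %s at rank %d: %s vs %s",
		first.ProposerID, first.Rank, first.Identifier, second.Identifier)
}

// watchViolations registers the alerter with a proposal violation distributor.
func watchViolations[StateT models.Unique, VoteT models.Unique](
	d *pubsub.ProposalViolationDistributor[StateT, VoteT],
) {
	d.AddProposalViolationConsumer(&equivocationAlerter[StateT, VoteT]{})
}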
+type TimeoutAggregationViolationDistributor[VoteT models.Unique] struct { + consumers []consensus.TimeoutAggregationViolationConsumer[VoteT] + lock sync.RWMutex +} + +var _ consensus.TimeoutAggregationViolationConsumer[*nilUnique] = (*TimeoutAggregationViolationDistributor[*nilUnique])(nil) + +func NewTimeoutAggregationViolationDistributor[ + VoteT models.Unique, +]() *TimeoutAggregationViolationDistributor[VoteT] { + return &TimeoutAggregationViolationDistributor[VoteT]{} +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) AddTimeoutAggregationViolationConsumer( + consumer consensus.TimeoutAggregationViolationConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) OnDoubleTimeoutDetected( + timeout *models.TimeoutState[VoteT], + altTimeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleTimeoutDetected(timeout, altTimeout) + } +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) OnInvalidTimeoutDetected( + err models.InvalidTimeoutError[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidTimeoutDetected(err) + } +} diff --git a/consensus/notifications/pubsub/timeout_collector_distributor.go b/consensus/notifications/pubsub/timeout_collector_distributor.go new file mode 100644 index 0000000..3fe319c --- /dev/null +++ b/consensus/notifications/pubsub/timeout_collector_distributor.go @@ -0,0 +1,88 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorDistributor ingests notifications about timeout aggregation +// and distributes them to consumers. Such notifications are produced by the +// timeout aggregation logic. Concurrency safe. 
+type TimeoutCollectorDistributor[VoteT models.Unique] struct { + lock sync.RWMutex + consumers []consensus.TimeoutCollectorConsumer[VoteT] +} + +var _ consensus.TimeoutCollectorConsumer[*nilUnique] = (*TimeoutCollectorDistributor[*nilUnique])(nil) + +func NewTimeoutCollectorDistributor[VoteT models.Unique]() *TimeoutCollectorDistributor[VoteT] { + return &TimeoutCollectorDistributor[VoteT]{} +} + +func (d *TimeoutCollectorDistributor[VoteT]) AddTimeoutCollectorConsumer( + consumer consensus.TimeoutCollectorConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *TimeoutCollectorDistributor[VoteT], +) OnTimeoutCertificateConstructedFromTimeouts( + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnTimeoutCertificateConstructedFromTimeouts(tc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnPartialTimeoutCertificateCreated( + rank, + newestQC, + previousRankTimeoutCert, + ) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnNewQuorumCertificateDiscovered( + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnNewQuorumCertificateDiscovered(qc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnNewTimeoutCertificateDiscovered( + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnNewTimeoutCertificateDiscovered(tc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnTimeoutProcessed( + timeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnTimeoutProcessed(timeout) + } +} diff --git a/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go b/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go new file mode 100644 index 0000000..bc63dee --- /dev/null +++ b/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go @@ -0,0 +1,75 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationViolationDistributor ingests notifications about vote +// aggregation violations and distributes them to consumers. Such notifications +// are produced by the vote aggregation logic. Concurrency safe. 
+type VoteAggregationViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.VoteAggregationViolationConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.VoteAggregationViolationConsumer[*nilUnique, *nilUnique] = (*VoteAggregationViolationDistributor[*nilUnique, *nilUnique])(nil) + +func NewVoteAggregationViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *VoteAggregationViolationDistributor[StateT, VoteT] { + return &VoteAggregationViolationDistributor[StateT, VoteT]{} +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) AddVoteAggregationViolationConsumer( + consumer consensus.VoteAggregationViolationConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnDoubleVotingDetected(vote1, vote2 *VoteT) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleVotingDetected(vote1, vote2) + } +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidVoteDetected(err) + } +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnVoteForInvalidStateDetected( + vote *VoteT, + invalidProposal *models.SignedProposal[StateT, VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnVoteForInvalidStateDetected(vote, invalidProposal) + } +} diff --git a/consensus/notifications/pubsub/vote_collector_distributor.go b/consensus/notifications/pubsub/vote_collector_distributor.go new file mode 100644 index 0000000..89af85b --- /dev/null +++ b/consensus/notifications/pubsub/vote_collector_distributor.go @@ -0,0 +1,52 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectorDistributor ingests notifications about vote aggregation and +// distributes them to consumers. Such notifications are produced by the vote +// aggregation logic. Concurrency safe. 
+type VoteCollectorDistributor[VoteT models.Unique] struct { + consumers []consensus.VoteCollectorConsumer[VoteT] + lock sync.RWMutex +} + +var _ consensus.VoteCollectorConsumer[*nilUnique] = (*VoteCollectorDistributor[*nilUnique])(nil) + +func NewQCCreatedDistributor[ + VoteT models.Unique, +]() *VoteCollectorDistributor[VoteT] { + return &VoteCollectorDistributor[VoteT]{} +} + +func (d *VoteCollectorDistributor[VoteT]) AddVoteCollectorConsumer( + consumer consensus.VoteCollectorConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *VoteCollectorDistributor[VoteT], +) OnQuorumCertificateConstructedFromVotes( + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnQuorumCertificateConstructedFromVotes(qc) + } +} + +func (d *VoteCollectorDistributor[VoteT]) OnVoteProcessed(vote *VoteT) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnVoteProcessed(vote) + } +} diff --git a/consensus/pacemaker/pacemaker.go b/consensus/pacemaker/pacemaker.go new file mode 100644 index 0000000..2fe6630 --- /dev/null +++ b/consensus/pacemaker/pacemaker.go @@ -0,0 +1,331 @@ +package pacemaker + +import ( + "context" + "fmt" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" + "source.quilibrium.com/quilibrium/monorepo/consensus/tracker" +) + +// Pacemaker implements consensus.Pacemaker +// Conceptually, we use the Pacemaker algorithm first proposed in [1] +// (specifically Jolteon) and described in more detail in [2] (aka DiemBFT v4). +// [1] https://arxiv.org/abs/2106.10362 +// [2] https://developers.diem.com/papers/diem-consensus-state-machine-replication-in-the-diem-statechain/2021-08-17.pdf +// +// To enter a new rank `r`, the Pacemaker must observe a valid QC or TC for rank +// `r-1`. The Pacemaker also controls when a node should locally time out for a +// given rank. Locally timing a rank does not cause a rank change. +// A local timeout for a rank `r` causes a node to: +// - never produce a vote for any proposal with rank ≤ `r`, after the timeout +// - produce and broadcast a timeout object, which can form a part of the TC +// for the timed out rank +// +// Not concurrency safe. +type Pacemaker[StateT models.Unique, VoteT models.Unique] struct { + consensus.ProposalDurationProvider + + ctx context.Context + tracer consensus.TraceLogger + timeoutControl *timeout.Controller + notifier consensus.ParticipantConsumer[StateT, VoteT] + rankTracker rankTracker[StateT, VoteT] + started bool +} + +var _ consensus.Pacemaker = (*Pacemaker[*nilUnique, *nilUnique])(nil) +var _ consensus.ProposalDurationProvider = (*Pacemaker[*nilUnique, *nilUnique])(nil) + +// New creates a new Pacemaker instance +// - startRank is the rank for the pacemaker to start with. +// - timeoutController controls the timeout trigger. +// - notifier provides callbacks for pacemaker events. 
+// +// Expected error conditions: +// * models.ConfigurationError if initial LivenessState is invalid +func NewPacemaker[StateT models.Unique, VoteT models.Unique]( + filter []byte, + timeoutController *timeout.Controller, + proposalDurationProvider consensus.ProposalDurationProvider, + notifier consensus.Consumer[StateT, VoteT], + store consensus.ConsensusStore[VoteT], + tracer consensus.TraceLogger, + recovery ...recoveryInformation[StateT, VoteT], +) (*Pacemaker[StateT, VoteT], error) { + vt, err := newRankTracker[StateT, VoteT](filter, store) + if err != nil { + return nil, fmt.Errorf("initializing rank tracker failed: %w", err) + } + + pm := &Pacemaker[StateT, VoteT]{ + ProposalDurationProvider: proposalDurationProvider, + timeoutControl: timeoutController, + notifier: notifier, + rankTracker: vt, + tracer: tracer, + started: false, + } + for _, recoveryAction := range recovery { + err = recoveryAction(pm) + if err != nil { + return nil, fmt.Errorf("ingesting recovery information failed: %w", err) + } + } + return pm, nil +} + +// CurrentRank returns the current rank +func (p *Pacemaker[StateT, VoteT]) CurrentRank() uint64 { + return p.rankTracker.CurrentRank() +} + +// LatestQuorumCertificate returns QC with the highest rank discovered by +// Pacemaker. +func ( + p *Pacemaker[StateT, VoteT], +) LatestQuorumCertificate() models.QuorumCertificate { + return p.rankTracker.LatestQuorumCertificate() +} + +// PriorRankTimeoutCertificate returns TC for last rank, this will be nil only +// if the current rank was entered with a QC. +func ( + p *Pacemaker[StateT, VoteT], +) PriorRankTimeoutCertificate() models.TimeoutCertificate { + return p.rankTracker.PriorRankTimeoutCertificate() +} + +// TimeoutCh returns the timeout channel for current active timeout. +// Note the returned timeout channel returns only one timeout, which is the +// current timeout. To get the timeout for the next timeout, you need to call +// TimeoutCh() again. +func (p *Pacemaker[StateT, VoteT]) TimeoutCh() <-chan time.Time { + return p.timeoutControl.Channel() +} + +// ReceiveQuorumCertificate notifies the pacemaker with a new QC, which might +// allow pacemaker to fast-forward its rank. In contrast to +// `ReceiveTimeoutCertificate`, this function does _not_ handle `nil` inputs. +// No errors are expected, any error should be treated as exception. +func (p *Pacemaker[StateT, VoteT]) ReceiveQuorumCertificate( + qc models.QuorumCertificate, +) (*models.NextRank, error) { + initialRank := p.CurrentRank() + resultingRank, err := p.rankTracker.ReceiveQuorumCertificate(qc) + if err != nil { + return nil, fmt.Errorf( + "unexpected exception in rankTracker while processing QC for rank %d: %w", + qc.GetRank(), + err, + ) + } + if resultingRank <= initialRank { + return nil, nil + } + + // QC triggered rank change: + p.timeoutControl.OnProgressBeforeTimeout() + p.notifier.OnQuorumCertificateTriggeredRankChange( + initialRank, + resultingRank, + qc, + ) + + p.notifier.OnRankChange(initialRank, resultingRank) + timerInfo := p.timeoutControl.StartTimeout(p.ctx, resultingRank) + p.notifier.OnStartingTimeout( + timerInfo.StartTime, + timerInfo.StartTime.Add(timerInfo.Duration), + ) + + return &models.NextRank{ + Rank: timerInfo.Rank, + Start: timerInfo.StartTime, + End: timerInfo.StartTime.Add(timerInfo.Duration), + }, nil +} + +// ReceiveTimeoutCertificate notifies the Pacemaker of a new timeout +// certificate, which may allow Pacemaker to fast-forward its current rank. 
A +// nil TC is an expected valid input, so that callers may pass in e.g. +// `Proposal.PriorRankTimeoutCertificate`, which may or may not have a value. +// No errors are expected, any error should be treated as exception +func (p *Pacemaker[StateT, VoteT]) ReceiveTimeoutCertificate( + tc models.TimeoutCertificate, +) (*models.NextRank, error) { + initialRank := p.CurrentRank() + resultingRank, err := p.rankTracker.ReceiveTimeoutCertificate(tc) + if err != nil { + return nil, fmt.Errorf( + "unexpected exception in rankTracker while processing TC for rank %d: %w", + tc.GetRank(), + err, + ) + } + p.tracer.Trace( + "pacemaker receive tc", + consensus.Uint64Param("resulting_rank", resultingRank), + consensus.Uint64Param("initial_rank", initialRank), + ) + if resultingRank <= initialRank { + return nil, nil + } + + // TC triggered rank change: + p.timeoutControl.OnTimeout() + p.notifier.OnTimeoutCertificateTriggeredRankChange( + initialRank, + resultingRank, + tc, + ) + + p.notifier.OnRankChange(initialRank, resultingRank) + timerInfo := p.timeoutControl.StartTimeout(p.ctx, resultingRank) + p.notifier.OnStartingTimeout( + timerInfo.StartTime, + timerInfo.StartTime.Add(timerInfo.Duration), + ) + + return &models.NextRank{ + Rank: timerInfo.Rank, + Start: timerInfo.StartTime, + End: timerInfo.StartTime.Add(timerInfo.Duration), + }, nil +} + +// Start starts the pacemaker by starting the initial timer for the current +// rank. Start should only be called once - subsequent calls are a no-op. +// CAUTION: Pacemaker is not concurrency safe. The Start method must +// be executed by the same goroutine that also calls the other business logic +// methods, or concurrency safety has to be implemented externally. +func (p *Pacemaker[StateT, VoteT]) Start(ctx context.Context) { + if p.started { + return + } + p.started = true + p.ctx = ctx + timerInfo := p.timeoutControl.StartTimeout(ctx, p.CurrentRank()) + p.notifier.OnStartingTimeout( + timerInfo.StartTime, + timerInfo.StartTime.Add(timerInfo.Duration), + ) +} + +/* ------------------------------------ recovery parameters for Pacemaker ------------------------------------ */ + +// recoveryInformation provides optional information to the Pacemaker during its +// construction to ingest additional information that was potentially lost +// during a crash or reboot. Following the "information-driven" approach, we +// consider potentially older or redundant information as consistent with our +// already-present knowledge, i.e. as a no-op. +type recoveryInformation[ + StateT models.Unique, + VoteT models.Unique, +] func(p *Pacemaker[StateT, VoteT]) error + +// WithQCs informs the Pacemaker about the given QCs. Old and nil QCs are +// accepted (no-op). +func WithQCs[ + StateT models.Unique, + VoteT models.Unique, +](qcs ...models.QuorumCertificate) recoveryInformation[StateT, VoteT] { + // To avoid excessive database writes during initialization, we pre-filter the + // newest QC here and only hand that one to the rankTracker. For recovery, we + // allow the special case of nil QCs, because the genesis state has no QC. 
+ tracker := tracker.NewNewestQCTracker() + for _, qc := range qcs { + if qc == nil { + continue // no-op + } + tracker.Track(&qc) + } + newestQC := tracker.NewestQC() + if newestQC == nil { + return func(p *Pacemaker[StateT, VoteT]) error { return nil } // no-op + } + + return func(p *Pacemaker[StateT, VoteT]) error { + _, err := p.rankTracker.ReceiveQuorumCertificate(*newestQC) + return err + } +} + +// WithTCs informs the Pacemaker about the given TCs. Old and nil TCs are +// accepted (no-op). +func WithTCs[ + StateT models.Unique, + VoteT models.Unique, +](tcs ...models.TimeoutCertificate) recoveryInformation[StateT, VoteT] { + qcTracker := tracker.NewNewestQCTracker() + tcTracker := tracker.NewNewestTCTracker() + for _, tc := range tcs { + if tc == nil { + continue // no-op + } + tcTracker.Track(&tc) + qc := tc.GetLatestQuorumCert() + qcTracker.Track(&qc) + } + newestTC := tcTracker.NewestTC() + newestQC := qcTracker.NewestQC() + if newestTC == nil { // shortcut if no TCs provided + return func(p *Pacemaker[StateT, VoteT]) error { return nil } // no-op + } + + return func(p *Pacemaker[StateT, VoteT]) error { + _, err := p.rankTracker.ReceiveTimeoutCertificate(*newestTC) // allows nil inputs + if err != nil { + return fmt.Errorf( + "rankTracker failed to process newest TC provided in constructor: %w", + err, + ) + } + _, err = p.rankTracker.ReceiveQuorumCertificate(*newestQC) // should never be nil, because a valid TC always contain a QC + if err != nil { + return fmt.Errorf( + "rankTracker failed to process newest QC extracted from the TCs provided in constructor: %w", + err, + ) + } + return nil + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/pacemaker/pacemaker_test.go b/consensus/pacemaker/pacemaker_test.go new file mode 100644 index 0000000..37493de --- /dev/null +++ b/consensus/pacemaker/pacemaker_test.go @@ -0,0 +1,439 @@ +package pacemaker + +import ( + "context" + "errors" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" +) + +const ( + minRepTimeout float64 = 100.0 // Milliseconds + maxRepTimeout float64 = 600.0 // Milliseconds + multiplicativeIncrease float64 = 1.5 // multiplicative factor + happyPathMaxRoundFailures uint64 = 6 // number of failed rounds before first timeout increase +) + +func TestPacemaker(t *testing.T) { + suite.Run(t, new(PacemakerTestSuite)) +} + +type PacemakerTestSuite struct { + suite.Suite + + initialRank uint64 + initialQC models.QuorumCertificate + initialTC models.TimeoutCertificate + + notifier *mocks.Consumer[*helper.TestState, *helper.TestVote] + proposalDurationProvider consensus.ProposalDurationProvider + store *mocks.ConsensusStore[*helper.TestVote] + pacemaker *Pacemaker[*helper.TestState, *helper.TestVote] + stop context.CancelFunc + timeoutConf timeout.Config +} + +func (s *PacemakerTestSuite) SetupTest() { + s.initialRank = 3 + s.initialQC = QC(2) + s.initialTC = nil + var err error + + s.timeoutConf, err = timeout.NewConfig(time.Duration(minRepTimeout*1e6), time.Duration(maxRepTimeout*1e6), multiplicativeIncrease, happyPathMaxRoundFailures, time.Duration(maxRepTimeout*1e6)) + require.NoError(s.T(), err) + + // init consumer for notifications emitted by Pacemaker + s.notifier = mocks.NewConsumer[*helper.TestState, *helper.TestVote](s.T()) + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + + // init Persister dependency for Pacemaker + // CAUTION: The Persister hands a pointer to `livenessState` to the Pacemaker, which means the Pacemaker + // could modify our struct in-place. `livenessState` should not be used by tests to determine expected values! 
+ s.store = mocks.NewConsensusStore[*helper.TestVote](s.T()) + livenessState := &models.LivenessState{ + CurrentRank: 3, + PriorRankTimeoutCertificate: nil, + LatestQuorumCertificate: s.initialQC, + } + s.store.On("GetLivenessState", mock.Anything).Return(livenessState, nil) + + // init Pacemaker and start + s.pacemaker, err = NewPacemaker(nil, timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger()) + require.NoError(s.T(), err) + + var ctx context.Context + ctx, s.stop = context.WithCancel(context.Background()) + s.pacemaker.Start(ctx) +} + +func (s *PacemakerTestSuite) TearDownTest() { + s.stop() +} + +func QC(rank uint64) models.QuorumCertificate { + return helper.MakeQC(helper.WithQCRank(rank)) +} + +func LivenessState(qc models.QuorumCertificate) *models.LivenessState { + return &models.LivenessState{ + CurrentRank: qc.GetRank() + 1, + PriorRankTimeoutCertificate: nil, + LatestQuorumCertificate: qc, + } +} + +// TestReceiveQuorumCertificate_SkipIncreaseRankThroughQC tests that Pacemaker increases rank when receiving QC, +// if applicable, by skipping ranks +func (s *PacemakerTestSuite) TestReceiveQuorumCertificate_SkipIncreaseRankThroughQC() { + // seeing a QC for the current rank should advance the rank by one + qc := QC(s.initialRank) + s.store.On("PutLivenessState", LivenessState(qc)).Return(nil).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnQuorumCertificateTriggeredRankChange", s.initialRank, uint64(4), qc).Return().Once() + s.notifier.On("OnRankChange", s.initialRank, qc.GetRank()+1).Once() + nve, err := s.pacemaker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), qc.GetRank()+1, s.pacemaker.CurrentRank()) + require.True(s.T(), nve.Rank == qc.GetRank()+1) + require.Equal(s.T(), qc, s.pacemaker.LatestQuorumCertificate()) + require.Nil(s.T(), s.pacemaker.PriorRankTimeoutCertificate()) + + // seeing a QC for 10 ranks in the future should advance to rank +11 + curRank := s.pacemaker.CurrentRank() + qc = QC(curRank + 10) + s.store.On("PutLivenessState", LivenessState(qc)).Return(nil).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnQuorumCertificateTriggeredRankChange", curRank, qc.GetRank()+1, qc).Return().Once() + s.notifier.On("OnRankChange", curRank, qc.GetRank()+1).Once() + nve, err = s.pacemaker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.True(s.T(), nve.Rank == qc.GetRank()+1) + require.Equal(s.T(), qc, s.pacemaker.LatestQuorumCertificate()) + require.Nil(s.T(), s.pacemaker.PriorRankTimeoutCertificate()) + + require.Equal(s.T(), qc.GetRank()+1, s.pacemaker.CurrentRank()) +} + +// TestReceiveTimeoutCertificate_SkipIncreaseRankThroughTC tests that Pacemaker increases rank when receiving TC, +// if applicable, by skipping ranks +func (s *PacemakerTestSuite) TestReceiveTimeoutCertificate_SkipIncreaseRankThroughTC() { + // seeing a TC for the current rank should advance the rank by one + tc := helper.MakeTC(helper.WithTCRank(s.initialRank), helper.WithTCNewestQC(s.initialQC)) + expectedLivenessState := &models.LivenessState{ + CurrentRank: tc.GetRank() + 1, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: s.initialQC, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnTimeoutCertificateTriggeredRankChange", s.initialRank, 
tc.GetRank()+1, tc).Return().Once() + s.notifier.On("OnRankChange", s.initialRank, tc.GetRank()+1).Once() + nve, err := s.pacemaker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), tc.GetRank()+1, s.pacemaker.CurrentRank()) + require.True(s.T(), nve.Rank == tc.GetRank()+1) + require.Equal(s.T(), tc, s.pacemaker.PriorRankTimeoutCertificate()) + + // seeing a TC for 10 ranks in the future should advance to rank +11 + curRank := s.pacemaker.CurrentRank() + tc = helper.MakeTC(helper.WithTCRank(curRank+10), helper.WithTCNewestQC(s.initialQC)) + expectedLivenessState = &models.LivenessState{ + CurrentRank: tc.GetRank() + 1, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: s.initialQC, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnTimeoutCertificateTriggeredRankChange", curRank, tc.GetRank()+1, tc).Return().Once() + s.notifier.On("OnRankChange", curRank, tc.GetRank()+1).Once() + nve, err = s.pacemaker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.True(s.T(), nve.Rank == tc.GetRank()+1) + require.Equal(s.T(), tc, s.pacemaker.PriorRankTimeoutCertificate()) + require.Equal(s.T(), tc.GetLatestQuorumCert(), s.pacemaker.LatestQuorumCertificate()) + + require.Equal(s.T(), tc.GetRank()+1, s.pacemaker.CurrentRank()) +} + +// TestReceiveTimeoutCertificate_IgnoreOldTC tests that Pacemaker ignores old TC and doesn't advance round. +func (s *PacemakerTestSuite) TestReceiveTimeoutCertificate_IgnoreOldTC() { + nve, err := s.pacemaker.ReceiveTimeoutCertificate(helper.MakeTC(helper.WithTCRank(s.initialRank-1), + helper.WithTCNewestQC(s.initialQC))) + require.NoError(s.T(), err) + require.Nil(s.T(), nve) + require.Equal(s.T(), s.initialRank, s.pacemaker.CurrentRank()) +} + +// TestReceiveTimeoutCertificate_IgnoreNilTC tests that Pacemaker accepts nil TC as allowed input but doesn't trigger a new rank event +func (s *PacemakerTestSuite) TestReceiveTimeoutCertificate_IgnoreNilTC() { + nve, err := s.pacemaker.ReceiveTimeoutCertificate(nil) + require.NoError(s.T(), err) + require.Nil(s.T(), nve) + require.Equal(s.T(), s.initialRank, s.pacemaker.CurrentRank()) +} + +// TestReceiveQuorumCertificate_PersistException tests that Pacemaker propagates exception +// when processing QC +func (s *PacemakerTestSuite) TestReceiveQuorumCertificate_PersistException() { + exception := errors.New("persist-exception") + qc := QC(s.initialRank) + s.store.On("PutLivenessState", mock.Anything).Return(exception).Once() + nve, err := s.pacemaker.ReceiveQuorumCertificate(qc) + require.Nil(s.T(), nve) + require.ErrorIs(s.T(), err, exception) +} + +// TestReceiveTimeoutCertificate_PersistException tests that Pacemaker propagates exception +// when processing TC +func (s *PacemakerTestSuite) TestReceiveTimeoutCertificate_PersistException() { + exception := errors.New("persist-exception") + tc := helper.MakeTC(helper.WithTCRank(s.initialRank)) + s.store.On("PutLivenessState", mock.Anything).Return(exception).Once() + nve, err := s.pacemaker.ReceiveTimeoutCertificate(tc) + require.Nil(s.T(), nve) + require.ErrorIs(s.T(), err, exception) +} + +// TestReceiveQuorumCertificate_InvalidatesPriorRankTimeoutCertificate verifies that Pacemaker does not retain any old +// TC if the last rank change was triggered by observing a QC from the previous rank. 
+func (s *PacemakerTestSuite) TestReceiveQuorumCertificate_InvalidatesPriorRankTimeoutCertificate() { + tc := helper.MakeTC(helper.WithTCRank(s.initialRank+1), helper.WithTCNewestQC(s.initialQC)) + s.store.On("PutLivenessState", mock.Anything).Return(nil).Times(2) + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Times(2) + s.notifier.On("OnTimeoutCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnQuorumCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnRankChange", s.initialRank, tc.GetRank()+1).Once() + nve, err := s.pacemaker.ReceiveTimeoutCertificate(tc) + require.NotNil(s.T(), nve) + require.NoError(s.T(), err) + require.NotNil(s.T(), s.pacemaker.PriorRankTimeoutCertificate()) + + qc := QC(tc.GetRank() + 1) + s.notifier.On("OnRankChange", tc.GetRank()+1, qc.GetRank()+1).Once() + nve, err = s.pacemaker.ReceiveQuorumCertificate(qc) + require.NotNil(s.T(), nve) + require.NoError(s.T(), err) + require.Nil(s.T(), s.pacemaker.PriorRankTimeoutCertificate()) +} + +// TestReceiveQuorumCertificate_IgnoreOldQC tests that Pacemaker ignores old QC and doesn't advance round +func (s *PacemakerTestSuite) TestReceiveQuorumCertificate_IgnoreOldQC() { + qc := QC(s.initialRank - 1) + nve, err := s.pacemaker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Nil(s.T(), nve) + require.Equal(s.T(), s.initialRank, s.pacemaker.CurrentRank()) + require.NotEqual(s.T(), qc, s.pacemaker.LatestQuorumCertificate()) +} + +// TestReceiveQuorumCertificate_UpdateLatestQuorumCertificate tests that Pacemaker tracks the newest QC even if it has advanced past this rank. +// In this test, we feed a newer QC as part of a TC into the Pacemaker. +func (s *PacemakerTestSuite) TestReceiveQuorumCertificate_UpdateLatestQuorumCertificate() { + tc := helper.MakeTC(helper.WithTCRank(s.initialRank+10), helper.WithTCNewestQC(s.initialQC)) + expectedRank := tc.GetRank() + 1 + s.notifier.On("OnTimeoutCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnRankChange", s.initialRank, expectedRank).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.store.On("PutLivenessState", mock.Anything).Return(nil).Once() + nve, err := s.pacemaker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.NotNil(s.T(), nve) + + qc := QC(s.initialRank + 5) + expectedLivenessState := &models.LivenessState{ + CurrentRank: expectedRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + + nve, err = s.pacemaker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Nil(s.T(), nve) + require.Equal(s.T(), qc, s.pacemaker.LatestQuorumCertificate()) +} + +// TestReceiveTimeoutCertificate_UpdateLatestQuorumCertificate tests that Pacemaker tracks the newest QC included in TC even if it has advanced past this rank. 
+func (s *PacemakerTestSuite) TestReceiveTimeoutCertificate_UpdateLatestQuorumCertificate() { + tc := helper.MakeTC(helper.WithTCRank(s.initialRank+10), helper.WithTCNewestQC(s.initialQC)) + expectedRank := tc.GetRank() + 1 + s.notifier.On("OnTimeoutCertificateTriggeredRankChange", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.notifier.On("OnRankChange", s.initialRank, expectedRank).Once() + s.notifier.On("OnStartingTimeout", mock.Anything, mock.Anything).Return().Once() + s.store.On("PutLivenessState", mock.Anything).Return(nil).Once() + nve, err := s.pacemaker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.NotNil(s.T(), nve) + + qc := QC(s.initialRank + 5) + olderTC := helper.MakeTC(helper.WithTCRank(s.pacemaker.CurrentRank()-1), helper.WithTCNewestQC(qc)) + expectedLivenessState := &models.LivenessState{ + CurrentRank: expectedRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + + nve, err = s.pacemaker.ReceiveTimeoutCertificate(olderTC) + require.NoError(s.T(), err) + require.Nil(s.T(), nve) + require.Equal(s.T(), qc, s.pacemaker.LatestQuorumCertificate()) +} + +// Test_Initialization tests QCs and TCs provided as optional constructor arguments. +// We want to test that nil, old and duplicate TCs & QCs are accepted in arbitrary order. +// The constructed Pacemaker should be in the state: +// - in rank V+1, where V is the _largest rank of _any_ of the ingested QCs and TCs +// - method `LatestQuorumCertificate` should report the QC with the highest Rank in _any_ of the inputs +func (s *PacemakerTestSuite) Test_Initialization() { + highestRank := uint64(0) // highest Rank of any QC or TC constructed below + + // Randomly create 80 TCs: + // * their rank is randomly sampled from the range [3, 103) + // * as we sample 80 times, probability of creating 2 TCs for the same + // rank is practically 1 (-> birthday problem) + // * we place the TCs in a slice of length 110, i.e. some elements are guaranteed to be nil + // * Note: we specifically allow for the TC to have the same rank as the highest QC. + // This is useful as a fallback, because it allows replicas other than the designated + // leader to also collect votes and generate a QC. 
+ tcs := make([]models.TimeoutCertificate, 110) + for i := 0; i < 80; i++ { + tcRank := s.initialRank + uint64(rand.Intn(100)) + qcRank := 1 + uint64(rand.Intn(int(tcRank))) + tcs[i] = helper.MakeTC(helper.WithTCRank(tcRank), helper.WithTCNewestQC(QC(qcRank))) + highestRank = max(highestRank, tcRank, qcRank) + } + rand.Shuffle(len(tcs), func(i, j int) { + tcs[i], tcs[j] = tcs[j], tcs[i] + }) + + // randomly create 80 QCs (same logic as above) + qcs := make([]models.QuorumCertificate, 110) + for i := 0; i < 80; i++ { + qcs[i] = QC(s.initialRank + uint64(rand.Intn(100))) + highestRank = max(highestRank, qcs[i].GetRank()) + } + rand.Shuffle(len(qcs), func(i, j int) { + qcs[i], qcs[j] = qcs[j], qcs[i] + }) + + // set up mocks + s.store.On("PutLivenessState", mock.Anything).Return(nil) + + // test that the constructor finds the newest QC and TC + s.Run("Random TCs and QCs combined", func() { + pm, err := NewPacemaker( + nil, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), + WithQCs[*helper.TestState, *helper.TestVote](qcs...), WithTCs[*helper.TestState, *helper.TestVote](tcs...), + ) + require.NoError(s.T(), err) + + require.Equal(s.T(), highestRank+1, pm.CurrentRank()) + if tc := pm.PriorRankTimeoutCertificate(); tc != nil { + require.Equal(s.T(), highestRank, tc.GetRank()) + } else { + require.Equal(s.T(), highestRank, pm.LatestQuorumCertificate().GetRank()) + } + }) + + // We specifically test an edge case: an outdated TC can still contain a QC that + // is newer than the newest QC the pacemaker knows so far. + s.Run("Newest QC in older TC", func() { + tcs[17] = helper.MakeTC(helper.WithTCRank(highestRank+20), helper.WithTCNewestQC(QC(highestRank+5))) + tcs[45] = helper.MakeTC(helper.WithTCRank(highestRank+15), helper.WithTCNewestQC(QC(highestRank+12))) + + pm, err := NewPacemaker( + nil, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), + WithTCs[*helper.TestState, *helper.TestVote](tcs...), WithQCs[*helper.TestState, *helper.TestVote](qcs...), + ) + require.NoError(s.T(), err) + + // * when observing tcs[17], which is newer than any other QC or TC, the pacemaker should enter rank tcs[17].Rank + 1 + // * when observing tcs[45], which is older than tcs[17], the Pacemaker should notice that the QC in tcs[45] + // is newer than its local QC and update it + require.Equal(s.T(), tcs[17].GetRank()+1, pm.CurrentRank()) + require.Equal(s.T(), tcs[17], pm.PriorRankTimeoutCertificate()) + require.Equal(s.T(), tcs[45].GetLatestQuorumCert(), pm.LatestQuorumCertificate()) + }) + + // Another edge case: a TC from a past rank contains QC for the same rank. + // While is TC is outdated, the contained QC is still newer that the QC the pacemaker knows so far. 
+ s.Run("Newest QC in older TC", func() { + tcs[17] = helper.MakeTC(helper.WithTCRank(highestRank+20), helper.WithTCNewestQC(QC(highestRank+5))) + tcs[45] = helper.MakeTC(helper.WithTCRank(highestRank+15), helper.WithTCNewestQC(QC(highestRank+15))) + + pm, err := NewPacemaker( + nil, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), + WithTCs[*helper.TestState, *helper.TestVote](tcs...), WithQCs[*helper.TestState, *helper.TestVote](qcs...), + ) + require.NoError(s.T(), err) + + // * when observing tcs[17], which is newer than any other QC or TC, the pacemaker should enter rank tcs[17].Rank + 1 + // * when observing tcs[45], which is older than tcs[17], the Pacemaker should notice that the QC in tcs[45] + // is newer than its local QC and update it + require.Equal(s.T(), tcs[17].GetRank()+1, pm.CurrentRank()) + require.Equal(s.T(), tcs[17], pm.PriorRankTimeoutCertificate()) + require.Equal(s.T(), tcs[45].GetLatestQuorumCert(), pm.LatestQuorumCertificate()) + }) + + // Verify that WithTCs still works correctly if no TCs are given: + // the list of TCs is empty or all contained TCs are nil + s.Run("Only nil TCs", func() { + pm, err := NewPacemaker(nil, timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), WithTCs[*helper.TestState, *helper.TestVote]()) + require.NoError(s.T(), err) + require.Equal(s.T(), s.initialRank, pm.CurrentRank()) + + pm, err = NewPacemaker(nil, timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), WithTCs[*helper.TestState, *helper.TestVote](nil, nil, nil)) + require.NoError(s.T(), err) + require.Equal(s.T(), s.initialRank, pm.CurrentRank()) + }) + + // Verify that WithQCs still works correctly if no QCs are given: + // the list of QCs is empty or all contained QCs are nil + s.Run("Only nil QCs", func() { + pm, err := NewPacemaker(nil, timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), WithQCs[*helper.TestState, *helper.TestVote]()) + require.NoError(s.T(), err) + require.Equal(s.T(), s.initialRank, pm.CurrentRank()) + + pm, err = NewPacemaker(nil, timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.store, helper.Logger(), WithQCs[*helper.TestState, *helper.TestVote](nil, nil, nil)) + require.NoError(s.T(), err) + require.Equal(s.T(), s.initialRank, pm.CurrentRank()) + }) + +} + +// TestProposalDuration tests that the active pacemaker forwards proposal duration values from the provider. 
+func (s *PacemakerTestSuite) TestProposalDuration() { + proposalDurationProvider := NewStaticProposalDurationProvider(time.Millisecond * 500) + pm, err := NewPacemaker(nil, timeout.NewController(s.timeoutConf), &proposalDurationProvider, s.notifier, s.store, helper.Logger()) + require.NoError(s.T(), err) + + now := time.Now().UTC() + assert.Equal(s.T(), now.Add(time.Millisecond*500), pm.TargetPublicationTime(117, now, helper.MakeIdentity())) + proposalDurationProvider.dur = time.Second + assert.Equal(s.T(), now.Add(time.Second), pm.TargetPublicationTime(117, now, helper.MakeIdentity())) +} + +func max(a uint64, values ...uint64) uint64 { + for _, v := range values { + if v > a { + a = v + } + } + return a +} diff --git a/consensus/pacemaker/proposal_timing.go b/consensus/pacemaker/proposal_timing.go new file mode 100644 index 0000000..804b5fd --- /dev/null +++ b/consensus/pacemaker/proposal_timing.go @@ -0,0 +1,36 @@ +package pacemaker + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StaticProposalDurationProvider is a consensus.ProposalDurationProvider which +// provides a static ProposalDuration. The constant dur represents the time to +// produce and broadcast the proposal (ProposalDuration), NOT the time for the +// entire rank (RankDuration). +type StaticProposalDurationProvider struct { + dur time.Duration +} + +var _ consensus.ProposalDurationProvider = (*StaticProposalDurationProvider)(nil) + +func NewStaticProposalDurationProvider( + dur time.Duration, +) StaticProposalDurationProvider { + return StaticProposalDurationProvider{dur: dur} +} + +func (p StaticProposalDurationProvider) TargetPublicationTime( + _ uint64, + timeRankEntered time.Time, + _ models.Identity, +) time.Time { + return timeRankEntered.Add(p.dur) +} + +func NoProposalDelay() StaticProposalDurationProvider { + return NewStaticProposalDurationProvider(0) +} diff --git a/consensus/pacemaker/rank_tracker.go b/consensus/pacemaker/rank_tracker.go new file mode 100644 index 0000000..b844a55 --- /dev/null +++ b/consensus/pacemaker/rank_tracker.go @@ -0,0 +1,190 @@ +package pacemaker + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// rankTracker is a sub-component of the PaceMaker, which encapsulates the logic +// for tracking and updating the current rank. For crash resilience, the +// rankTracker persists its latest internal state. +// +// In addition, rankTracker maintains and persists a proof to show that it +// entered the current rank according to protocol rules. To enter a new rank +// `r`, the Pacemaker must observe a valid QC or TC for rank `r-1`. Per +// convention, the proof has the following structure: +// - If the current rank was entered by observing a QC, this QC is returned by +// `NewestQC()`. +// Furthermore, `PriorRankTimeoutCertificate()` returns nil. +// - If the current rank was entered by observing a TC, `NewestQC()` returns +// the newest QC known. `PriorRankTimeoutCertificate()` returns the TC that +// triggered the rank change +type rankTracker[StateT models.Unique, VoteT models.Unique] struct { + livenessState models.LivenessState + store consensus.ConsensusStore[VoteT] +} + +// newRankTracker instantiates a rankTracker. 
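The proof convention described in the rankTracker documentation above can be made concrete with a short sketch (illustrative only, not part of the patch), using the `helper` constructors that this package's tests rely on. Entering a rank by observing a QC leaves `PriorRankTimeoutCertificate` nil, whereas entering it by observing a TC records that TC alongside the newest known QC:

qc := helper.MakeQC(helper.WithQCRank(7))
viaQC := models.LivenessState{
	CurrentRank:                 8,   // entered rank 8 by observing a QC for rank 7
	LatestQuorumCertificate:     qc,
	PriorRankTimeoutCertificate: nil, // no TC involved in the rank change
}

tc := helper.MakeTC(helper.WithTCRank(8), helper.WithTCNewestQC(qc))
viaTC := models.LivenessState{
	CurrentRank:                 9,  // entered rank 9 by observing a TC for rank 8
	LatestQuorumCertificate:     qc, // newest QC known, carried inside the TC
	PriorRankTimeoutCertificate: tc, // proof that rank 9 was entered via timeout
}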
+func newRankTracker[StateT models.Unique, VoteT models.Unique]( + filter []byte, + store consensus.ConsensusStore[VoteT], +) (rankTracker[StateT, VoteT], error) { + livenessState, err := store.GetLivenessState(filter) + if err != nil { + return rankTracker[StateT, VoteT]{}, + fmt.Errorf("could not load liveness data: %w", err) + } + + return rankTracker[StateT, VoteT]{ + livenessState: *livenessState, + store: store, + }, nil +} + +// CurrentRank returns the current rank. +func (vt *rankTracker[StateT, VoteT]) CurrentRank() uint64 { + return vt.livenessState.CurrentRank +} + +// LatestQuorumCertificate returns the QC with the highest rank known. +func ( + vt *rankTracker[StateT, VoteT], +) LatestQuorumCertificate() models.QuorumCertificate { + return vt.livenessState.LatestQuorumCertificate +} + +// PriorRankTimeoutCertificate returns the TC for the last rank; this is nil if +// and only if the current rank was entered with a QC. +func ( + vt *rankTracker[StateT, VoteT], +) PriorRankTimeoutCertificate() models.TimeoutCertificate { + return vt.livenessState.PriorRankTimeoutCertificate +} + +// ReceiveQuorumCertificate ingests a QC, which might advance the current rank. +// Panics for nil input! QCs with ranks smaller or equal to the newest QC known +// are a no-op. ReceiveQuorumCertificate returns the resulting rank after +// processing the QC. No errors are expected, any error should be treated as +// exception. +func (vt *rankTracker[StateT, VoteT]) ReceiveQuorumCertificate( + qc models.QuorumCertificate, +) ( + uint64, + error, +) { + rank := vt.livenessState.CurrentRank + if qc.GetRank() < rank { + // If the QC is for a past rank, our rank does not change. Nevertheless, the + // QC might be newer than the newest QC we know, since rank changes can + // happen through TCs as well. While not very likely, it is possible that + // individual replicas know newer QCs than the ones previously included in + // TCs. E.g. a primary that crashed before it could construct its state has + // rebooted and is now sharing its newest QC as part of a TimeoutState. + err := vt.updateNewestQC(qc) + if err != nil { + return rank, fmt.Errorf("could not update tracked newest QC: %w", err) + } + return rank, nil + } + + // supermajority of replicas have already voted during round `qc.rank`, hence + // it is safe to proceed to subsequent rank + newRank := qc.GetRank() + 1 + err := vt.updateLivenessState(newRank, qc, nil) + if err != nil { + return 0, fmt.Errorf("failed to update liveness data: %w", err) + } + return newRank, nil +} + +// ReceiveTimeoutCertificate ingests a TC, which might advance the current rank. +// A nil TC is accepted as input, so that callers may pass in e.g. +// `Proposal.PriorRankTimeoutCertificate`, which may or may not have a value. It +// returns the resulting rank after processing the TC and embedded QC. No errors +// are expected, any error should be treated as exception. +func (vt *rankTracker[StateT, VoteT]) ReceiveTimeoutCertificate( + tc models.TimeoutCertificate, +) (uint64, error) { + rank := vt.livenessState.CurrentRank + + if tc == nil { + return rank, nil + } + + if tc.GetRank() < rank { + // TC and the embedded QC are for a past rank, hence our rank does not + // change. Nevertheless, the QC might be newer than the newest QC we know. + // While not very likely, it is possible that individual replicas know newer + // QCs than the ones previously included in any TCs. E.g.
a primary that + // crashed before it could construct its state has rebooted and now + // contributed its newest QC to this TC. + err := vt.updateNewestQC(tc.GetLatestQuorumCert()) + if err != nil { + return 0, fmt.Errorf("could not update tracked newest QC: %w", err) + } + return rank, nil + } + + // supermajority of replicas have already reached their timeout for rank + // `tc.GetRank()`, hence it is safe to proceed to subsequent rank + newRank := tc.GetRank() + 1 + err := vt.updateLivenessState(newRank, tc.GetLatestQuorumCert(), tc) + if err != nil { + return 0, fmt.Errorf("failed to update liveness state: %w", err) + } + return newRank, nil +} + +// updateLivenessState updates the current rank, QC, and TC. We want to avoid +// unnecessary database writes, which we enforce by requiring that the rank +// number is STRICTLY monotonically increasing. Otherwise, an exception is +// returned. No errors are expected, any error should be treated as exception. +func (vt *rankTracker[StateT, VoteT]) updateLivenessState( + newRank uint64, + qc models.QuorumCertificate, + tc models.TimeoutCertificate, +) error { + if newRank <= vt.livenessState.CurrentRank { + // This should never happen: in the current implementation, it is trivially + // apparent that newRank is _always_ larger than currentRank. This check is + // to protect the code from future modifications that violate the necessary + // condition for STRICTLY monotonically increasing rank numbers. + return fmt.Errorf( + "cannot move from rank %d to %d: currentRank must be strictly monotonically increasing", + vt.livenessState.CurrentRank, + newRank, + ) + } + + vt.livenessState.CurrentRank = newRank + if vt.livenessState.LatestQuorumCertificate.GetRank() < qc.GetRank() { + vt.livenessState.LatestQuorumCertificate = qc + } + vt.livenessState.PriorRankTimeoutCertificate = tc + err := vt.store.PutLivenessState(&vt.livenessState) + if err != nil { + return fmt.Errorf("could not persist liveness state: %w", err) + } + return nil +} + +// updateNewestQC updates the highest QC tracked by the rankTracker, iff `qc` has a larger +// rank than the newest stored QC. Otherwise, this method is a no-op. +// No errors are expected, any error should be treated as exception.
+func (vt *rankTracker[StateT, VoteT]) updateNewestQC( + qc models.QuorumCertificate, +) error { + if vt.livenessState.LatestQuorumCertificate.GetRank() >= qc.GetRank() { + return nil + } + + vt.livenessState.LatestQuorumCertificate = qc + err := vt.store.PutLivenessState(&vt.livenessState) + if err != nil { + return fmt.Errorf("could not persist liveness state: %w", err) + } + + return nil +} diff --git a/consensus/pacemaker/rank_tracker_test.go b/consensus/pacemaker/rank_tracker_test.go new file mode 100644 index 0000000..7ca6236 --- /dev/null +++ b/consensus/pacemaker/rank_tracker_test.go @@ -0,0 +1,253 @@ +package pacemaker + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestRankTracker(t *testing.T) { + suite.Run(t, new(RankTrackerTestSuite)) +} + +type RankTrackerTestSuite struct { + suite.Suite + + initialRank uint64 + initialQC models.QuorumCertificate + initialTC models.TimeoutCertificate + + livenessState *models.LivenessState // Caution: we hand the memory address to rankTracker, which could modify this + store *mocks.ConsensusStore[*helper.TestVote] + tracker rankTracker[*helper.TestState, *helper.TestVote] +} + +func (s *RankTrackerTestSuite) SetupTest() { + s.initialRank = 5 + s.initialQC = helper.MakeQC(helper.WithQCRank(4)) + s.initialTC = nil + + s.livenessState = &models.LivenessState{ + LatestQuorumCertificate: s.initialQC, + PriorRankTimeoutCertificate: s.initialTC, + CurrentRank: s.initialRank, // we entered rank 5 by observing a QC for rank 4 + } + s.store = mocks.NewConsensusStore[*helper.TestVote](s.T()) + s.store.On("GetLivenessState", mock.Anything).Return(s.livenessState, nil).Once() + + var err error + s.tracker, err = newRankTracker[*helper.TestState, *helper.TestVote](nil, s.store) + require.NoError(s.T(), err) +} + +// confirmResultingState asserts that the rank tracker's stored LivenessState reflects the provided +// current rank, newest QC, and last rank TC. 
+func (s *RankTrackerTestSuite) confirmResultingState(curRank uint64, qc models.QuorumCertificate, tc models.TimeoutCertificate) { + require.Equal(s.T(), curRank, s.tracker.CurrentRank()) + require.Equal(s.T(), qc, s.tracker.LatestQuorumCertificate()) + if tc == nil { + require.Nil(s.T(), s.tracker.PriorRankTimeoutCertificate()) + } else { + require.Equal(s.T(), tc, s.tracker.PriorRankTimeoutCertificate()) + } +} + +// TestReceiveQuorumCertificate_SkipIncreaseRankThroughQC tests that rankTracker increases rank when receiving QC, +// if applicable, by skipping ranks +func (s *RankTrackerTestSuite) TestReceiveQuorumCertificate_SkipIncreaseRankThroughQC() { + // seeing a QC for the current rank should advance the rank by one + qc := QC(s.initialRank) + expectedResultingRank := s.initialRank + 1 + s.store.On("PutLivenessState", LivenessState(qc)).Return(nil).Once() + resultingCurrentRank, err := s.tracker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedResultingRank, resultingCurrentRank) + s.confirmResultingState(expectedResultingRank, qc, nil) + + // seeing a QC for 10 ranks in the future should advance to rank +11 + curRank := s.tracker.CurrentRank() + qc = QC(curRank + 10) + expectedResultingRank = curRank + 11 + s.store.On("PutLivenessState", LivenessState(qc)).Return(nil).Once() + resultingCurrentRank, err = s.tracker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedResultingRank, resultingCurrentRank) + s.confirmResultingState(expectedResultingRank, qc, nil) +} + +// TestReceiveTimeoutCertificate_SkipIncreaseRankThroughTC tests that rankTracker increases rank when receiving TC, +// if applicable, by skipping ranks +func (s *RankTrackerTestSuite) TestReceiveTimeoutCertificate_SkipIncreaseRankThroughTC() { + // seeing a TC for the current rank should advance the rank by one + qc := s.initialQC + tc := helper.MakeTC(helper.WithTCRank(s.initialRank), helper.WithTCNewestQC(qc)) + expectedResultingRank := s.initialRank + 1 + expectedLivenessState := &models.LivenessState{ + CurrentRank: expectedResultingRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + resultingCurrentRank, err := s.tracker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedResultingRank, resultingCurrentRank) + s.confirmResultingState(expectedResultingRank, qc, tc) + + // seeing a TC for 10 ranks in the future should advance to rank +11 + curRank := s.tracker.CurrentRank() + tc = helper.MakeTC(helper.WithTCRank(curRank+10), helper.WithTCNewestQC(qc)) + expectedResultingRank = curRank + 11 + expectedLivenessState = &models.LivenessState{ + CurrentRank: expectedResultingRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + resultingCurrentRank, err = s.tracker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedResultingRank, resultingCurrentRank) + s.confirmResultingState(expectedResultingRank, qc, tc) +} + +// TestReceiveTimeoutCertificate_IgnoreOldTC tests that rankTracker ignores old TC and doesn't advance round. 
+func (s *RankTrackerTestSuite) TestReceiveTimeoutCertificate_IgnoreOldTC() { + curRank := s.tracker.CurrentRank() + tc := helper.MakeTC( + helper.WithTCRank(curRank-1), + helper.WithTCNewestQC(QC(curRank-2))) + resultingCurrentRank, err := s.tracker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), curRank, resultingCurrentRank) + s.confirmResultingState(curRank, s.initialQC, s.initialTC) +} + +// TestReceiveTimeoutCertificate_IgnoreNilTC tests that rankTracker accepts nil TC as allowed input but doesn't trigger a new rank event +func (s *RankTrackerTestSuite) TestReceiveTimeoutCertificate_IgnoreNilTC() { + curRank := s.tracker.CurrentRank() + resultingCurrentRank, err := s.tracker.ReceiveTimeoutCertificate(nil) + require.NoError(s.T(), err) + require.Equal(s.T(), curRank, resultingCurrentRank) + s.confirmResultingState(curRank, s.initialQC, s.initialTC) +} + +// TestReceiveQuorumCertificate_PersistException tests that rankTracker propagates exception +// when processing QC +func (s *RankTrackerTestSuite) TestReceiveQuorumCertificate_PersistException() { + qc := QC(s.initialRank) + exception := errors.New("store-exception") + s.store.On("PutLivenessState", mock.Anything).Return(exception).Once() + + _, err := s.tracker.ReceiveQuorumCertificate(qc) + require.ErrorIs(s.T(), err, exception) +} + +// TestReceiveTimeoutCertificate_PersistException tests that rankTracker propagates exception +// when processing TC +func (s *RankTrackerTestSuite) TestReceiveTimeoutCertificate_PersistException() { + tc := helper.MakeTC(helper.WithTCRank(s.initialRank)) + exception := errors.New("store-exception") + s.store.On("PutLivenessState", mock.Anything).Return(exception).Once() + + _, err := s.tracker.ReceiveTimeoutCertificate(tc) + require.ErrorIs(s.T(), err, exception) +} + +// TestReceiveQuorumCertificate_InvalidatesPriorRankTimeoutCertificate verifies that rankTracker does not retain any old +// TC if the last rank change was triggered by observing a QC from the previous rank. +func (s *RankTrackerTestSuite) TestReceiveQuorumCertificate_InvalidatesPriorRankTimeoutCertificate() { + initialRank := s.tracker.CurrentRank() + tc := helper.MakeTC(helper.WithTCRank(initialRank), + helper.WithTCNewestQC(s.initialQC)) + s.store.On("PutLivenessState", mock.Anything).Return(nil).Twice() + resultingCurrentRank, err := s.tracker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), initialRank+1, resultingCurrentRank) + require.NotNil(s.T(), s.tracker.PriorRankTimeoutCertificate()) + + qc := QC(initialRank + 1) + resultingCurrentRank, err = s.tracker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), initialRank+2, resultingCurrentRank) + require.Nil(s.T(), s.tracker.PriorRankTimeoutCertificate()) +} + +// TestReceiveQuorumCertificate_IgnoreOldQC tests that rankTracker ignores old QC and doesn't advance round +func (s *RankTrackerTestSuite) TestReceiveQuorumCertificate_IgnoreOldQC() { + qc := QC(s.initialRank - 1) + resultingCurrentRank, err := s.tracker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), s.initialRank, resultingCurrentRank) + s.confirmResultingState(s.initialRank, s.initialQC, s.initialTC) +} + +// TestReceiveQuorumCertificate_UpdateLatestQuorumCertificate tests that rankTracker tracks the newest QC even if it has advanced past this rank. 
+// The only one scenario, where it is possible to receive a QC for a rank that we already has passed, yet this QC +// being newer than any known one is: +// - We advance ranks via TC. +// - A QC for a passed rank that is newer than any known one can arrive in 3 ways: +// 1. A QC (e.g. from the vote aggregator) +// 2. A QC embedded into a TC, where the TC is for a passed rank +// 3. A QC embedded into a TC, where the TC is for the current or newer rank +func (s *RankTrackerTestSuite) TestReceiveQuorumCertificate_UpdateLatestQuorumCertificate() { + // Setup + // * we start in rank 5 + // * newest known QC is for rank 4 + // * we receive a TC for rank 55, which results in entering rank 56 + initialRank := s.tracker.CurrentRank() // + tc := helper.MakeTC(helper.WithTCRank(initialRank+50), helper.WithTCNewestQC(s.initialQC)) + s.store.On("PutLivenessState", mock.Anything).Return(nil).Once() + expectedRank := uint64(56) // processing the TC should results in entering rank 56 + resultingCurrentRank, err := s.tracker.ReceiveTimeoutCertificate(tc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedRank, resultingCurrentRank) + s.confirmResultingState(expectedRank, s.initialQC, tc) + + // Test 1: add QC for rank 9, which is newer than our initial QC - it should become our newest QC + qc := QC(s.tracker.LatestQuorumCertificate().GetRank() + 2) + expectedLivenessState := &models.LivenessState{ + CurrentRank: expectedRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + resultingCurrentRank, err = s.tracker.ReceiveQuorumCertificate(qc) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedRank, resultingCurrentRank) + s.confirmResultingState(expectedRank, qc, tc) + + // Test 2: receiving a TC for a passed rank, but the embedded QC is newer than the one we know + qc2 := QC(s.tracker.LatestQuorumCertificate().GetRank() + 4) + olderTC := helper.MakeTC(helper.WithTCRank(qc2.GetRank()+3), helper.WithTCNewestQC(qc2)) + expectedLivenessState = &models.LivenessState{ + CurrentRank: expectedRank, + PriorRankTimeoutCertificate: tc, + LatestQuorumCertificate: qc2, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + resultingCurrentRank, err = s.tracker.ReceiveTimeoutCertificate(olderTC) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedRank, resultingCurrentRank) + s.confirmResultingState(expectedRank, qc2, tc) + + // Test 3: receiving a TC for a newer rank, the embedded QC is newer than the one we know, but still for a passed rank + qc3 := QC(s.tracker.LatestQuorumCertificate().GetRank() + 7) + finalRank := expectedRank + 1 + newestTC := helper.MakeTC(helper.WithTCRank(expectedRank), helper.WithTCNewestQC(qc3)) + expectedLivenessState = &models.LivenessState{ + CurrentRank: finalRank, + PriorRankTimeoutCertificate: newestTC, + LatestQuorumCertificate: qc3, + } + s.store.On("PutLivenessState", expectedLivenessState).Return(nil).Once() + resultingCurrentRank, err = s.tracker.ReceiveTimeoutCertificate(newestTC) + require.NoError(s.T(), err) + require.Equal(s.T(), finalRank, resultingCurrentRank) + s.confirmResultingState(finalRank, qc3, newestTC) +} diff --git a/consensus/pacemaker/timeout/config.go b/consensus/pacemaker/timeout/config.go new file mode 100644 index 0000000..b4d57e4 --- /dev/null +++ b/consensus/pacemaker/timeout/config.go @@ -0,0 +1,124 @@ +package timeout + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) 
+ +// Config contains the configuration parameters for a Truncated Exponential +// Backoff, as implemented by the `timeout.Controller`: +// - On timeout: increase timeout by multiplicative factor +// `TimeoutAdjustmentFactor`. This results in exponentially growing timeout +// duration on multiple subsequent timeouts. +// - On progress: decrease timeout by multiplicative factor +// `TimeoutAdjustmentFactor`. +// +// Config is implemented such that it can be passed by value. +type Config struct { + // MinReplicaTimeout is the minimum the timeout can decrease to [MILLISECONDS] + MinReplicaTimeout float64 + // MaxReplicaTimeout is the maximum value the timeout can increase to + // [MILLISECONDS] + MaxReplicaTimeout float64 + // TimeoutAdjustmentFactor: MULTIPLICATIVE factor for increasing timeout when + // rank change was triggered by a TC (unhappy path) or decreasing the timeout + // on progress + TimeoutAdjustmentFactor float64 + // HappyPathMaxRoundFailures is the number of rounds without progress where we + // still consider ourselves to be on the hot path of execution. After exceeding this value + // we will start increasing timeout values. + HappyPathMaxRoundFailures uint64 + // MaxTimeoutStateRebroadcastInterval is the maximum value for timeout state + // rebroadcast interval [MILLISECONDS] + MaxTimeoutStateRebroadcastInterval float64 +} + +var DefaultConfig = NewDefaultConfig() + +// NewDefaultConfig returns a default timeout configuration. +// We explicitly provide a method here, which demonstrates in-code how +// to compute standard values from some basic quantities. +func NewDefaultConfig() Config { + // minReplicaTimeout is the lower bound on the replica's timeout value, this + // is also the initial timeout with which replicas will start their execution. + // If HotStuff is running at full speed, 1200ms should be enough. However, we + // add some buffer. This value is for instant message delivery. + minReplicaTimeout := 3 * time.Second + maxReplicaTimeout := 1 * time.Minute + timeoutAdjustmentFactor := 1.2 + // after 6 successively failed rounds, the pacemaker leaves the hot path and + // starts increasing timeouts (recovery mode) + happyPathMaxRoundFailures := uint64(6) + maxRebroadcastInterval := 5 * time.Second + + conf, err := NewConfig( + minReplicaTimeout, + maxReplicaTimeout, + timeoutAdjustmentFactor, + happyPathMaxRoundFailures, + maxRebroadcastInterval, + ) + if err != nil { + // we check in a unit test that this does not happen + panic("Default config is not compliant with timeout Config requirements") + } + + return conf +} + +// NewConfig creates a new timeout Config.
+// - minReplicaTimeout: minimal timeout value for replica round [Milliseconds] +// Consistency requirement: must be positive +// - maxReplicaTimeout: maximal timeout value for replica round [Milliseconds] +// Consistency requirement: must be non-negative and cannot be smaller than +// minReplicaTimeout +// - timeoutAdjustmentFactor: multiplicative factor for adjusting timeout +// duration +// Consistency requirement: must be strictly larger than 1 +// - happyPathMaxRoundFailures: number of successive failed rounds after which +// we will start increasing timeouts +// - maxRebroadcastInterval: maximum interval between timeout state rebroadcasts [Milliseconds] +// Consistency requirement: must be positive +// +// Returns `models.ConfigurationError` if any of the consistency requirements is +// violated. +func NewConfig( + minReplicaTimeout time.Duration, + maxReplicaTimeout time.Duration, + timeoutAdjustmentFactor float64, + happyPathMaxRoundFailures uint64, + maxRebroadcastInterval time.Duration, +) (Config, error) { + if minReplicaTimeout <= 0 { + return Config{}, models.NewConfigurationErrorf( + "minReplicaTimeout must be a positive number [milliseconds]", + ) + } + if maxReplicaTimeout < minReplicaTimeout { + return Config{}, models.NewConfigurationErrorf( + "maxReplicaTimeout cannot be smaller than minReplicaTimeout", + ) + } + if timeoutAdjustmentFactor <= 1 { + return Config{}, models.NewConfigurationErrorf( + "timeoutAdjustmentFactor must be strictly bigger than 1", + ) + } + if maxRebroadcastInterval <= 0 { + return Config{}, models.NewConfigurationErrorf( + "maxRebroadcastInterval must be a positive number [milliseconds]", + ) + } + + tc := Config{ + MinReplicaTimeout: float64(minReplicaTimeout.Milliseconds()), + MaxReplicaTimeout: float64(maxReplicaTimeout.Milliseconds()), + TimeoutAdjustmentFactor: timeoutAdjustmentFactor, + HappyPathMaxRoundFailures: happyPathMaxRoundFailures, + MaxTimeoutStateRebroadcastInterval: float64(maxRebroadcastInterval.Milliseconds()), + } + return tc, nil +} diff --git a/consensus/pacemaker/timeout/config_test.go b/consensus/pacemaker/timeout/config_test.go new file mode 100644 index 0000000..b11f6b4 --- /dev/null +++ b/consensus/pacemaker/timeout/config_test.go @@ -0,0 +1,83 @@ +package timeout + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TestConstructor tests that the constructor performs the needed checks and +// returns expected values for different inputs.
+func TestConstructor(t *testing.T) { + c, err := NewConfig( + 1200*time.Millisecond, + 2000*time.Millisecond, + 1.5, + 3, + 2000*time.Millisecond, + ) + require.NoError(t, err) + require.Equal(t, float64(1200), c.MinReplicaTimeout) + require.Equal(t, float64(2000), c.MaxReplicaTimeout) + require.Equal(t, float64(1.5), c.TimeoutAdjustmentFactor) + require.Equal(t, uint64(3), c.HappyPathMaxRoundFailures) + require.Equal(t, float64(2000), c.MaxTimeoutStateRebroadcastInterval) + + // should not allow negative minReplicaTimeout + c, err = NewConfig( + -1200*time.Millisecond, + 2000*time.Millisecond, + 1.5, + 3, + 2000*time.Millisecond, + ) + require.True(t, models.IsConfigurationError(err)) + + // should not allow 0 minReplicaTimeout + c, err = NewConfig(0, 2000*time.Millisecond, 1.5, 3, 2000*time.Millisecond) + require.True(t, models.IsConfigurationError(err)) + + // should not allow maxReplicaTimeout < minReplicaTimeout + c, err = NewConfig( + 1200*time.Millisecond, + 1000*time.Millisecond, + 1.5, + 3, + 2000*time.Millisecond, + ) + require.True(t, models.IsConfigurationError(err)) + + // should not allow timeoutIncrease to be 1.0 or smaller + c, err = NewConfig( + 1200*time.Millisecond, + 2000*time.Millisecond, + 1.0, + 3, + 2000*time.Millisecond, + ) + require.True(t, models.IsConfigurationError(err)) + + // should accept only positive values for maxRebroadcastInterval + c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, 0) + require.True(t, models.IsConfigurationError(err)) + c, err = NewConfig( + 1200*time.Millisecond, + 2000*time.Millisecond, + 1.5, + 3, + -1000*time.Millisecond, + ) + require.True(t, models.IsConfigurationError(err)) +} + +// TestDefaultConfig tests that default config is filled with correct values. +func TestDefaultConfig(t *testing.T) { + c := NewDefaultConfig() + + require.Equal(t, float64(3000), c.MinReplicaTimeout) + require.Equal(t, 1.2, c.TimeoutAdjustmentFactor) + require.Equal(t, uint64(6), c.HappyPathMaxRoundFailures) +} diff --git a/consensus/pacemaker/timeout/controller.go b/consensus/pacemaker/timeout/controller.go new file mode 100644 index 0000000..6c7c274 --- /dev/null +++ b/consensus/pacemaker/timeout/controller.go @@ -0,0 +1,185 @@ +package timeout + +import ( + "context" + "math" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Controller implements the following truncated exponential backoff: +// +// duration = t_min * min(b ^ ((r-k) * θ(r-k)), t_max) +// +// For practical purpose we will transform this formula into: +// +// duration(r) = t_min * b ^ (min((r-k) * θ(r-k)), c) +// where c = log_b (t_max / t_min). +// +// In described formula: +// +// k - is number of rounds we expect during hot path, after failing this many +// rounds, we will start increasing timeouts. +// b - timeout increase factor +// r - failed rounds counter +// θ - Heaviside step function +// t_min/t_max - minimum/maximum round duration +// +// By manipulating `r` after observing progress or lack thereof, we are +// achieving exponential increase/decrease of round durations. +// - on timeout: increase number of failed rounds, this results in exponential +// growing round duration +// on multiple subsequent timeouts, after exceeding k. +// - on progress: decrease number of failed rounds, this results in +// exponential decrease of round duration. 
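As a sanity check on the backoff formula documented above, here is a minimal, self-contained sketch (not part of the patch) that evaluates the truncated exponential backoff with the same sample parameters used by this package's controller tests (t_min = 100 ms, t_max = 10 s, b = 1.5, k = 3):

package main

import (
	"fmt"
	"math"
)

// duration mirrors the documented formula: t_min * b^min((r-k)*θ(r-k), c),
// with truncation exponent c = log_b(t_max / t_min).
func duration(r, k uint64, tMin, tMax, b float64) float64 {
	if r <= k {
		return tMin // happy path: no backoff while failures stay within k rounds
	}
	c := math.Log(tMax/tMin) / math.Log(b) // truncation exponent
	e := math.Min(float64(r-k), c)
	return tMin * math.Pow(b, e)
}

func main() {
	// r=0..3 -> 100 ms, r=4 -> 150 ms, r=5 -> 225 ms, ... eventually capped at 10000 ms
	for r := uint64(0); r <= 8; r++ {
		fmt.Printf("failed rounds r=%d: %.1f ms\n", r, duration(r, 3, 100, 10000, 1.5))
	}
}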
+type Controller struct {
+	cfg            Config
+	timeoutChannel chan time.Time
+	stopTicker     context.CancelFunc
+	maxExponent    float64 // max exponent for exponential function, derived from maximum round duration
+	r              uint64  // failed rounds counter, higher value results in longer round duration
+}
+
+// NewController creates a new Controller. Note that Config is implemented
+// such that it can be passed by value.
+func NewController(timeoutConfig Config) *Controller {
+	// The initial value for the timeout channel is a closed channel which returns
+	// immediately. This prevents indefinite blocking when no timeout has been
+	// started.
+	startChannel := make(chan time.Time)
+	close(startChannel)
+
+	// We need to calculate log_b(t_max/t_min). Go doesn't support logarithms
+	// with a custom base, so we apply the change-of-base transformation to get
+	// around this:
+	//   log_b(x) = log_e(x) / log_e(b)
+	maxExponent := math.Log(
+		timeoutConfig.MaxReplicaTimeout/timeoutConfig.MinReplicaTimeout,
+	) / math.Log(timeoutConfig.TimeoutAdjustmentFactor)
+
+	tc := Controller{
+		cfg:            timeoutConfig,
+		timeoutChannel: startChannel,
+		stopTicker:     func() {},
+		maxExponent:    maxExponent,
+	}
+	return &tc
+}
+
+// Channel returns a channel that will receive the specific timeout.
+// A new channel is created on each call of `StartTimeout`.
+// Returns a closed channel if no timer has been started.
+func (t *Controller) Channel() <-chan time.Time {
+	return t.timeoutChannel
+}
+
+// StartTimeout starts the timeout for the given rank and returns the timer
+// info.
+func (t *Controller) StartTimeout(
+	ctx context.Context,
+	rank uint64,
+) models.TimerInfo {
+	t.stopTicker() // stop old timeout
+
+	// setup new timer
+	durationMs := t.replicaTimeout() // duration of current rank in units of Milliseconds
+	rebroadcastIntervalMs := math.Min(
+		durationMs,
+		t.cfg.MaxTimeoutStateRebroadcastInterval,
+	) // time between attempted re-broadcast of timeouts if there is no progress
+	t.timeoutChannel = make(chan time.Time, 1) // channel for delivering timeouts
+
+	// start timeout logic for (re-)broadcasting timeout objects on a regular
+	// basis as long as we are in the same round.
+	var childContext context.Context
+	childContext, t.stopTicker = context.WithCancel(ctx)
+	duration := time.Duration(durationMs) * time.Millisecond
+	rebroadcastInterval := time.Duration(rebroadcastIntervalMs) * time.Millisecond
+	go tickAfterTimeout(
+		childContext,
+		duration,
+		rebroadcastInterval,
+		t.timeoutChannel,
+	)
+
+	return models.TimerInfo{
+		Rank:      rank,
+		StartTime: time.Now().UTC(),
+		Duration:  duration,
+	}
+}
+
+// tickAfterTimeout is a utility function which:
+//  1. waits for the initial timeout and then sends the current time to
+//     `timeoutChannel`
+//  2. and subsequently sends the current time every `tickInterval` to
+//     `timeoutChannel`
+//
+// If the receiver from the `timeoutChannel` falls behind and does not pick up
+// the events, we drop ticks until the receiver catches up. When cancelling
+// `ctx`, all timing logic stops. This approach allows for a concurrency-safe
+// implementation, where there is no unsafe state sharing between caller and
+// ticking logic.
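+//
+// A minimal caller-side sketch (illustrative only; in this codebase the
+// Controller is consumed by the pacemaker, see participant.NewParticipant):
+//
+//	info := controller.StartTimeout(ctx, rank) // info.Duration is this rank's timeout
+//	for {
+//		select {
+//		case <-controller.Channel():
+//			// initial timeout for this rank, or a subsequent rebroadcast tick
+//		case <-ctx.Done():
+//			return
+//		}
+//	}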
+func tickAfterTimeout( + ctx context.Context, + duration time.Duration, + tickInterval time.Duration, + timeoutChannel chan<- time.Time, +) { + // wait for initial timeout + timer := time.NewTimer(duration) + select { + case t := <-timer.C: + timeoutChannel <- t // forward initial timeout to the sink + case <-ctx.Done(): + timer.Stop() // allows timer to be garbage collected (before it expires) + return + } + + // after we have reached the initial timeout, sent to `tickSink` every + // `tickInterval` until cancelled + ticker := time.NewTicker(tickInterval) + for { + select { + case t := <-ticker.C: + timeoutChannel <- t // forward ticks to the sink + case <-ctx.Done(): + ticker.Stop() // critical for ticker to be garbage collected + return + } + } +} + +// replicaTimeout returns the duration of the current rank in milliseconds +// before we time out +func (t *Controller) replicaTimeout() float64 { + if t.r <= t.cfg.HappyPathMaxRoundFailures { + return t.cfg.MinReplicaTimeout + } + r := float64(t.r - t.cfg.HappyPathMaxRoundFailures) + if r >= t.maxExponent { + return t.cfg.MaxReplicaTimeout + } + // compute timeout duration [in milliseconds]: + return t.cfg.MinReplicaTimeout * math.Pow(t.cfg.TimeoutAdjustmentFactor, r) +} + +// OnTimeout indicates to the Controller that a rank change was triggered by a +// TC (unhappy path). +func (t *Controller) OnTimeout() { + if float64(t.r) >= t.maxExponent+float64(t.cfg.HappyPathMaxRoundFailures) { + return + } + t.r++ +} + +// OnProgressBeforeTimeout indicates to the Controller that progress was made +// _before_ the timeout was reached +func (t *Controller) OnProgressBeforeTimeout() { + if t.r > 0 { + t.r-- + } +} diff --git a/consensus/pacemaker/timeout/controller_test.go b/consensus/pacemaker/timeout/controller_test.go new file mode 100644 index 0000000..3126acd --- /dev/null +++ b/consensus/pacemaker/timeout/controller_test.go @@ -0,0 +1,157 @@ +package timeout + +import ( + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + minRepTimeout float64 = 100 // Milliseconds + maxRepTimeout float64 = 10000 // Milliseconds + timeoutAdjustmentFactor float64 = 1.5 // timeout duration adjustment factor + happyPathMaxRoundFailures uint64 = 3 // number of failed rounds before increasing timeouts +) + +func initTimeoutController(t *testing.T) *Controller { + tc, err := NewConfig( + time.Duration(minRepTimeout*1e6), + time.Duration(maxRepTimeout*1e6), + timeoutAdjustmentFactor, + happyPathMaxRoundFailures, + time.Duration(maxRepTimeout*1e6), + ) + if err != nil { + t.Fail() + } + return NewController(tc) +} + +// Test_TimeoutInitialization timeouts are initialized and reported properly +func Test_TimeoutInitialization(t *testing.T) { + tc := initTimeoutController(t) + assert.Equal(t, tc.replicaTimeout(), minRepTimeout) + + // verify that initially returned timeout channel is closed and `nil` is + // returned as `TimerInfo` + select { + case <-tc.Channel(): + break + default: + assert.Fail(t, "timeout channel did not return") + } + tc.Channel() +} + +// Test_TimeoutIncrease verifies that timeout increases exponentially +func Test_TimeoutIncrease(t *testing.T) { + tc := initTimeoutController(t) + + // advance failed rounds beyond `happyPathMaxRoundFailures`; + for r := uint64(0); r < happyPathMaxRoundFailures; r++ { + tc.OnTimeout() + } + + for r := 1; r <= 10; r += 1 { + tc.OnTimeout() + assert.Equal(t, + tc.replicaTimeout(), + minRepTimeout*math.Pow(timeoutAdjustmentFactor, float64(r)), + 
) + } +} + +// Test_TimeoutDecrease verifies that timeout decreases exponentially +func Test_TimeoutDecrease(t *testing.T) { + tc := initTimeoutController(t) + + // failed rounds counter + r := uint64(0) + + // advance failed rounds beyond `happyPathMaxRoundFailures`; subsequent + // progress should reduce timeout again + for ; r <= happyPathMaxRoundFailures*2; r++ { + tc.OnTimeout() + } + for ; r > happyPathMaxRoundFailures; r-- { + tc.OnProgressBeforeTimeout() + assert.Equal(t, + tc.replicaTimeout(), + minRepTimeout*math.Pow( + timeoutAdjustmentFactor, + float64(r-1-happyPathMaxRoundFailures), + ), + ) + } +} + +// Test_MinCutoff verifies that timeout does not decrease below minRepTimeout +func Test_MinCutoff(t *testing.T) { + tc := initTimeoutController(t) + + for r := uint64(0); r < happyPathMaxRoundFailures; r++ { + tc.OnTimeout() // replica timeout doesn't increase since r < happyPathMaxRoundFailures. + } + + tc.OnTimeout() // replica timeout increases 100 -> 3/2 * 100 = 150 + tc.OnTimeout() // replica timeout increases 150 -> 3/2 * 150 = 225 + tc.OnProgressBeforeTimeout() // replica timeout decreases 225 -> 180 * 2/3 = 150 + tc.OnProgressBeforeTimeout() // replica timeout decreases 150 -> 153 * 2/3 = 100 + tc.OnProgressBeforeTimeout() // replica timeout decreases 100 -> 100 * 2/3 = max(66.6, 100) = 100 + + tc.OnProgressBeforeTimeout() + assert.Equal(t, tc.replicaTimeout(), minRepTimeout) +} + +// Test_MaxCutoff verifies that timeout does not increase beyond timeout cap +func Test_MaxCutoff(t *testing.T) { + tc := initTimeoutController(t) + + // we update the following two values here in the test, which is a naive + // reference implementation + unboundedReferenceTimeout := minRepTimeout + r := -1 * int64(happyPathMaxRoundFailures) // only start increasing `unboundedReferenceTimeout` when this becomes positive + + // add timeouts until our `unboundedReferenceTimeout` exceeds the limit + for { + tc.OnTimeout() + if r++; r > 0 { + unboundedReferenceTimeout *= timeoutAdjustmentFactor + } + if unboundedReferenceTimeout > maxRepTimeout { + assert.True(t, tc.replicaTimeout() <= maxRepTimeout) + return // end of test + } + } +} + +// Test_CombinedIncreaseDecreaseDynamics verifies that timeout increases and decreases +// work as expected in combination +func Test_CombinedIncreaseDecreaseDynamics(t *testing.T) { + increase, decrease := true, false + testDynamicSequence := func(seq []bool) { + tc := initTimeoutController(t) + tc.cfg.HappyPathMaxRoundFailures = 0 // set happy path rounds to zero to simplify calculation + numberIncreases, numberDecreases := 0, 0 + for _, increase := range seq { + if increase { + numberIncreases += 1 + tc.OnTimeout() + } else { + numberDecreases += 1 + tc.OnProgressBeforeTimeout() + } + } + + expectedRepTimeout := minRepTimeout * math.Pow(timeoutAdjustmentFactor, float64(numberIncreases-numberDecreases)) + numericalError := math.Abs(expectedRepTimeout - tc.replicaTimeout()) + require.LessOrEqual(t, numericalError, 1.0) // at most one millisecond numerical error + } + + testDynamicSequence([]bool{increase, increase, increase, decrease, decrease, decrease}) + testDynamicSequence([]bool{increase, decrease, increase, decrease, increase, decrease}) + testDynamicSequence([]bool{increase, increase, increase, increase, increase, decrease}) +} diff --git a/consensus/participant/participant.go b/consensus/participant/participant.go new file mode 100644 index 0000000..5371b7e --- /dev/null +++ b/consensus/participant/participant.go @@ -0,0 +1,174 @@ +package participant + 
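+
+// NewParticipant (below) recovers the pending consensus state (Forks plus the
+// QCs and TCs collected from pending proposals), wires the pacemaker, safety
+// rules, state producer and event handler into an event loop, and subscribes
+// that loop to the vote and timeout collector distributors.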
+import ( + "fmt" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/eventhandler" + "source.quilibrium.com/quilibrium/monorepo/consensus/eventloop" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker" + "source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout" + "source.quilibrium.com/quilibrium/monorepo/consensus/recovery" + "source.quilibrium.com/quilibrium/monorepo/consensus/safetyrules" + "source.quilibrium.com/quilibrium/monorepo/consensus/stateproducer" +) + +// NewParticipant initializes the EventLoop instance with needed dependencies +func NewParticipant[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + logger consensus.TraceLogger, + committee consensus.DynamicCommittee, + signer consensus.Signer[StateT, VoteT], + prover consensus.LeaderProvider[StateT, PeerIDT, CollectedT], + voter consensus.VotingProvider[StateT, VoteT, PeerIDT], + notifier consensus.Consumer[StateT, VoteT], + consensusStore consensus.ConsensusStore[VoteT], + signatureAggregator consensus.SignatureAggregator, + consensusVerifier consensus.Verifier[VoteT], + voteCollectorDistributor *pubsub.VoteCollectorDistributor[VoteT], + timeoutCollectorDistributor *pubsub.TimeoutCollectorDistributor[VoteT], + forks consensus.Forks[StateT], + validator consensus.Validator[StateT, VoteT], + voteAggregator consensus.VoteAggregator[StateT, VoteT], + timeoutAggregator consensus.TimeoutAggregator[VoteT], + finalizer consensus.Finalizer, + filter []byte, + trustedRoot *models.CertifiedState[StateT], + pending []*models.SignedProposal[StateT, VoteT], +) (*eventloop.EventLoop[StateT, VoteT], error) { + cfg, err := timeout.NewConfig( + 20*time.Second, + 3*time.Minute, + 1.2, + 6, + 28*time.Second, + ) + if err != nil { + return nil, err + } + + livenessState, err := consensusStore.GetLivenessState(filter) + if err != nil { + livenessState = &models.LivenessState{ + Filter: filter, + CurrentRank: 0, + LatestQuorumCertificate: trustedRoot.CertifyingQuorumCertificate, + PriorRankTimeoutCertificate: nil, + } + err = consensusStore.PutLivenessState(livenessState) + if err != nil { + return nil, err + } + } + + consensusState, err := consensusStore.GetConsensusState(filter) + if err != nil { + consensusState = &models.ConsensusState[VoteT]{ + FinalizedRank: trustedRoot.Rank(), + LatestAcknowledgedRank: trustedRoot.Rank(), + } + err = consensusStore.PutConsensusState(consensusState) + if err != nil { + return nil, err + } + } + + // prune vote aggregator to initial rank + voteAggregator.PruneUpToRank(trustedRoot.Rank()) + timeoutAggregator.PruneUpToRank(trustedRoot.Rank()) + + // recover HotStuff state from all pending states + qcCollector := recovery.NewCollector[models.QuorumCertificate]() + tcCollector := recovery.NewCollector[models.TimeoutCertificate]() + err = recovery.Recover( + logger, + pending, + recovery.ForksState[StateT, VoteT](forks), // add pending states to Forks + recovery.CollectParentQCs[StateT, VoteT](qcCollector), // collect QCs from all pending state to initialize PaceMaker (below) + recovery.CollectTCs[StateT, VoteT](tcCollector), // collect TCs from all pending state to initialize PaceMaker (below) + ) + if err != nil { + return nil, fmt.Errorf("failed to scan tree of pending states: %w", err) + } + + // initialize the 
pacemaker + controller := timeout.NewController(cfg) + pacemaker, err := pacemaker.NewPacemaker[StateT, VoteT]( + filter, + controller, + pacemaker.NewStaticProposalDurationProvider(8*time.Second), + notifier, + consensusStore, + logger, + pacemaker.WithQCs[StateT, VoteT](qcCollector.Retrieve()...), + pacemaker.WithTCs[StateT, VoteT](tcCollector.Retrieve()...), + ) + if err != nil { + return nil, fmt.Errorf("could not initialize flow pacemaker: %w", err) + } + + // initialize the safetyRules + safetyRules, err := safetyrules.NewSafetyRules[StateT, VoteT]( + filter, + signer, + consensusStore, + committee, + ) + if err != nil { + return nil, fmt.Errorf("could not initialize safety rules: %w", err) + } + + // initialize state producer + producer, err := stateproducer.NewStateProducer[ + StateT, + VoteT, + PeerIDT, + CollectedT, + ](safetyRules, committee, prover) + if err != nil { + return nil, fmt.Errorf("could not initialize state producer: %w", err) + } + + // initialize the event handler + eventHandler, err := eventhandler.NewEventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, + ]( + pacemaker, + producer, + forks, + consensusStore, + committee, + safetyRules, + notifier, + logger, + ) + if err != nil { + return nil, fmt.Errorf("could not initialize event handler: %w", err) + } + + // initialize and return the event loop + loop, err := eventloop.NewEventLoop( + logger, + eventHandler, + time.Now(), + ) + if err != nil { + return nil, fmt.Errorf("could not initialize event loop: %w", err) + } + + // add observer, event loop needs to receive events from distributor + voteCollectorDistributor.AddVoteCollectorConsumer(loop) + timeoutCollectorDistributor.AddTimeoutCollectorConsumer(loop) + + return loop, nil +} diff --git a/consensus/recovery/recover.go b/consensus/recovery/recover.go new file mode 100644 index 0000000..2857bff --- /dev/null +++ b/consensus/recovery/recover.go @@ -0,0 +1,142 @@ +package recovery + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateScanner describes a function for ingesting pending states. +// Any returned errors are considered fatal. +type StateScanner[StateT models.Unique, VoteT models.Unique] func( + proposal *models.SignedProposal[StateT, VoteT], +) error + +// Recover is a utility method for recovering the HotStuff state after a +// restart. It receives the list `pending` containing _all_ states that +// - have passed the compliance layer and stored in the protocol state +// - descend from the latest finalized state +// - are listed in ancestor-first order (i.e. 
for any state B ∈ pending, B's +// parent must be listed before B, unless B's parent is the latest finalized +// state) +// +// CAUTION: all pending states are required to be valid (guaranteed if the state +// passed the compliance layer) +func Recover[StateT models.Unique, VoteT models.Unique]( + log consensus.TraceLogger, + pending []*models.SignedProposal[StateT, VoteT], + scanners ...StateScanner[StateT, VoteT], +) error { + log.Trace( + "recovery started", + consensus.Int64Param("total", int64(len(pending))), + ) + + // add all pending states to forks + for _, proposal := range pending { + for _, s := range scanners { + err := s(proposal) + if err != nil { + return fmt.Errorf("scanner failed to ingest proposal: %w", err) + } + } + log.Trace( + "state recovered", + consensus.Uint64Param("rank", proposal.State.Rank), + consensus.IdentityParam("state_id", proposal.State.Identifier), + ) + } + + log.Trace("recovery completed") + return nil +} + +// ForksState recovers Forks' internal state of states descending from the +// latest finalized state. Caution, input states must be valid and in +// parent-first order (unless parent is the latest finalized state). +func ForksState[StateT models.Unique, VoteT models.Unique]( + forks consensus.Forks[StateT], +) StateScanner[StateT, VoteT] { + return func(proposal *models.SignedProposal[StateT, VoteT]) error { + err := forks.AddValidatedState(proposal.State) + if err != nil { + return fmt.Errorf( + "could not add state %x to forks: %w", + proposal.State.Identifier, + err, + ) + } + return nil + } +} + +// VoteAggregatorState recovers the VoteAggregator's internal state as follows: +// - Add all states descending from the latest finalized state to accept +// votes. Those states should be rapidly pruned as the node catches up. +// +// Caution: input states must be valid. +func VoteAggregatorState[StateT models.Unique, VoteT models.Unique]( + voteAggregator consensus.VoteAggregator[StateT, VoteT], +) StateScanner[StateT, VoteT] { + return func(proposal *models.SignedProposal[StateT, VoteT]) error { + voteAggregator.AddState(proposal) + return nil + } +} + +// CollectParentQCs collects all parent QCs included in the states descending +// from the latest finalized state. Caution, input states must be valid. +func CollectParentQCs[StateT models.Unique, VoteT models.Unique]( + collector Collector[models.QuorumCertificate], +) StateScanner[StateT, VoteT] { + return func(proposal *models.SignedProposal[StateT, VoteT]) error { + qc := proposal.State.ParentQuorumCertificate + if qc != nil { + collector.Append(qc) + } + return nil + } +} + +// CollectTCs collect all TCs included in the states descending from the +// latest finalized state. Caution, input states must be valid. +func CollectTCs[StateT models.Unique, VoteT models.Unique]( + collector Collector[models.TimeoutCertificate], +) StateScanner[StateT, VoteT] { + return func(proposal *models.SignedProposal[StateT, VoteT]) error { + tc := proposal.PreviousRankTimeoutCertificate + if tc != nil { + collector.Append(tc) + } + return nil + } +} + +// Collector for objects of generic type. Essentially, it is a stateful list. +// Safe to be passed by value. Retrieve() returns the current state of the list +// and is unaffected by subsequent appends. +type Collector[T any] struct { + list *[]T +} + +func NewCollector[T any]() Collector[T] { + list := make([]T, 0, 5) // heuristic: pre-allocate with some basic capacity + return Collector[T]{list: &list} +} + +// Append adds new elements to the end of the list. 
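+// Illustration of the snapshot semantics described above:
+//
+//	c := NewCollector[int]()
+//	c.Append(1, 2)
+//	snapshot := c.Retrieve() // len(snapshot) == 2
+//	c.Append(3)              // snapshot is unaffected; len(snapshot) is still 2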
+func (c Collector[T]) Append(t ...T) { + *c.list = append(*c.list, t...) +} + +// Retrieve returns the current state of the list (unaffected by subsequent +// append) +func (c Collector[T]) Retrieve() []T { + // Under the hood, the slice is a struct containing a pointer to an underlying + // array and a `len` variable indicating how many of the array elements are + // occupied. Here, we are returning the slice struct by value, i.e. we _copy_ + // the array pointer and the `len` value and return the copy. Therefore, the + // returned slice is unaffected by subsequent append. + return *c.list +} diff --git a/consensus/safetyrules/safety_rules.go b/consensus/safetyrules/safety_rules.go new file mode 100644 index 0000000..091c27d --- /dev/null +++ b/consensus/safetyrules/safety_rules.go @@ -0,0 +1,561 @@ +package safetyrules + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SafetyRules is a dedicated module that enforces consensus safety. This +// component has the sole authority to generate votes and timeouts. It follows +// voting and timeout rules for creating votes and timeouts respectively. +// Caller can be sure that created vote or timeout doesn't break safety and can +// be used in consensus process. SafetyRules relies on consensus.ConsensusStore +// to store latest state of consensus.SafetyData. +// +// The voting rules implemented by SafetyRules are: +// 1. Replicas vote in strictly increasing ranks. At most one vote can be +// signed per rank. Caution: The leader's state signature is formally a vote +// for their own proposal. +// 2. Each state has to include a TC or a QC from the previous rank. +// a. [Happy path] If the previous rank resulted in a QC then the proposer +// should include it in their state. +// b. [Recovery path] If the previous round did *not* result in a QC, the +// leader of the subsequent round *must* include a valid TC for the +// previous rank in its state. +// +// Condition 1 guarantees a foundational security theorem for HotStuff (incl. +// the DiemBFT / Jolteon variant): +// +// THEOREM: For each rank, there can be at most 1 certified state. +// +// NOT safe for concurrent use. +type SafetyRules[StateT models.Unique, VoteT models.Unique] struct { + signer consensus.Signer[StateT, VoteT] + store consensus.ConsensusStore[VoteT] + committee consensus.DynamicCommittee // only produce votes when we are valid committee members + consensusState *models.ConsensusState[VoteT] +} + +var _ consensus.SafetyRules[*nilUnique, *nilUnique] = (*SafetyRules[*nilUnique, *nilUnique])(nil) + +// NewSafetyRules creates a new SafetyRules instance +func NewSafetyRules[StateT models.Unique, VoteT models.Unique]( + filter []byte, + signer consensus.Signer[StateT, VoteT], + store consensus.ConsensusStore[VoteT], + committee consensus.DynamicCommittee, +) (*SafetyRules[StateT, VoteT], error) { + // get the last stored safety data + consensusState, err := store.GetConsensusState(filter) + if err != nil { + return nil, fmt.Errorf("could not load safety data: %w", err) + } + return &SafetyRules[StateT, VoteT]{ + signer: signer, + store: store, + committee: committee, + consensusState: consensusState, + }, nil +} + +// ProduceVote will make a decision on whether it will vote for the given +// proposal, the returned error indicates whether to vote or not. 
To ensure +// that only safe proposals are being voted on, we check that the proposer is a +// valid committee member and that the proposal complies with voting rules. +// We expect that only well-formed proposals with valid signatures are submitted +// for voting. The curRank is taken as input to ensure SafetyRules will only +// vote for proposals at current rank and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ state for the current rank that is safe to +// vote for. Subsequently, voter does _not_ vote for any other state with +// the same (or lower) rank. +// - (nil, models.NoVoteError): If the voter decides that it does not want to +// vote for the given state. This is a sentinel error and _expected_ during +// normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) ProduceVote( + signedProposal *models.SignedProposal[StateT, VoteT], + curRank uint64, +) (*VoteT, error) { + return r.produceVote(&signedProposal.Proposal, curRank) +} + +// produceVote implements the core Safety Rules to validate whether it is safe +// to vote. This method is to be used to vote for other leaders' states as well +// as this node's own proposals under construction. We explicitly codify the +// important aspect that a proposer's signature for their own state is +// conceptually also just a vote (we explicitly use that property when +// aggregating votes and including the proposer's own vote into a QC). In order +// to express this conceptual equivalence in code, the voting logic in Safety +// Rules must also operate on an unsigned Proposal. +// +// The curRank is taken as input to ensure SafetyRules will only vote for +// proposals at current rank and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ state for the current rank that is safe to +// vote for. Subsequently, voter does _not_ vote for any other state with +// the same (or lower) rank. +// - (nil, models.NoVoteError): If the voter decides that it does not want to +// vote for the given state. This is a sentinel error and _expected_ during +// normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) produceVote( + proposal *models.Proposal[StateT], + curRank uint64, +) (*VoteT, error) { + state := proposal.State + // sanity checks: + if curRank != state.Rank { + return nil, fmt.Errorf( + "expecting state for current rank %d, but state's rank is %d", + curRank, + state.Rank, + ) + } + + err := r.isSafeToVote(proposal) + if err != nil { + return nil, fmt.Errorf( + "not safe to vote for proposal %x: %w", + proposal.State.Identifier, + err, + ) + } + + currentLeader, err := r.committee.LeaderForRank(state.Rank) + if err != nil { + return nil, fmt.Errorf( + "expect to have a valid leader for rank %d: %w", + curRank, + err, + ) + } + // This sanity check confirms that the proposal is from the correct leader of + // this rank. In case this sanity check fails, we return an exception, because + // the compliance layer should have verified this already. However, proposals + // from this node might not go through the compliance engine, and must be + // signed before anyway. Therefore, we still include this sanity check, but + // return an exception because signing a proposal should be only for ranks + // where this node is actually the leader. 
+ if state.ProposerID != currentLeader { + return nil, fmt.Errorf( + "incorrect proposal, as proposer %x is different from the leader %x for rank %d", + state.ProposerID, + currentLeader, + curRank, + ) + } + + // In case this node is the leader, we can skip the following checks. + // • If this node is ejected (check (ii) would fail), voting for any states or + // signing own proposals is of no harm. This is because all other honest + // nodes should have terminated their connection to us, so we are not + // risking to use up the networking bandwidth of honest nodes. This is + // relevant in case of self-ejection: a node operator suspecting their + // node's keys to be compromised can request for their node to be ejected to + // prevent malicious actors impersonating their node, launching an attack on + // the network, and the seniority being slashed. The self-ejection mechanism + // corresponds to key-revocation and reduces attack surface for the network + // and the node operator's seniority. In case of self-ejection, a node is no + // longer part of the network, hence it cannot harm the network and is no + // longer subject to slashing for actions during the respective ranks. + // Therefore, voting or continuing to signing state proposals is of no + // concern. + // • In case this node is the leader, `state.ProposerID` and + // `r.committee.Self()` are identical. In other words, check (i) also + // verifies that this node itself is not ejected -- the same as check (ii). + // Hence, also check (i) can be skipped with the same reasoning. + if currentLeader != r.committee.Self() { + // (i): we need to make sure that proposer is not ejected to vote + _, err = r.committee.IdentityByState(state.Identifier, state.ProposerID) + if models.IsInvalidSignerError(err) { + // the proposer must be ejected since the proposal has already been + // validated, which ensures that the proposer was a valid committee member + // at the start of the rank + return nil, models.NewNoVoteErrorf("proposer ejected: %w", err) + } + if err != nil { + return nil, fmt.Errorf( + "internal error retrieving Identity of proposer %x at state %x: %w", + state.ProposerID, + state.Identifier, + err, + ) + } + + // (ii) Do not produce a vote for states where we are not an active + // committee member. The HotStuff state machine may request to vote during + // grace periods outside the ranks, where the node is authorized to + // actively participate. If we voted during those grace periods, we would + // needlessly waste network bandwidth, as such votes can't be used to + // produce valid QCs. 
+ _, err = r.committee.IdentityByState(state.Identifier, r.committee.Self()) + if models.IsInvalidSignerError(err) { + return nil, models.NewNoVoteErrorf( + "I am not authorized to vote for state %x: %w", + state.Identifier, + err, + ) + } + if err != nil { + return nil, fmt.Errorf("could not get self identity: %w", err) + } + } + + vote, err := r.signer.CreateVote(state) + if err != nil { + return nil, fmt.Errorf("could not vote for state: %w", err) + } + + // vote for the current rank has been produced, update safetyData + r.consensusState.LatestAcknowledgedRank = curRank + if r.consensusState.FinalizedRank < state.ParentQuorumCertificate.GetRank() { + r.consensusState.FinalizedRank = state.ParentQuorumCertificate.GetRank() + } + + err = r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + + return vote, nil +} + +// ProduceTimeout takes current rank, highest locally known QC and TC (optional, +// must be nil if and only if QC is for previous rank) and decides whether to +// produce timeout for current rank. +// Returns: +// - (timeout, nil): It is safe to timeout for current rank using newestQC and +// previousRankTimeoutCert. +// - (nil, models.NoTimeoutError): If replica is not part of the authorized +// consensus committee (anymore) and therefore is not authorized to produce +// a valid timeout state. This sentinel error is _expected_ during normal +// operation, e.g. during the grace-period after Rank switchover or after +// the replica self-ejected. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) ProduceTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.TimeoutState[VoteT], error) { + lastTimeout := r.consensusState.LatestTimeout + if lastTimeout != nil && lastTimeout.Rank == curRank { + updatedTimeout := &models.TimeoutState[VoteT]{ + Rank: lastTimeout.Rank, + LatestQuorumCertificate: lastTimeout.LatestQuorumCertificate, + PriorRankTimeoutCertificate: lastTimeout.PriorRankTimeoutCertificate, + TimeoutTick: lastTimeout.TimeoutTick + 1, + Vote: lastTimeout.Vote, + } + + // persist updated TimeoutState in `safetyData` and return it + r.consensusState.LatestTimeout = updatedTimeout + err := r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + return r.consensusState.LatestTimeout, nil + } + + err := r.IsSafeToTimeout(curRank, newestQC, previousRankTimeoutCert) + if err != nil { + return nil, fmt.Errorf("local, trusted inputs failed safety rules: %w", err) + } + + // Do not produce a timeout for rank where we are not a valid committee + // member. 
+ _, err = r.committee.IdentityByRank(curRank, r.committee.Self()) + if err != nil { + if models.IsInvalidSignerError(err) { + return nil, models.NewNoTimeoutErrorf( + "I am not authorized to timeout for rank %d: %w", + curRank, + err, + ) + } + return nil, fmt.Errorf("could not get self identity: %w", err) + } + + timeout, err := r.signer.CreateTimeout( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create timeout at rank %d: %w", + curRank, + err, + ) + } + + r.consensusState.LatestAcknowledgedRank = curRank + r.consensusState.LatestTimeout = timeout + + err = r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + + return timeout, nil +} + +// SignOwnProposal takes an unsigned state proposal and produces a vote for it. +// Vote is a cryptographic commitment to the proposal. By adding the vote to an +// unsigned proposal, the caller constructs a signed state proposal. This method +// has to be used only by the leader, which must be the proposer of the state +// (or an exception is returned). Implementors must guarantee that: +// - vote on the proposal satisfies safety rules +// - maximum one proposal is signed per rank +// Returns: +// - (vote, nil): the passed unsigned proposal is a valid one, and it's safe +// to make a proposal. Subsequently, leader does _not_ produce any _other_ +// proposal with the same (or lower) rank. +// - (nil, models.NoVoteError): according to HotStuff's Safety Rules, it is +// not safe to sign the given proposal. This could happen because we have +// already proposed or timed out for the given rank. This is a sentinel +// error and _expected_ during normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) SignOwnProposal( + unsignedProposal *models.Proposal[StateT], +) (*VoteT, error) { + // check that the state is created by us + if unsignedProposal.State.ProposerID != r.committee.Self() { + return nil, fmt.Errorf( + "can't sign proposal for someone else's state, proposer: %x, self: %x", + unsignedProposal.State.ProposerID, + r.committee.Self(), + ) + } + + return r.produceVote(unsignedProposal, unsignedProposal.State.Rank) +} + +// isSafeToVote checks if this proposal is valid in terms of voting rules, if +// voting for this proposal won't break safety rules. Expected errors during +// normal operations: +// - NoVoteError if replica already acted during this rank (either voted o +// generated timeout) +func (r *SafetyRules[StateT, VoteT]) isSafeToVote( + proposal *models.Proposal[StateT], +) error { + stateRank := proposal.State.Rank + + err := r.validateEvidenceForEnteringRank( + stateRank, + proposal.State.ParentQuorumCertificate, + proposal.PreviousRankTimeoutCertificate, + ) + if err != nil { + // As we are expecting the states to be pre-validated, any failure here is a + // symptom of an internal bug. + return fmt.Errorf("proposal failed consensus validity check: %w", err) + } + + // This check satisfies voting rule 1 + // 1. 
Replicas vote strictly in increasing rounds, + // state's rank must be greater than the rank that we have voted for + acRank := r.consensusState.LatestAcknowledgedRank + if stateRank == acRank { + return models.NewNoVoteErrorf( + "already voted or generated timeout in rank %d", + stateRank, + ) + } + if stateRank < acRank { + return fmt.Errorf( + "already acted during rank %d but got proposal for lower rank %d", + acRank, + stateRank, + ) + } + + return nil +} + +// IsSafeToTimeout checks if it's safe to timeout with proposed data, i.e. +// timing out won't break safety. newestQC is the valid QC with the greatest +// rank that we have observed. previousRankTimeoutCert is the TC for the +// previous rank (might be nil). +// +// When generating a timeout, the inputs are provided by node-internal +// components. Failure to comply with the protocol is a symptom of an internal +// bug. We don't expect any errors during normal operations. +func (r *SafetyRules[StateT, VoteT]) IsSafeToTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) error { + err := r.validateEvidenceForEnteringRank( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + return fmt.Errorf("not safe to timeout: %w", err) + } + + if newestQC.GetRank() < r.consensusState.FinalizedRank { + return fmt.Errorf( + "have already seen QC for rank %d, but newest QC is reported to be for rank %d", + r.consensusState.FinalizedRank, + newestQC.GetRank(), + ) + } + if curRank+1 <= r.consensusState.LatestAcknowledgedRank { + return fmt.Errorf("cannot generate timeout for past rank %d", curRank) + } + // the logic for rejecting inputs with `curRank <= newestQC.Rank` is already + // contained in `validateEvidenceForEnteringRank(..)`, because it only passes + // if + // * either `curRank == newestQC.Rank + 1` (condition 2) + // * or `curRank > newestQC.Rank` (condition 4) + + return nil +} + +// validateEvidenceForEnteringRank performs the following check that is +// fundamental for consensus safety: Whenever a replica acts within a rank, it +// must prove that is has sufficient evidence to enter this rank +// Specifically: +// 1. The replica must always provide a QC and optionally a TC. +// 2. [Happy Path] If the previous round (i.e. `rank -1`) resulted in a QC, the +// replica is allowed to transition to `rank`. The QC from the previous +// round provides sufficient evidence. Furthermore, to prevent +// resource-exhaustion attacks, we require that no TC is included as part of +// the proof. +// 3. Following the Happy Path has priority over following the Recovery Path +// (specified below). +// 4. [Recovery Path] If the previous round (i.e. `rank -1`) did *not* result +// in a QC, a TC from the previous round is required to transition to +// `rank`. The following additional consistency requirements have to be +// satisfied: +// (a) newestQC.Rank + 1 < rank +// Otherwise, the replica has violated condition 3 (in case +// newestQC.Rank + 1 = rank); or the replica failed to apply condition 2 (in +// case newestQC.Rank + 1 > rank). +// (b) newestQC.Rank ≥ previousRankTimeoutCert.NewestQC.Rank +// Otherwise, the replica has violated condition 3. +// +// SafetyRules has the sole signing authority and enforces adherence to these +// conditions. 
In order to generate valid consensus signatures, the replica must +// provide the respective evidence (required QC + optional TC) to its internal +// SafetyRules component for each consensus action that the replica wants to +// take: +// - primary signing its own proposal +// - replica voting for a state +// - replica generating a timeout message +// +// During normal operations, no errors are expected: +// - As we are expecting the states to be pre-validated, any failure here is a +// symptom of an internal bug. +// - When generating a timeout, the inputs are provided by node-internal +// components. Failure to comply with the protocol is a symptom of an +// internal bug. +func (r *SafetyRules[StateT, VoteT]) validateEvidenceForEnteringRank( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) error { + // Condition 1: + if newestQC == nil { + return fmt.Errorf("missing the mandatory QC") + } + + // Condition 2: + if newestQC.GetRank()+1 == rank { + if previousRankTimeoutCert != nil { + return fmt.Errorf( + "when QC is for prior round (%d), no TC should be provided (%d)", + newestQC.GetRank(), + previousRankTimeoutCert.GetRank(), + ) + } + return nil + } + // Condition 3: if we reach the following lines, the happy path is not + // satisfied. + + // Condition 4: + if previousRankTimeoutCert == nil { + return fmt.Errorf( + "expecting TC because QC (%d) is not for prior rank (%d - 1); but didn't get any TC", + newestQC.GetRank(), + rank, + ) + } + if previousRankTimeoutCert.GetRank()+1 != rank { + return fmt.Errorf( + "neither QC (rank %d) nor TC (rank %d) allows to transition to rank %d", + newestQC.GetRank(), + previousRankTimeoutCert.GetRank(), + rank, + ) + } + if newestQC.GetRank() >= rank { + // Note: we need to enforce here that `newestQC.Rank + 1 < rank`, i.e. we + // error for `newestQC.Rank+1 >= rank` However, `newestQC.Rank+1 == rank` is + // impossible, because otherwise we would have walked into condition 2. + // Hence, it suffices to error if `newestQC.Rank+1 > rank`, which is + // identical to `newestQC.Rank >= rank` + return fmt.Errorf( + "still at rank %d, despite knowing a QC for rank %d", + rank, + newestQC.GetRank(), + ) + } + if newestQC.GetRank() < previousRankTimeoutCert.GetLatestQuorumCert().GetRank() { + return fmt.Errorf( + "failed to update newest QC (still at rank %d) despite a newer QC (rank %d) being included in TC", + newestQC.GetRank(), + previousRankTimeoutCert.GetLatestQuorumCert().GetRank(), + ) + } + + return nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/safetyrules/safety_rules_test.go b/consensus/safetyrules/safety_rules_test.go new file mode 100644 index 0000000..9c40b06 --- /dev/null +++ b/consensus/safetyrules/safety_rules_test.go @@ -0,0 +1,834 @@ +package safetyrules + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestSafetyRules(t *testing.T) { + suite.Run(t, new(SafetyRulesTestSuite)) +} + +// SafetyRulesTestSuite is a test suite for testing SafetyRules related functionality. +// SafetyRulesTestSuite setups mocks for injected modules and creates models.ConsensusState[*helper.TestVote] +// based on next configuration: +// R <- B[QC_R] <- P[QC_B] +// B.Rank = S.Rank + 1 +// B - bootstrapped state, we are creating SafetyRules at state B +// Based on this LatestAcknowledgedRank = B.Rank and +type SafetyRulesTestSuite struct { + suite.Suite + + bootstrapState *models.State[*helper.TestState] + proposal *models.SignedProposal[*helper.TestState, *helper.TestVote] + proposerIdentity models.Identity + ourIdentity models.Identity + signer *mocks.Signer[*helper.TestState, *helper.TestVote] + persister *mocks.ConsensusStore[*helper.TestVote] + committee *mocks.DynamicCommittee + safetyData *models.ConsensusState[*helper.TestVote] + safety *SafetyRules[*helper.TestState, *helper.TestVote] +} + +func (s *SafetyRulesTestSuite) SetupTest() { + s.ourIdentity = helper.MakeIdentity() + s.signer = &mocks.Signer[*helper.TestState, *helper.TestVote]{} + s.persister = &mocks.ConsensusStore[*helper.TestVote]{} + s.committee = &mocks.DynamicCommittee{} + s.proposerIdentity = helper.MakeIdentity() + + // bootstrap at random bootstrapState + s.bootstrapState = helper.MakeState(helper.WithStateRank[*helper.TestState](100)) + s.proposal = helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState[*helper.TestState]( + helper.MakeState[*helper.TestState]( + helper.WithParentState[*helper.TestState](s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1), + helper.WithStateProposer[*helper.TestState](s.proposerIdentity)), + )))) + + s.committee.On("Self").Return(s.ourIdentity).Maybe() + s.committee.On("LeaderForRank", mock.Anything).Return(s.proposerIdentity, nil).Maybe() + s.committee.On("IdentityByState", mock.Anything, s.ourIdentity).Return(&helper.TestWeightedIdentity{ID: s.ourIdentity}, nil).Maybe() + s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe() + s.committee.On("IdentityByRank", mock.Anything, s.ourIdentity).Return(&helper.TestWeightedIdentity{ID: s.ourIdentity}, nil).Maybe() + + s.safetyData = &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.bootstrapState.Rank, + LatestAcknowledgedRank: s.bootstrapState.Rank, + } + + s.persister.On("GetConsensusState", mock.Anything).Return(s.safetyData, nil).Once() + var err error + s.safety, err = NewSafetyRules(nil, s.signer, s.persister, s.committee) + require.NoError(s.T(), err) +} + +// TestProduceVote_ShouldVote test basic happy path scenario where 
we vote for first state after bootstrap +// and next rank ended with TC +func (s *SafetyRulesTestSuite) TestProduceVote_ShouldVote() { + expectedSafetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(), + LatestAcknowledgedRank: s.proposal.State.Rank, + } + + expectedVote := makeVote(s.proposal.State) + s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + require.Equal(s.T(), &expectedVote, vote) + + s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData) + + // producing vote for same rank yields an error since we have voted already for this rank + otherVote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), otherVote) + + previousRankTimeoutCert := helper.MakeTC( + helper.WithTCRank(s.proposal.State.Rank+1), + helper.WithTCNewestQC(s.proposal.State.ParentQuorumCertificate)) + + // voting on proposal where last rank ended with TC + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal[*helper.TestState]( + helper.WithState[*helper.TestState]( + helper.MakeState[*helper.TestState]( + helper.WithParentState[*helper.TestState](s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.proposal.State.Rank+2), + helper.WithStateProposer[*helper.TestState](s.proposerIdentity))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](previousRankTimeoutCert)))) + + expectedSafetyData = &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(), + LatestAcknowledgedRank: proposalWithTC.State.Rank, + } + + expectedVote = makeVote(proposalWithTC.State) + s.signer.On("CreateVote", proposalWithTC.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + s.committee.On("IdentityByState", proposalWithTC.State.Identifier, proposalWithTC.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe() + + vote, err = s.safety.ProduceVote(proposalWithTC, proposalWithTC.State.Rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + require.Equal(s.T(), &expectedVote, vote) + s.signer.AssertExpectations(s.T()) + s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData) +} + +// TestProduceVote_IncludedQCHigherThanTCsQC checks specific scenario where previous round resulted in TC and leader +// knows about QC which is not part of TC and qc.Rank > tc.NewestQC.Rank. 
We want to allow this, in this case leader +// includes their QC into proposal satisfies next condition: State.ParentQuorumCertificate.GetRank() > previousRankTimeoutCert.NewestQC.Rank +func (s *SafetyRulesTestSuite) TestProduceVote_IncludedQCHigherThanTCsQC() { + previousRankTimeoutCert := helper.MakeTC( + helper.WithTCRank(s.proposal.State.Rank+1), + helper.WithTCNewestQC(s.proposal.State.ParentQuorumCertificate)) + + // voting on proposal where last rank ended with TC + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal[*helper.TestState]( + helper.WithState[*helper.TestState]( + helper.MakeState[*helper.TestState]( + helper.WithParentState[*helper.TestState](s.proposal.State), + helper.WithStateRank[*helper.TestState](s.proposal.State.Rank+2), + helper.WithStateProposer[*helper.TestState](s.proposerIdentity))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](previousRankTimeoutCert)))) + + expectedSafetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: proposalWithTC.State.ParentQuorumCertificate.GetRank(), + LatestAcknowledgedRank: proposalWithTC.State.Rank, + } + + require.Greater(s.T(), proposalWithTC.State.ParentQuorumCertificate.GetRank(), proposalWithTC.PreviousRankTimeoutCertificate.GetLatestQuorumCert().GetRank(), + "for this test case we specifically require that qc.Rank > previousRankTimeoutCert.NewestQC.Rank") + + expectedVote := makeVote(proposalWithTC.State) + s.signer.On("CreateVote", proposalWithTC.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + s.committee.On("IdentityByState", proposalWithTC.State.Identifier, proposalWithTC.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe() + + vote, err := s.safety.ProduceVote(proposalWithTC, proposalWithTC.State.Rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + require.Equal(s.T(), &expectedVote, vote) + s.signer.AssertExpectations(s.T()) + s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData) +} + +// TestProduceVote_UpdateFinalizedRank tests that FinalizedRank is updated when sees a higher QC. +// Note: `FinalizedRank` is only updated when the replica votes. +func (s *SafetyRulesTestSuite) TestProduceVote_UpdateFinalizedRank() { + s.safety.consensusState.FinalizedRank = 0 + + require.NotEqual(s.T(), s.safety.consensusState.FinalizedRank, s.proposal.State.ParentQuorumCertificate.GetRank(), + "in this test FinalizedRank is lower so it needs to be updated") + + expectedSafetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(), + LatestAcknowledgedRank: s.proposal.State.Rank, + } + + expectedVote := makeVote(s.proposal.State) + s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + require.Equal(s.T(), &expectedVote, vote) + s.signer.AssertExpectations(s.T()) + s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData) +} + +// TestProduceVote_InvalidCurrentRank tests that no vote is created if `curRank` has invalid values. 
+// In particular, `SafetyRules` requires that: +// - the state's rank matches `curRank` +// - that values for `curRank` are monotonicly increasing +// +// Failing any of these conditions is a symptom of an internal bug; hence `SafetyRules` should +// _not_ return a `NoVoteError`. +func (s *SafetyRulesTestSuite) TestProduceVote_InvalidCurrentRank() { + + s.Run("state-rank-does-not-match", func() { + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank+1) + require.Nil(s.T(), vote) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + }) + s.Run("rank-not-monotonicly-increasing", func() { + // create state with rank < LatestAcknowledgedRank + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + func(state *models.State[*helper.TestState]) { + state.ParentQuorumCertificate = helper.MakeQC(helper.WithQCRank(s.safetyData.LatestAcknowledgedRank - 2)) + }, + helper.WithStateRank[*helper.TestState](s.safetyData.LatestAcknowledgedRank-1)))))) + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Nil(s.T(), vote) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + }) + + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_CommitteeLeaderException verifies that SafetyRules handles unexpected error returns from +// the DynamicCommittee correctly. Specifically, generic exceptions and `models.ErrRankUnknown` +// returned by the committee when requesting the leader for the state's rank is propagated up the call stack. +// SafetyRules should *not* wrap unexpected exceptions into an expected NoVoteError. +func (s *SafetyRulesTestSuite) TestProduceVote_CommitteeLeaderException() { + *s.committee = mocks.DynamicCommittee{} + for _, exception := range []error{ + errors.New("invalid-leader-identity"), + models.ErrRankUnknown, + } { + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return("", exception).Once() + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Nil(s.T(), vote) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") + } +} + +// TestProduceVote_DifferentProposerFromLeader tests that no vote is created if the proposer is different from the leader for +// current rank. This is a byzantine behavior and should be handled by the compliance layer but nevertheless we want to +// have a sanity check for other code paths like voting on an own proposal created by the current leader. 
+func (s *SafetyRulesTestSuite) TestProduceVote_DifferentProposerFromLeader() { + s.proposal.State.ProposerID = helper.MakeIdentity() + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_NodeEjected tests that no vote is created if state proposer is ejected +func (s *SafetyRulesTestSuite) TestProduceVote_ProposerEjected() { + *s.committee = mocks.DynamicCommittee{} + s.committee.On("Self").Return(s.ourIdentity).Maybe() + s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(nil, models.NewInvalidSignerErrorf("node-ejected")).Once() + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Nil(s.T(), vote) + require.True(s.T(), models.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_InvalidProposerIdentity tests that no vote is created if there was an exception retrieving proposer identity +// We are specifically testing that unexpected errors are handled correctly, i.e. +// that SafetyRules does not erroneously wrap unexpected exceptions into the expected NoVoteError. +func (s *SafetyRulesTestSuite) TestProduceVote_InvalidProposerIdentity() { + *s.committee = mocks.DynamicCommittee{} + exception := errors.New("invalid-signer-identity") + s.committee.On("Self").Return(s.ourIdentity).Maybe() + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once() + s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(nil, exception).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Nil(s.T(), vote) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_NodeNotAuthorizedToVote tests that no vote is created if the voter is not authorized to vote. +// Nodes have zero weight in the grace periods around the ranks where they are authorized to participate. +// We don't want zero-weight nodes to vote in the first place, to avoid unnecessary traffic. +// Note: this also covers ejected nodes. In both cases, the committee will return an `InvalidSignerError`. +func (s *SafetyRulesTestSuite) TestProduceVote_NodeEjected() { + *s.committee = mocks.DynamicCommittee{} + s.committee.On("Self").Return(s.ourIdentity) + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once() + s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe() + s.committee.On("IdentityByState", s.proposal.State.Identifier, s.ourIdentity).Return(nil, models.NewInvalidSignerErrorf("node-ejected")).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Nil(s.T(), vote) + require.True(s.T(), models.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_InvalidVoterIdentity tests that no vote is created if there was an exception retrieving voter identity +// We are specifically testing that unexpected errors are handled correctly, i.e. 
+// that SafetyRules does not erroneously wrap unexpected exceptions into the expected NoVoteError.
+func (s *SafetyRulesTestSuite) TestProduceVote_InvalidVoterIdentity() {
+	*s.committee = mocks.DynamicCommittee{}
+	s.committee.On("Self").Return(s.ourIdentity)
+	exception := errors.New("invalid-signer-identity")
+	s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once()
+	s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()
+	s.committee.On("IdentityByState", s.proposal.State.Identifier, s.ourIdentity).Return(nil, exception).Once()
+
+	vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
+	require.Nil(s.T(), vote)
+	require.ErrorIs(s.T(), err, exception)
+	require.False(s.T(), models.IsNoVoteError(err))
+	s.persister.AssertNotCalled(s.T(), "PutConsensusState")
+}
+
+// TestProduceVote_CreateVoteException tests that no vote is created if vote creation raises an exception
+func (s *SafetyRulesTestSuite) TestProduceVote_CreateVoteException() {
+	exception := errors.New("create-vote-exception")
+	s.signer.On("CreateVote", s.proposal.State).Return(nil, exception).Once()
+	vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
+	require.Nil(s.T(), vote)
+	require.ErrorIs(s.T(), err, exception)
+	require.False(s.T(), models.IsNoVoteError(err))
+	s.persister.AssertNotCalled(s.T(), "PutConsensusState")
+}
+
+// TestProduceVote_PersistStateException tests that no vote is created if persisting the state fails
+func (s *SafetyRulesTestSuite) TestProduceVote_PersistStateException() {
+	exception := errors.New("persister-exception")
+	s.persister.On("PutConsensusState", mock.Anything).Return(exception)
+
+	vote := makeVote(s.proposal.State)
+	s.signer.On("CreateVote", s.proposal.State).Return(&vote, nil).Once()
+	votePtr, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
+	require.Nil(s.T(), votePtr)
+	require.ErrorIs(s.T(), err, exception)
+}
+
+// TestProduceVote_VotingOnInvalidProposals tests different scenarios where we try to vote on unsafe states.
+// SafetyRules contains a variety of checks to confirm that the QC and TC have the desired relationship to each other.
+// In particular, we test:
+//
+// (i) A TC should be included in a proposal if and only if the QC is not for the prior rank.
+// (ii) When the proposal includes a TC (i.e. the QC is not for the prior rank), the TC must be for the prior rank.
+// (iii) The QC in the state must have a smaller rank than the state.
+// (iv) If the state contains a TC, the TC cannot contain a newer QC than the state itself.
+//
+// Conditions (i) - (iv) are validity requirements for the state, and all states that SafetyRules processes
+// are supposed to be pre-validated. Hence, failing any of those conditions means we have an internal bug.
+// Consequently, we expect SafetyRules to return exceptions but _not_ `NoVoteError`, because the latter
+// indicates that the input state was valid, but we didn't want to vote.
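+//
+// For a concrete illustration of (i)-(iv) (ranks chosen arbitrarily, not taken from the
+// fixtures below): a proposal at rank 10 whose QC certifies rank 9 must not carry a TC;
+// a proposal at rank 10 whose QC certifies rank 7 must carry a TC for rank 9; and if that
+// TC's newest QC certifies rank 8, the proposal is still unsafe, because the proposer
+// should have built on the rank-8 QC rather than the rank-7 QC.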
+func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { + + // a proposal which includes a QC for the previous round should not contain a TC + s.Run("proposal-includes-last-rank-qc-and-tc", func() { + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC())))) + s.committee.On("IdentityByState", proposal.State.Identifier, proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe() + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + }) + s.Run("no-last-rank-tc", func() { + // create state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and PreviousRankTimeoutCertificate = nil + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2)))))) + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + }) + s.Run("last-rank-tc-invalid-rank", func() { + // create state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and + // State.Rank != PreviousRankTimeoutCertificate.Rank+1 + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState]( + helper.MakeTC( + helper.WithTCRank(s.bootstrapState.Rank)))))) + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + }) + s.Run("proposal-includes-QC-for-higher-rank", func() { + // create state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and + // State.Rank == PreviousRankTimeoutCertificate.Rank+1 and State.ParentQuorumCertificate.GetRank() >= State.Rank + // in this case state is not safe to extend since proposal includes QC which is newer than the proposal itself. 
+ proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2), + func(state *models.State[*helper.TestState]) { + state.ParentQuorumCertificate = helper.MakeQC(helper.WithQCRank(s.bootstrapState.Rank + 10)) + })), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState]( + helper.MakeTC( + helper.WithTCRank(s.bootstrapState.Rank+1)))))) + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + }) + s.Run("last-rank-tc-invalid-highest-qc", func() { + // create state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and + // State.Rank == PreviousRankTimeoutCertificate.Rank+1 and State.ParentQuorumCertificate.GetRank() < PreviousRankTimeoutCertificate.NewestQC.Rank + // in this case state is not safe to extend since proposal is built on top of QC, which is lower + // than QC presented in PreviousRankTimeoutCertificate. + TONewestQC := helper.MakeQC(helper.WithQCRank(s.bootstrapState.Rank + 1)) + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2))), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState]( + helper.MakeTC( + helper.WithTCRank(s.bootstrapState.Rank+1), + helper.WithTCNewestQC(TONewestQC)))))) + vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + }) + + s.signer.AssertNotCalled(s.T(), "CreateVote") + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceVote_VoteEquivocation tests scenario when we try to vote twice in same rank. We require that replica +// follows next rules: +// - replica votes once per rank +// - replica votes in monotonicly increasing ranks +// +// Voting twice per round on equivocating proposals is considered a byzantine behavior. +// Expect a `models.NoVoteError` sentinel in such scenario. 
+func (s *SafetyRulesTestSuite) TestProduceVote_VoteEquivocation() { + expectedVote := makeVote(s.proposal.State) + s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", mock.Anything).Return(nil).Once() + + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + require.Equal(s.T(), &expectedVote, vote) + + equivocatingProposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithParentState(s.bootstrapState), + helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1), + helper.WithStateProposer[*helper.TestState](s.proposerIdentity)), + )))) + + // voting at same rank(even different proposal) should result in NoVoteError + vote, err = s.safety.ProduceVote(equivocatingProposal, s.proposal.State.Rank) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + + s.proposal.State.ProposerID = s.ourIdentity + + // proposing at the same rank should result in NoVoteError since we have already voted + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + +// TestProduceVote_AfterTimeout tests a scenario where we first timeout for rank and then try to produce a vote for +// same rank, this should result in error since producing a timeout means that we have given up on this rank +// and are in process of moving forward, no vote should be created. +func (s *SafetyRulesTestSuite) TestProduceVote_AfterTimeout() { + rank := s.proposal.State.Rank + newestQC := helper.MakeQC(helper.WithQCRank(rank - 1)) + expectedTimeout := &models.TimeoutState[*helper.TestVote]{ + Rank: rank, + LatestQuorumCertificate: newestQC, + } + s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once() + s.persister.On("PutConsensusState", mock.Anything).Return(nil).Once() + + // first timeout, then try to vote + timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.NoError(s.T(), err) + require.NotNil(s.T(), timeout) + + // voting in same rank after producing timeout is not allowed + vote, err := s.safety.ProduceVote(s.proposal, rank) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + + s.signer.AssertExpectations(s.T()) + s.persister.AssertExpectations(s.T()) +} + +// TestProduceTimeout_ShouldTimeout tests that we can produce timeout in cases where +// last rank was successful or not. Also tests last timeout caching. 
+func (s *SafetyRulesTestSuite) TestProduceTimeout_ShouldTimeout() { + rank := s.proposal.State.Rank + newestQC := helper.MakeQC(helper.WithQCRank(rank - 1)) + expectedTimeout := &models.TimeoutState[*helper.TestVote]{ + Rank: rank, + LatestQuorumCertificate: newestQC, + // don't care about actual data + Vote: helper.MakeVote[*helper.TestVote](), + } + + expectedSafetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.safetyData.FinalizedRank, + LatestAcknowledgedRank: rank, + LatestTimeout: expectedTimeout, + } + s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedTimeout, timeout) + + s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData) + + s.persister.On("PutConsensusState", mock.MatchedBy(func(s *models.ConsensusState[*helper.TestVote]) bool { + return s.LatestTimeout.TimeoutTick == 1 + })).Return(nil).Once() + + otherTimeout, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.NoError(s.T(), err) + require.True(s.T(), timeout.Equals(otherTimeout)) + require.Equal(s.T(), timeout.TimeoutTick+1, otherTimeout.TimeoutTick) + + // to create new TO we need to provide a TC + previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(rank), + helper.WithTCNewestQC(newestQC)) + + expectedTimeout = &models.TimeoutState[*helper.TestVote]{ + Rank: rank + 1, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + } + s.signer.On("CreateTimeout", rank+1, newestQC, previousRankTimeoutCert).Return(expectedTimeout, nil).Once() + expectedSafetyData = &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.safetyData.FinalizedRank, + LatestAcknowledgedRank: rank + 1, + LatestTimeout: expectedTimeout, + } + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + + // creating new timeout should invalidate cache + otherTimeout, err = s.safety.ProduceTimeout(rank+1, newestQC, previousRankTimeoutCert) + require.NoError(s.T(), err) + require.NotNil(s.T(), otherTimeout) +} + +// TestProduceTimeout_NotSafeToTimeout tests that we don't produce a timeout when it's not safe +// We expect that the EventHandler to feed only request timeouts for the current rank, providing valid set of inputs. +// Hence, the cases tested here would be symptoms of an internal bugs, and therefore should not result in an NoVoteError. 
+func (s *SafetyRulesTestSuite) TestProduceTimeout_NotSafeToTimeout() {
+
+	s.Run("newest-qc-nil", func() {
+		// newestQC cannot be nil
+		timeout, err := s.safety.ProduceTimeout(s.safetyData.FinalizedRank, nil, nil)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	// if a QC for the previous rank is provided, a last rank TC is unnecessary and must not be provided
+	s.Run("includes-last-rank-qc-and-tc", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+
+		// tc not needed but included
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, helper.MakeTC())
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("last-rank-tc-nil", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+
+		// tc needed but not included
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, nil)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("last-rank-tc-for-wrong-rank", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+		// previousRankTimeoutCert should be for newestQC.GetRank()+1
+		previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank()))
+
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, previousRankTimeoutCert)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("cur-rank-equal-to-highest-QC", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+		previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.safetyData.FinalizedRank - 1))
+
+		timeout, err := s.safety.ProduceTimeout(s.safetyData.FinalizedRank, newestQC, previousRankTimeoutCert)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("cur-rank-below-highest-QC", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+		previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank() - 2))
+
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()-1, newestQC, previousRankTimeoutCert)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("last-rank-tc-is-newer", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+		// newest QC included in TC cannot be higher than the newest QC known to the replica
+		previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank()+1),
+			helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(newestQC.GetRank()+1))))
+
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, previousRankTimeoutCert)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("highest-qc-below-locked-round", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank - 1))
+
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, nil)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+	s.Run("cur-rank-below-highest-acknowledged-rank", func() {
+		newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
+		// modify the highest acknowledged rank so that it's definitely bigger than the newest QC rank
+		s.safetyData.LatestAcknowledgedRank = newestQC.GetRank() + 10
+
+		timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, nil)
+		require.Error(s.T(), err)
+		require.Nil(s.T(), timeout)
+	})
+
+	s.signer.AssertNotCalled(s.T(), "CreateTimeout")
+	s.persister.AssertNotCalled(s.T(), "PutConsensusState")
+}
+
+// 
TestProduceTimeout_CreateTimeoutException tests that no timeout is created if timeout creation raised an exception +func (s *SafetyRulesTestSuite) TestProduceTimeout_CreateTimeoutException() { + rank := s.proposal.State.Rank + newestQC := helper.MakeQC(helper.WithQCRank(rank - 1)) + + exception := errors.New("create-timeout-exception") + s.signer.On("CreateTimeout", rank, newestQC, nil).Return(nil, exception).Once() + vote, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.Nil(s.T(), vote) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutConsensusState") +} + +// TestProduceTimeout_PersistStateException tests that no timeout is created if persisting state failed +func (s *SafetyRulesTestSuite) TestProduceTimeout_PersistStateException() { + exception := errors.New("persister-exception") + s.persister.On("PutConsensusState", mock.Anything).Return(exception) + + rank := s.proposal.State.Rank + newestQC := helper.MakeQC(helper.WithQCRank(rank - 1)) + expectedTimeout := &models.TimeoutState[*helper.TestVote]{ + Rank: rank, + LatestQuorumCertificate: newestQC, + } + + s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once() + timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.Nil(s.T(), timeout) + require.ErrorIs(s.T(), err, exception) +} + +// TestProduceTimeout_AfterVote tests a case where we first produce a vote and then try to timeout +// for same rank. This behavior is expected and should result in valid timeout without any errors. +func (s *SafetyRulesTestSuite) TestProduceTimeout_AfterVote() { + expectedVote := makeVote(s.proposal.State) + s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", mock.Anything).Return(nil).Times(2) + + rank := s.proposal.State.Rank + + // first produce vote, then try to timeout + vote, err := s.safety.ProduceVote(s.proposal, rank) + require.NoError(s.T(), err) + require.NotNil(s.T(), vote) + + newestQC := helper.MakeQC(helper.WithQCRank(rank - 1)) + + expectedTimeout := &models.TimeoutState[*helper.TestVote]{ + Rank: rank, + LatestQuorumCertificate: newestQC, + } + + s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once() + + // timing out for same rank should be possible + timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil) + require.NoError(s.T(), err) + require.NotNil(s.T(), timeout) + + s.persister.AssertExpectations(s.T()) + s.signer.AssertExpectations(s.T()) +} + +// TestProduceTimeout_InvalidProposerIdentity tests that no timeout is created if there was an exception retrieving proposer identity +// We are specifically testing that unexpected errors are handled correctly, i.e. +// that SafetyRules does not erroneously wrap unexpected exceptions into the expected models.NoTimeoutError. 
+func (s *SafetyRulesTestSuite) TestProduceTimeout_InvalidProposerIdentity() {
+	rank := s.proposal.State.Rank
+	newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
+	*s.committee = mocks.DynamicCommittee{}
+	exception := errors.New("invalid-signer-identity")
+	s.committee.On("IdentityByRank", rank, s.ourIdentity).Return(nil, exception).Once()
+	s.committee.On("Self").Return(s.ourIdentity)
+
+	timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
+	require.Nil(s.T(), timeout)
+	require.ErrorIs(s.T(), err, exception)
+	require.False(s.T(), models.IsNoTimeoutError(err))
+	s.persister.AssertNotCalled(s.T(), "PutConsensusState")
+}
+
+// TestProduceTimeout_NodeEjected tests that no timeout is created if the replica is not authorized to create a timeout.
+// Nodes have zero weight in the grace periods around the ranks where they are authorized to participate.
+// We don't want zero-weight nodes to participate in the first place, to avoid unnecessary traffic.
+// Note: this also covers ejected nodes. In both cases, the committee will return an `InvalidSignerError`.
+func (s *SafetyRulesTestSuite) TestProduceTimeout_NodeEjected() {
+	rank := s.proposal.State.Rank
+	newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
+	*s.committee = mocks.DynamicCommittee{}
+	s.committee.On("Self").Return(s.ourIdentity)
+	s.committee.On("IdentityByRank", rank, s.ourIdentity).Return(nil, models.NewInvalidSignerErrorf("")).Maybe()
+
+	timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
+	require.Nil(s.T(), timeout)
+	require.True(s.T(), models.IsNoTimeoutError(err))
+	s.persister.AssertNotCalled(s.T(), "PutConsensusState")
+}
+
+// TestSignOwnProposal tests a happy path scenario where the leader can sign their own proposal.
+func (s *SafetyRulesTestSuite) TestSignOwnProposal() {
+	s.proposal.State.ProposerID = s.ourIdentity
+	expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
+		FinalizedRank:          s.proposal.State.ParentQuorumCertificate.GetRank(),
+		LatestAcknowledgedRank: s.proposal.State.Rank,
+	}
+	expectedVote := makeVote(s.proposal.State)
+	s.committee.On("LeaderForRank").Unset()
+	s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.ourIdentity, nil).Once()
+	s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
+	s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()
+	vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), vote, &expectedVote)
+}
+
+// TestSignOwnProposal_ProposalNotSelf tests that we cannot sign a proposal that is not ours. We
+// verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError.
+func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalNotSelf() {
+	vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
+	require.Error(s.T(), err)
+	require.False(s.T(), models.IsNoVoteError(err))
+	require.Nil(s.T(), vote)
+}
+
+// TestSignOwnProposal_SelfInvalidLeader tests that we cannot sign a proposal if we are not the leader for the rank.
+// We verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError.
+func (s *SafetyRulesTestSuite) TestSignOwnProposal_SelfInvalidLeader() { + s.proposal.State.ProposerID = s.ourIdentity + otherID := helper.MakeIdentity() + require.NotEqual(s.T(), otherID, s.ourIdentity) + s.committee.On("LeaderForRank").Unset() + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(otherID, nil).Once() + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.Error(s.T(), err) + require.False(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + +// TestSignOwnProposal_ProposalEquivocation verifies that SafetyRules will refuse to sign multiple proposals for the same rank. +// We require that leader complies with the following next rules: +// - leader proposes once per rank +// - leader's proposals follow safety rules +// +// Signing repeatedly for one rank (either proposals or voting) can lead to equivocating (byzantine behavior). +// Expect a `models.NoVoteError` sentinel in such scenario. +func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalEquivocation() { + s.proposal.State.ProposerID = s.ourIdentity + expectedSafetyData := &models.ConsensusState[*helper.TestVote]{ + FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(), + LatestAcknowledgedRank: s.proposal.State.Rank, + } + expectedVote := makeVote(s.proposal.State) + s.committee.On("LeaderForRank").Unset() + s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.ourIdentity, nil).Once() + s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once() + s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once() + + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.NoError(s.T(), err) + require.Equal(s.T(), &expectedVote, vote) + + // signing same proposal again should return an error since we have already created a proposal for this rank + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) + require.Error(s.T(), err) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) + + // voting for same rank should also return an error since we have already proposed + vote, err = s.safety.ProduceVote(s.proposal, s.proposal.State.Rank) + require.Error(s.T(), err) + require.True(s.T(), models.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + +func makeVote(state *models.State[*helper.TestState]) *helper.TestVote { + return &helper.TestVote{ + StateID: state.Identifier, + Rank: state.Rank, + ID: helper.MakeIdentity(), + } +} diff --git a/consensus/signature/packer.go b/consensus/signature/packer.go new file mode 100644 index 0000000..7a93412 --- /dev/null +++ b/consensus/signature/packer.go @@ -0,0 +1,74 @@ +package signature + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ConsensusSigDataPacker implements the consensus.Packer interface. +type ConsensusSigDataPacker struct { + committees consensus.Replicas +} + +var _ consensus.Packer = &ConsensusSigDataPacker{} + +// NewConsensusSigDataPacker creates a new ConsensusSigDataPacker instance +func NewConsensusSigDataPacker( + committees consensus.Replicas, +) *ConsensusSigDataPacker { + return &ConsensusSigDataPacker{ + committees: committees, + } +} + +// Pack serializes the state signature data into raw bytes, suitable to create a +// QC. To pack the state signature data, we first build a compact data type, and +// then encode it into bytes. 
Expected error returns during normal operations: +// - none; all errors are symptoms of inconsistent input data or corrupted +// internal state. +func (p *ConsensusSigDataPacker) Pack( + rank uint64, + sig *consensus.StateSignatureData, +) ([]byte, []byte, error) { + // retrieve all authorized consensus participants at the given state + fullMembers, err := p.committees.IdentitiesByRank(rank) + if err != nil { + return nil, nil, fmt.Errorf( + "could not find consensus committee for rank %d: %w", + rank, + err, + ) + } + + sigSet := map[models.Identity]struct{}{} + for _, s := range sig.Signers { + sigSet[s.Identity()] = struct{}{} + } + + signerIndices := make([]byte, (len(fullMembers)+7)/8) + for i, member := range fullMembers { + if _, ok := sigSet[member.Identity()]; ok { + signerIndices[i/8] |= 1 << (i % 8) + } + } + + return signerIndices, sig.Signature, nil +} + +// Unpack de-serializes the provided signature data. +// rank is the rank of the state that the aggregated sig is signed for +// sig is the aggregated signature data +// It returns: +// - (sigData, nil) if successfully unpacked the signature data +// - (nil, models.InvalidFormatError) if failed to unpack the signature data +func (p *ConsensusSigDataPacker) Unpack( + signerIdentities []models.WeightedIdentity, + sigData []byte, +) (*consensus.StateSignatureData, error) { + return &consensus.StateSignatureData{ + Signers: signerIdentities, + Signature: sigData, + }, nil +} diff --git a/consensus/signature/state_signer_decoder.go b/consensus/signature/state_signer_decoder.go new file mode 100644 index 0000000..e04962d --- /dev/null +++ b/consensus/signature/state_signer_decoder.go @@ -0,0 +1,135 @@ +package signature + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateSignerDecoder is a wrapper around the `consensus.DynamicCommittee`, +// which implements the auxiliary logic for de-coding signer indices of a state +// (header) to full node IDs +type StateSignerDecoder[StateT models.Unique] struct { + consensus.DynamicCommittee +} + +func NewStateSignerDecoder[StateT models.Unique]( + committee consensus.DynamicCommittee, +) *StateSignerDecoder[StateT] { + return &StateSignerDecoder[StateT]{committee} +} + +var _ consensus.StateSignerDecoder[*nilUnique] = (*StateSignerDecoder[*nilUnique])(nil) + +// DecodeSignerIDs decodes the signer indices from the given state into +// full node IDs. Note: A state header contains a quorum certificate for its +// parent, which proves that the consensus committee has reached agreement on +// validity of parent state. Consequently, the returned IdentifierList contains +// the consensus participants that signed the parent state. Expected Error +// returns during normal operations: +// - consensus.InvalidSignerIndicesError if signer indices included in the +// state do not encode a valid subset of the consensus committee +// - state.ErrUnknownSnapshotReference if the input state is not a known +// incorporated state. 
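+//
+// Illustration of the signer-index layout this decoder assumes (a sketch of the index
+// arithmetic only, matching ConsensusSigDataPacker.Pack shown above; not an additional API):
+//
+//	indices := make([]byte, (len(members)+7)/8)
+//	indices[i/8] |= 1 << (i % 8)               // Pack: mark member i as a signer
+//	signed := (indices[i/8]>>(i%8))&1 == 1     // decode: check whether member i signed
+//
+// For example, with 10 members and signers at positions 0, 3, and 9, the bitmask is
+// {0b00001001, 0b00000010}.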
+func (b *StateSignerDecoder[StateT]) DecodeSignerIDs(
+	state *models.State[StateT],
+) (
+	[]models.WeightedIdentity,
+	error,
+) {
+	// root state does not have signer indices
+	if state.ParentQuorumCertificate == nil {
+		return []models.WeightedIdentity{}, nil
+	}
+
+	// we will use IdentitiesByRank since it's a faster call and avoids a DB lookup
+	members, err := b.IdentitiesByRank(state.ParentQuorumCertificate.GetRank())
+	if err != nil {
+		if errors.Is(err, models.ErrRankUnknown) {
+			// possibly we requested a rank which is far in the past, in which
+			// case we won't have it in the cache; try asking by parent ID
+			byStateMembers, err := b.IdentitiesByState(
+				state.ParentQuorumCertificate.Identity(),
+			)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"could not retrieve identities for state %x with QC rank %d for parent %x: %w",
+					state.Identifier,
+					state.ParentQuorumCertificate.GetRank(),
+					state.ParentQuorumCertificate.Identity(),
+					err,
+				) // state.ErrUnknownSnapshotReference or exception
+			}
+			members = byStateMembers
+		} else {
+			return nil, fmt.Errorf(
+				"unexpected error retrieving identities for state %x: %w",
+				state.Identifier,
+				err,
+			)
+		}
+	}
+
+	signerIDs := []models.WeightedIdentity{}
+	sigIndices := state.ParentQuorumCertificate.GetAggregatedSignature().GetBitmask()
+	for i, member := range members {
+		if (sigIndices[i/8]>>(i%8))&1 == 1 {
+			signerIDs = append(signerIDs, member)
+		}
+	}
+
+	return signerIDs, nil
+}
+
+// NoopStateSignerDecoder does not decode any signer indices and consistently
+// returns nil for the signing node IDs (auxiliary data)
+type NoopStateSignerDecoder[StateT models.Unique] struct{}
+
+func NewNoopStateSignerDecoder[
+	StateT models.Unique,
+]() *NoopStateSignerDecoder[StateT] {
+	return &NoopStateSignerDecoder[StateT]{}
+}
+
+func (b *NoopStateSignerDecoder[StateT]) DecodeSignerIDs(
+	_ *models.State[StateT],
+) ([]models.WeightedIdentity, error) {
+	return nil, nil
+}
+
+// Type used to satisfy generic arguments in compile-time type assertion checks
+type nilUnique struct{}
+
+// GetSignature implements models.Unique.
+func (n *nilUnique) GetSignature() []byte {
+	panic("unimplemented")
+}
+
+// GetTimestamp implements models.Unique.
+func (n *nilUnique) GetTimestamp() uint64 {
+	panic("unimplemented")
+}
+
+// Source implements models.Unique.
+func (n *nilUnique) Source() models.Identity {
+	panic("unimplemented")
+}
+
+// Clone implements models.Unique.
+func (n *nilUnique) Clone() models.Unique {
+	panic("unimplemented")
+}
+
+// GetRank implements models.Unique.
+func (n *nilUnique) GetRank() uint64 {
+	panic("unimplemented")
+}
+
+// Identity implements models.Unique.
+func (n *nilUnique) Identity() models.Identity {
+	panic("unimplemented")
+}
+
+var _ models.Unique = (*nilUnique)(nil)
diff --git a/consensus/signature/weighted_signature_aggregator.go b/consensus/signature/weighted_signature_aggregator.go
new file mode 100644
index 0000000..b58e1ad
--- /dev/null
+++ b/consensus/signature/weighted_signature_aggregator.go
@@ -0,0 +1,227 @@
+package signature
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+)
+
+// signerInfo holds information about a signer, its weight and index
+type signerInfo struct {
+	weight uint64
+	pk     []byte
+	index  int
+}
+
+// WeightedSignatureAggregator implements consensus.WeightedSignatureAggregator.
+// It is a wrapper around consensus.SignatureAggregatorSameMessage, which +// implements a mapping from node IDs (as used by HotStuff) to index-based +// addressing of authorized signers (as used by SignatureAggregatorSameMessage). +// +// Similarly to module/consensus.SignatureAggregatorSameMessage, this module +// assumes proofs of possession (PoP) of all identity public keys are valid. +type WeightedSignatureAggregator struct { + aggregator consensus.SignatureAggregator + ids []models.WeightedIdentity + idToInfo map[models.Identity]signerInfo + totalWeight uint64 + dsTag []byte + message []byte + lock sync.RWMutex + + // collectedSigs tracks the Identities of all nodes whose signatures have been + // collected so far. The reason for tracking the duplicate signers at this + // module level is that having no duplicates is a Hotstuff constraint, rather + // than a cryptographic aggregation constraint. + collectedSigs map[models.Identity][]byte +} + +var _ consensus.WeightedSignatureAggregator = (*WeightedSignatureAggregator)(nil) + +// NewWeightedSignatureAggregator returns a weighted aggregator initialized with +// a list of identities, their respective public keys, a message and a +// domain separation tag. The identities represent the list of all possible +// signers. This aggregator is only safe if PoPs of all identity keys are valid. +// This constructor does not verify the PoPs but assumes they have been +// validated outside this module. +// The constructor errors if: +// - the list of identities is empty +// - if the length of keys does not match the length of identities +// - if one of the keys is not a valid public key. +// +// A weighted aggregator is used for one aggregation only. A new instance should +// be used for each signature aggregation task in the protocol. +func NewWeightedSignatureAggregator( + ids []models.WeightedIdentity, + pks [][]byte, // list of corresponding public keys used for signature verifications + message []byte, // message to get an aggregated signature for + dsTag []byte, // domain separation tag used by the signature + aggregator consensus.SignatureAggregator, +) (*WeightedSignatureAggregator, error) { + if len(ids) != len(pks) { + return nil, fmt.Errorf("keys length %d and identities length %d do not match", len(pks), len(ids)) + } + + // build the internal map for a faster look-up + idToInfo := make(map[models.Identity]signerInfo) + for i, id := range ids { + idToInfo[id.Identity()] = signerInfo{ + weight: id.Weight(), + pk: pks[i], + index: i, + } + } + + return &WeightedSignatureAggregator{ + dsTag: dsTag, + ids: ids, + idToInfo: idToInfo, + aggregator: aggregator, + message: message, + collectedSigs: make(map[models.Identity][]byte), + }, nil +} + +// Verify verifies the signature under the stored public keys and message. +// Expected errors during normal operations: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.ErrInvalidSignature if signerID is valid but signature is +// cryptographically invalid +// +// The function is thread-safe. 
+func (w *WeightedSignatureAggregator) Verify( + signerID models.Identity, + sig []byte, +) error { + info, ok := w.idToInfo[signerID] + if !ok { + return models.NewInvalidSignerErrorf( + "%x is not an authorized signer", + signerID, + ) + } + + ok = w.aggregator.VerifySignatureRaw(info.pk, sig, w.message, w.dsTag) + if !ok { + return fmt.Errorf( + "invalid signature %x from %x (pk: %x, msg: %x, dsTag: %x): %w", + sig, + signerID, + info.pk, + w.message, + w.dsTag, + models.ErrInvalidSignature, + ) + } + return nil +} + +// TrustedAdd adds a signature to the internal set of signatures and adds the +// signer's weight to the total collected weight, iff the signature is _not_ a +// duplicate. +// +// The total weight of all collected signatures (excluding duplicates) is +// returned regardless of any returned error. +// The function errors with: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.DuplicatedSignerError if the signer has been already added +// +// The function is thread-safe. +func (w *WeightedSignatureAggregator) TrustedAdd( + signerID models.Identity, + sig []byte, +) (uint64, error) { + info, found := w.idToInfo[signerID] + if !found { + return w.TotalWeight(), models.NewInvalidSignerErrorf( + "%x is not an authorized signer", + signerID, + ) + } + + // atomically update the signatures pool and the total weight + w.lock.Lock() + defer w.lock.Unlock() + + // check for repeated occurrence of signerID + if _, duplicate := w.collectedSigs[signerID]; duplicate { + return w.totalWeight, models.NewDuplicatedSignerErrorf( + "signature from %x was already added", + signerID, + ) + } + + w.collectedSigs[signerID] = sig + w.totalWeight += info.weight + + return w.totalWeight, nil +} + +// TotalWeight returns the total weight presented by the collected signatures. +// The function is thread-safe +func (w *WeightedSignatureAggregator) TotalWeight() uint64 { + w.lock.RLock() + defer w.lock.RUnlock() + return w.totalWeight +} + +// Aggregate aggregates the signatures and returns the aggregated consensus. +// The function performs a final verification and errors if the aggregated +// signature is invalid. This is required for the function safety since +// `TrustedAdd` allows adding invalid signatures. The function errors with: +// - models.InsufficientSignaturesError if no signatures have been added yet +// - models.InvalidSignatureIncludedError if: +// - some signature(s), included via TrustedAdd, fail to deserialize +// (regardless of the aggregated public key) +// -- or all signatures deserialize correctly but some signature(s), +// included via TrustedAdd, are invalid (while aggregated public key is +// valid) +// -- models.InvalidAggregatedKeyError if all signatures deserialize +// correctly but the signer's proving public keys sum up to an invalid +// key (BLS identity public key). Any aggregated signature would fail the +// cryptographic verification under the identity public key and therefore +// such signature is considered invalid. Such scenario can only happen if +// proving public keys of signers were forged to add up to the identity +// public key. Under the assumption that all proving key PoPs are valid, +// this error case can only happen if all signers are malicious and +// colluding. If there is at least one honest signer, there is a +// negligible probability that the aggregated key is identity. +// +// The function is thread-safe. 
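+//
+// A minimal usage sketch (illustrative only; ids, pks, msg, dsTag, sigAggregator,
+// signerID, sig and requiredWeight are assumed to be supplied by the caller and are
+// not defined in this package):
+//
+//	wsa, err := NewWeightedSignatureAggregator(ids, pks, msg, dsTag, sigAggregator)
+//	if err != nil {
+//		return err
+//	}
+//	if err := wsa.Verify(signerID, sig); err != nil {
+//		return err // InvalidSignerError, ErrInvalidSignature, or exception
+//	}
+//	if _, err := wsa.TrustedAdd(signerID, sig); err != nil {
+//		return err // InvalidSignerError or DuplicatedSignerError
+//	}
+//	if wsa.TotalWeight() >= requiredWeight {
+//		signers, aggSig, err := wsa.Aggregate()
+//		// if err == nil, signers and aggSig can back a quorum certificate
+//	}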
+func (w *WeightedSignatureAggregator) Aggregate() ( + []models.WeightedIdentity, + models.AggregatedSignature, + error, +) { + w.lock.Lock() + defer w.lock.Unlock() + + pks := [][]byte{} + signerIDs := []models.WeightedIdentity{} + sigs := [][]byte{} + for id, sig := range w.collectedSigs { + signerIDs = append(signerIDs, w.ids[w.idToInfo[id].index]) + pks = append(pks, w.idToInfo[id].pk) + sigs = append(sigs, sig) + } + if len(sigs) == 0 { + return nil, nil, models.NewInsufficientSignaturesError( + errors.New("no signatures"), + ) + } + + aggSignature, err := w.aggregator.Aggregate(pks, sigs) + if err != nil { + return nil, nil, fmt.Errorf( + "unexpected error during signature aggregation: %w", + err, + ) + } + + return signerIDs, aggSignature, nil +} diff --git a/consensus/state_machine.go b/consensus/state_machine.go deleted file mode 100644 index f0c83e7..0000000 --- a/consensus/state_machine.go +++ /dev/null @@ -1,1364 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/pkg/errors" -) - -// State represents a consensus engine state -type State string - -const ( - // StateStopped - Initial state, engine is not running - StateStopped State = "stopped" - // StateStarting - Engine is initializing - StateStarting State = "starting" - // StateLoading - Loading data and syncing with network - StateLoading State = "loading" - // StateCollecting - Collecting data for consensus round, prepares proposal - StateCollecting State = "collecting" - // StateLivenessCheck - Announces and awaits prover liveness - StateLivenessCheck State = "liveness_check" - // StateProving - Generating proof (prover only) - StateProving State = "proving" - // StatePublishing - Publishing relevant state - StatePublishing State = "publishing" - // StateVoting - Voting on proposals - StateVoting State = "voting" - // StateFinalizing - Finalizing consensus round - StateFinalizing State = "finalizing" - // StateVerifying - Verifying and publishing results - StateVerifying State = "verifying" - // StateStopping - Engine is shutting down - StateStopping State = "stopping" -) - -// Event represents an event that can trigger state transitions -type Event string - -const ( - EventStart Event = "start" - EventStop Event = "stop" - EventSyncTimeout Event = "sync_timeout" - EventInduceSync Event = "induce_sync" - EventSyncComplete Event = "sync_complete" - EventInitComplete Event = "init_complete" - EventCollectionDone Event = "collection_done" - EventLivenessCheckReceived Event = "liveness_check_received" - EventLivenessTimeout Event = "liveness_timeout" - EventProverSignal Event = "prover_signal" - EventProofComplete Event = "proof_complete" - EventPublishComplete Event = "publish_complete" - EventPublishTimeout Event = "publish_timeout" - EventProposalReceived Event = "proposal_received" - EventVoteReceived Event = "vote_received" - EventQuorumReached Event = "quorum_reached" - EventVotingTimeout Event = "voting_timeout" - EventAggregationDone Event = "aggregation_done" - EventAggregationTimeout Event = "aggregation_timeout" - EventConfirmationReceived Event = "confirmation_received" - EventVerificationDone Event = "verification_done" - EventVerificationTimeout Event = "verification_timeout" - EventCleanupComplete Event = "cleanup_complete" -) - -type Identity = string - -// Unique defines important attributes for distinguishing relative basis of -// items. -type Unique interface { - // Provides the relevant identity of the given Unique. 
- Identity() Identity - // Clone should provide a shallow clone of the Unique. - Clone() Unique - // Rank indicates the ordinal basis of comparison, e.g. a frame number, a - // height. - Rank() uint64 -} - -// TransitionGuard is a function that determines if a transition should occur -type TransitionGuard[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] func(sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT]) bool - -// Transition defines a state transition -type Transition[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] struct { - From State - Event Event - To State - Guard TransitionGuard[StateT, VoteT, PeerIDT, CollectedT] -} - -// TransitionListener is notified of state transitions -type TransitionListener[StateT Unique] interface { - OnTransition( - from State, - to State, - event Event, - ) -} - -type eventWrapper struct { - event Event - response chan error -} - -// SyncProvider handles synchronization management -type SyncProvider[StateT Unique] interface { - // Performs synchronization to set internal state. Note that it is assumed - // that errors are transient and synchronization should be reattempted on - // failure. If some other process for synchronization is used and this should - // be bypassed, send nil on the error channel. Provided context may be - // canceled, should be used to halt long-running sync operations. - Synchronize( - existing *StateT, - ctx context.Context, - ) (<-chan *StateT, <-chan error) -} - -// VotingProvider handles voting logic by deferring decisions, collection, and -// state finalization to an outside implementation. -type VotingProvider[StateT Unique, VoteT Unique, PeerIDT Unique] interface { - // Sends a proposal for voting. - SendProposal(proposal *StateT, ctx context.Context) error - // DecideAndSendVote makes a decision, mapped by leader, and should handle any - // side effects (like publishing vote messages). - DecideAndSendVote( - proposals map[Identity]*StateT, - ctx context.Context, - ) (PeerIDT, *VoteT, error) - // Re-publishes a vote message, used to help lagging peers catch up. - SendVote(vote *VoteT, ctx context.Context) (PeerIDT, error) - // IsQuorum returns a response indicating whether or not quorum has been - // reached. - IsQuorum( - proposalVotes map[Identity]*VoteT, - ctx context.Context, - ) (bool, error) - // FinalizeVotes performs any folding of proposed state required from VoteT - // onto StateT, proposed states and votes matched by PeerIDT, returns - // finalized state, chosen proposer PeerIDT. - FinalizeVotes( - proposals map[Identity]*StateT, - proposalVotes map[Identity]*VoteT, - ctx context.Context, - ) (*StateT, PeerIDT, error) - // SendConfirmation sends confirmation of the finalized state. - SendConfirmation(finalized *StateT, ctx context.Context) error -} - -// LeaderProvider handles leader selection. State is provided, if relevant to -// the upstream consensus engine. -type LeaderProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - // GetNextLeaders returns a list of node indices, in priority order. Note that - // it is assumed that if no error is returned, GetNextLeaders should produce - // a non-empty list. If a list of size smaller than minimumProvers is - // provided, the liveness check will loop until the list is greater than that. - GetNextLeaders(prior *StateT, ctx context.Context) ([]PeerIDT, error) - // ProveNextState prepares a non-finalized new state from the prior, to be - // proposed and voted upon. 
Provided context may be canceled, should be used - // to halt long-running prover operations. - ProveNextState( - prior *StateT, - collected CollectedT, - ctx context.Context, - ) (*StateT, error) -} - -// LivenessProvider handles liveness announcements ahead of proving, to -// pre-emptively choose the next prover. In expected leader scenarios, this -// enables a peer to determine if an honest next prover is offline, so that it -// can publish the next state without waiting. -type LivenessProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - // Collect returns the collected mutation operations ahead of liveness - // announcements. - Collect(ctx context.Context) (CollectedT, error) - // SendLiveness announces liveness ahead of the next prover deterimination and - // subsequent proving. Provides prior state and collected mutation operations - // if relevant. - SendLiveness(prior *StateT, collected CollectedT, ctx context.Context) error -} - -// TraceLogger defines a simple tracing interface -type TraceLogger interface { - Trace(message string) - Error(message string, err error) -} - -type nilTracer struct{} - -func (nilTracer) Trace(message string) {} -func (nilTracer) Error(message string, err error) {} - -// StateMachine manages consensus engine state transitions with generic state -// tracking. T represents the raw state bearing type, the implementation details -// are left to callers, who may augment their transitions to utilize the data -// if needed. If no method of fork choice is utilized external to this machine, -// this state machine provides BFT consensus (e.g. < 1/3 byzantine behaviors) -// provided assumptions outlined in interface types are fulfilled. The state -// transition patterns strictly assume a round-based state transition using -// cryptographic proofs. 
-// -// This implementation requires implementations of specific patterns: -// - A need to synchronize state from peers (SyncProvider) -// - A need to record voting from the upstream consumer to decide on consensus -// changes during the voting period (VotingProvider) -// - A need to decide on the next leader and prove (LeaderProvider) -// - A need to announce liveness ahead of long-running proof operations -// (LivenessProvider) -type StateMachine[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] struct { - mu sync.RWMutex - transitions map[State]map[Event]*Transition[ - StateT, VoteT, PeerIDT, CollectedT, - ] - stateConfigs map[State]*StateConfig[ - StateT, VoteT, PeerIDT, CollectedT, - ] - eventChan chan eventWrapper - ctx context.Context - cancel context.CancelFunc - timeoutTimer *time.Timer - behaviorCancel context.CancelFunc - - // Internal state - machineState State - activeState *StateT - nextState *StateT - collected *CollectedT - id PeerIDT - nextProvers []PeerIDT - liveness map[uint64]map[Identity]CollectedT - votes map[uint64]map[Identity]*VoteT - proposals map[uint64]map[Identity]*StateT - confirmations map[uint64]map[Identity]*StateT - chosenProposer *PeerIDT - stateStartTime time.Time - transitionCount uint64 - listeners []TransitionListener[StateT] - shouldEmitReceiveEventsOnSends bool - minimumProvers func() uint64 - - // Dependencies - syncProvider SyncProvider[StateT] - votingProvider VotingProvider[StateT, VoteT, PeerIDT] - leaderProvider LeaderProvider[StateT, PeerIDT, CollectedT] - livenessProvider LivenessProvider[StateT, PeerIDT, CollectedT] - traceLogger TraceLogger -} - -// StateConfig defines configuration for a state with generic behaviors -type StateConfig[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] struct { - // Callbacks for state entry/exit - OnEnter StateCallback[StateT, VoteT, PeerIDT, CollectedT] - OnExit StateCallback[StateT, VoteT, PeerIDT, CollectedT] - - // State behavior - runs continuously while in state - Behavior StateBehavior[StateT, VoteT, PeerIDT, CollectedT] - - // Timeout configuration - Timeout time.Duration - OnTimeout Event -} - -// StateCallback is called when entering or exiting a state -type StateCallback[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] func( - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT], - data *StateT, - event Event, -) - -// StateBehavior defines the behavior while in a state -type StateBehavior[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] func( - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT], - data *StateT, - ctx context.Context, -) - -// NewStateMachine creates a new generic state machine for consensus. -// `initialState` should be provided if available, this does not set the -// position of the state machine however, consumers will need to manually force -// a state machine's internal state if desired. Assumes some variety of pubsub- -// based semantics are used in send/receive based operations, if the pubsub -// implementation chosen does not receive messages published by itself, set -// shouldEmitReceiveEventsOnSends to true. 
-func NewStateMachine[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -]( - id PeerIDT, - initialState *StateT, - shouldEmitReceiveEventsOnSends bool, - minimumProvers func() uint64, - syncProvider SyncProvider[StateT], - votingProvider VotingProvider[StateT, VoteT, PeerIDT], - leaderProvider LeaderProvider[StateT, PeerIDT, CollectedT], - livenessProvider LivenessProvider[StateT, PeerIDT, CollectedT], - traceLogger TraceLogger, -) *StateMachine[StateT, VoteT, PeerIDT, CollectedT] { - ctx, cancel := context.WithCancel(context.Background()) - if traceLogger == nil { - traceLogger = nilTracer{} - } - sm := &StateMachine[StateT, VoteT, PeerIDT, CollectedT]{ - machineState: StateStopped, - transitions: make( - map[State]map[Event]*Transition[StateT, VoteT, PeerIDT, CollectedT], - ), - stateConfigs: make( - map[State]*StateConfig[StateT, VoteT, PeerIDT, CollectedT], - ), - eventChan: make(chan eventWrapper, 100), - ctx: ctx, - cancel: cancel, - activeState: initialState, - id: id, - votes: make(map[uint64]map[Identity]*VoteT), - proposals: make(map[uint64]map[Identity]*StateT), - liveness: make(map[uint64]map[Identity]CollectedT), - confirmations: make(map[uint64]map[Identity]*StateT), - listeners: make([]TransitionListener[StateT], 0), - shouldEmitReceiveEventsOnSends: shouldEmitReceiveEventsOnSends, - minimumProvers: minimumProvers, - syncProvider: syncProvider, - votingProvider: votingProvider, - leaderProvider: leaderProvider, - livenessProvider: livenessProvider, - traceLogger: traceLogger, - } - - // Define state configurations - sm.defineStateConfigs() - - // Define transitions - sm.defineTransitions() - - // Start event processor - go sm.processEvents() - - return sm -} - -// defineStateConfigs sets up state configurations with behaviors -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) defineStateConfigs() { - sm.traceLogger.Trace("enter defineStateConfigs") - defer sm.traceLogger.Trace("exit defineStateConfigs") - // Starting state - just timeout to complete initialization - sm.stateConfigs[StateStarting] = &StateConfig[ - StateT, - VoteT, - PeerIDT, - CollectedT, - ]{ - Timeout: 1 * time.Second, - OnTimeout: EventInitComplete, - } - - type Config = StateConfig[ - StateT, - VoteT, - PeerIDT, - CollectedT, - ] - - type SMT = StateMachine[StateT, VoteT, PeerIDT, CollectedT] - - // Loading state - synchronize with network - sm.stateConfigs[StateLoading] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Loading behavior") - defer sm.traceLogger.Trace("exit Loading behavior") - if sm.syncProvider != nil { - newStateCh, errCh := sm.syncProvider.Synchronize(sm.activeState, ctx) - select { - case newState := <-newStateCh: - sm.mu.Lock() - sm.activeState = newState - sm.mu.Unlock() - nextLeaders, err := sm.leaderProvider.GetNextLeaders(newState, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - time.Sleep(10 * time.Second) - sm.SendEvent(EventSyncTimeout) - return - } - found := false - for _, leader := range nextLeaders { - if leader.Identity() == sm.id.Identity() { - found = true - break - } - } - if found { - sm.SendEvent(EventSyncComplete) - } else { - time.Sleep(10 * time.Second) - sm.SendEvent(EventSyncTimeout) - } - case <-errCh: - time.Sleep(10 * time.Second) - sm.SendEvent(EventSyncTimeout) - case <-ctx.Done(): - return - } - } - }, - Timeout: 10 * time.Hour, - OnTimeout: EventSyncTimeout, - } - - // Collecting state 
- wait for frame or timeout - sm.stateConfigs[StateCollecting] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Collecting behavior") - defer sm.traceLogger.Trace("exit Collecting behavior") - collected, err := sm.livenessProvider.Collect(ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - - sm.mu.Lock() - sm.nextProvers = []PeerIDT{} - sm.chosenProposer = nil - sm.collected = &collected - sm.mu.Unlock() - - nextProvers, err := sm.leaderProvider.GetNextLeaders(data, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - - sm.mu.Lock() - sm.nextProvers = nextProvers - sm.mu.Unlock() - - err = sm.livenessProvider.SendLiveness(data, collected, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - - sm.mu.Lock() - if sm.shouldEmitReceiveEventsOnSends { - if _, ok := sm.liveness[collected.Rank()]; !ok { - sm.liveness[collected.Rank()] = make(map[Identity]CollectedT) - } - sm.liveness[collected.Rank()][sm.id.Identity()] = *sm.collected - } - sm.mu.Unlock() - - if sm.shouldEmitReceiveEventsOnSends { - sm.SendEvent(EventLivenessCheckReceived) - } - - sm.SendEvent(EventCollectionDone) - }, - Timeout: 10 * time.Second, - OnTimeout: EventInduceSync, - } - - // Liveness check state - sm.stateConfigs[StateLivenessCheck] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Liveness behavior") - defer sm.traceLogger.Trace("exit Liveness behavior") - sm.mu.Lock() - nextProversLen := len(sm.nextProvers) - sm.mu.Unlock() - - // If we're not meeting the minimum prover count, we should loop. 
- if nextProversLen < int(sm.minimumProvers()) { - sm.traceLogger.Trace("insufficient provers, re-fetching leaders") - var err error - nextProvers, err := sm.leaderProvider.GetNextLeaders(data, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - sm.mu.Lock() - sm.nextProvers = nextProvers - sm.mu.Unlock() - } - - sm.mu.Lock() - collected := *sm.collected - sm.mu.Unlock() - - sm.mu.Lock() - livenessLen := len(sm.liveness[(*sm.activeState).Rank()+1]) - sm.mu.Unlock() - - // We have enough checks for consensus: - if livenessLen >= int(sm.minimumProvers()) { - sm.traceLogger.Trace( - "sufficient liveness checks, sending prover signal", - ) - sm.SendEvent(EventProverSignal) - return - } - - sm.traceLogger.Trace( - fmt.Sprintf( - "insufficient liveness checks: need %d, have %d", - sm.minimumProvers(), - livenessLen, - ), - ) - - select { - case <-time.After(1 * time.Second): - err := sm.livenessProvider.SendLiveness(data, collected, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - case <-ctx.Done(): - } - }, - Timeout: 2 * time.Second, - OnTimeout: EventLivenessTimeout, - } - - // Proving state - generate proof - sm.stateConfigs[StateProving] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Proving behavior") - defer sm.traceLogger.Trace("exit Proving behavior") - sm.mu.Lock() - collected := sm.collected - sm.collected = nil - sm.mu.Unlock() - - if collected == nil { - sm.SendEvent(EventInduceSync) - return - } - - proposal, err := sm.leaderProvider.ProveNextState( - data, - *collected, - ctx, - ) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - - sm.SendEvent(EventInduceSync) - return - } - - sm.mu.Lock() - sm.traceLogger.Trace( - fmt.Sprintf("adding proposal with rank %d", (*proposal).Rank()), - ) - if _, ok := sm.proposals[(*proposal).Rank()]; !ok { - sm.proposals[(*proposal).Rank()] = make(map[Identity]*StateT) - } - sm.proposals[(*proposal).Rank()][sm.id.Identity()] = proposal - sm.mu.Unlock() - - sm.SendEvent(EventProofComplete) - }, - Timeout: 120 * time.Second, - OnTimeout: EventPublishTimeout, - } - - // Publishing state - publish frame - sm.stateConfigs[StatePublishing] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Publishing behavior") - defer sm.traceLogger.Trace("exit Publishing behavior") - sm.mu.Lock() - if _, ok := sm.proposals[(*data).Rank()+1][sm.id.Identity()]; ok { - proposal := sm.proposals[(*data).Rank()+1][sm.id.Identity()] - sm.mu.Unlock() - - err := sm.votingProvider.SendProposal( - proposal, - ctx, - ) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - sm.SendEvent(EventPublishComplete) - } else { - sm.mu.Unlock() - } - }, - Timeout: 1 * time.Second, - OnTimeout: EventPublishTimeout, - } - - // Voting state - monitor for quorum - sm.stateConfigs[StateVoting] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Voting behavior") - defer sm.traceLogger.Trace("exit Voting behavior") - - sm.mu.Lock() - - if sm.chosenProposer == nil { - // We haven't voted yet - sm.traceLogger.Trace("proposer not yet chosen") - perfect := 
map[int]PeerIDT{} // all provers - live := map[int]PeerIDT{} // the provers who told us they're alive - for i, p := range sm.nextProvers { - perfect[i] = p - if _, ok := sm.liveness[(*sm.activeState).Rank()+1][p.Identity()]; ok { - live[i] = p - } - } - - if len(sm.proposals[(*sm.activeState).Rank()+1]) < int(sm.minimumProvers()) { - sm.traceLogger.Trace( - fmt.Sprintf( - "insufficient proposal count: %d, need %d", - len(sm.proposals[(*sm.activeState).Rank()+1]), - int(sm.minimumProvers()), - ), - ) - sm.mu.Unlock() - return - } - - if ctx == nil { - sm.traceLogger.Trace("context null") - sm.mu.Unlock() - return - } - - select { - case <-ctx.Done(): - sm.traceLogger.Trace("context canceled") - sm.mu.Unlock() - return - default: - sm.traceLogger.Trace("choosing proposal") - proposals := map[Identity]*StateT{} - for k, v := range sm.proposals[(*sm.activeState).Rank()+1] { - state := (*v).Clone().(StateT) - proposals[k] = &state - } - - sm.mu.Unlock() - selectedPeer, vote, err := sm.votingProvider.DecideAndSendVote( - proposals, - ctx, - ) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - break - } - sm.mu.Lock() - sm.chosenProposer = &selectedPeer - - if sm.shouldEmitReceiveEventsOnSends { - if _, ok := sm.votes[(*sm.activeState).Rank()+1]; !ok { - sm.votes[(*sm.activeState).Rank()+1] = make(map[Identity]*VoteT) - } - sm.votes[(*sm.activeState).Rank()+1][sm.id.Identity()] = vote - sm.mu.Unlock() - sm.SendEvent(EventVoteReceived) - return - } - sm.mu.Unlock() - } - } else { - sm.traceLogger.Trace("proposal chosen, checking for quorum") - proposalVotes := map[Identity]*VoteT{} - for p, vp := range sm.votes[(*sm.activeState).Rank()+1] { - vclone := (*vp).Clone().(VoteT) - proposalVotes[p] = &vclone - } - haveEnoughProposals := len(sm.proposals[(*sm.activeState).Rank()+1]) >= - int(sm.minimumProvers()) - sm.mu.Unlock() - isQuorum, err := sm.votingProvider.IsQuorum(proposalVotes, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - - if isQuorum && haveEnoughProposals { - sm.traceLogger.Trace("quorum reached") - sm.SendEvent(EventQuorumReached) - } else { - sm.traceLogger.Trace( - fmt.Sprintf( - "quorum not reached: proposals: %d, needed: %d", - len(sm.proposals[(*sm.activeState).Rank()+1]), - sm.minimumProvers(), - ), - ) - } - } - }, - Timeout: 1 * time.Second, - OnTimeout: EventVotingTimeout, - } - - // Finalizing state - sm.stateConfigs[StateFinalizing] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.mu.Lock() - proposals := map[Identity]*StateT{} - for k, v := range sm.proposals[(*sm.activeState).Rank()+1] { - state := (*v).Clone().(StateT) - proposals[k] = &state - } - proposalVotes := map[Identity]*VoteT{} - for p, vp := range sm.votes[(*sm.activeState).Rank()+1] { - vclone := (*vp).Clone().(VoteT) - proposalVotes[p] = &vclone - } - sm.mu.Unlock() - finalized, _, err := sm.votingProvider.FinalizeVotes( - proposals, - proposalVotes, - ctx, - ) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - next := (*finalized).Clone().(StateT) - sm.mu.Lock() - sm.nextState = &next - sm.mu.Unlock() - sm.SendEvent(EventAggregationDone) - }, - Timeout: 1 * time.Second, - OnTimeout: EventAggregationTimeout, - } - - // Verifying state - sm.stateConfigs[StateVerifying] 
= &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.traceLogger.Trace("enter Verifying behavior") - defer sm.traceLogger.Trace("exit Verifying behavior") - sm.mu.Lock() - if _, ok := sm.confirmations[(*sm.activeState).Rank()+1][sm.id.Identity()]; !ok && - sm.nextState != nil { - nextState := sm.nextState - sm.mu.Unlock() - err := sm.votingProvider.SendConfirmation(nextState, ctx) - if err != nil { - sm.traceLogger.Error( - fmt.Sprintf("error encountered in %s", sm.machineState), - err, - ) - sm.SendEvent(EventInduceSync) - return - } - sm.mu.Lock() - } - - progressed := false - if sm.nextState != nil { - sm.activeState = sm.nextState - progressed = true - } - if progressed { - sm.nextState = nil - sm.collected = nil - delete(sm.liveness, (*sm.activeState).Rank()) - delete(sm.proposals, (*sm.activeState).Rank()) - delete(sm.votes, (*sm.activeState).Rank()) - delete(sm.confirmations, (*sm.activeState).Rank()) - sm.mu.Unlock() - sm.SendEvent(EventVerificationDone) - } else { - sm.mu.Unlock() - } - }, - Timeout: 1 * time.Second, - OnTimeout: EventVerificationTimeout, - } - - // Stopping state - sm.stateConfigs[StateStopping] = &Config{ - Behavior: func(sm *SMT, data *StateT, ctx context.Context) { - sm.SendEvent(EventCleanupComplete) - }, - Timeout: 30 * time.Second, - OnTimeout: EventCleanupComplete, - } -} - -// defineTransitions sets up all possible state transitions -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) defineTransitions() { - sm.traceLogger.Trace("enter defineTransitions") - defer sm.traceLogger.Trace("exit defineTransitions") - - // Helper to add transition - addTransition := func( - from State, - event Event, - to State, - guard TransitionGuard[StateT, VoteT, PeerIDT, CollectedT], - ) { - if sm.transitions[from] == nil { - sm.transitions[from] = make(map[Event]*Transition[ - StateT, - VoteT, - PeerIDT, - CollectedT, - ]) - } - sm.transitions[from][event] = &Transition[ - StateT, - VoteT, - PeerIDT, - CollectedT, - ]{ - From: from, - Event: event, - To: to, - Guard: guard, - } - } - - // Basic flow transitions - addTransition(StateStopped, EventStart, StateStarting, nil) - addTransition(StateStarting, EventInitComplete, StateLoading, nil) - addTransition(StateLoading, EventSyncTimeout, StateLoading, nil) - addTransition(StateLoading, EventSyncComplete, StateCollecting, nil) - addTransition(StateCollecting, EventCollectionDone, StateLivenessCheck, nil) - addTransition(StateLivenessCheck, EventProverSignal, StateProving, nil) - - // Loop indefinitely if nobody can be found - addTransition( - StateLivenessCheck, - EventLivenessTimeout, - StateLivenessCheck, - nil, - ) - // // Loop until we get enough of these - // addTransition( - // StateLivenessCheck, - // EventLivenessCheckReceived, - // StateLivenessCheck, - // nil, - // ) - - // Prover flow - addTransition(StateProving, EventProofComplete, StatePublishing, nil) - addTransition(StateProving, EventPublishTimeout, StateVoting, nil) - addTransition(StatePublishing, EventPublishComplete, StateVoting, nil) - addTransition(StatePublishing, EventPublishTimeout, StateVoting, nil) - - // Common voting flow - addTransition(StateVoting, EventProposalReceived, StateVoting, nil) - // addTransition(StateVoting, EventVoteReceived, StateVoting, nil) - addTransition(StateVoting, EventQuorumReached, StateFinalizing, nil) - addTransition(StateVoting, EventVotingTimeout, StateVoting, nil) - addTransition(StateFinalizing, EventAggregationDone, StateVerifying, nil) - addTransition(StateFinalizing, 
EventAggregationTimeout, StateFinalizing, nil) - addTransition(StateVerifying, EventVerificationDone, StateCollecting, nil) - addTransition(StateVerifying, EventVerificationTimeout, StateVerifying, nil) - - // Stop or induce Sync transitions from any state - for _, state := range []State{ - StateStarting, - StateLoading, - StateCollecting, - StateLivenessCheck, - StateProving, - StatePublishing, - StateVoting, - StateFinalizing, - StateVerifying, - } { - addTransition(state, EventStop, StateStopping, nil) - addTransition(state, EventInduceSync, StateLoading, nil) - } - - addTransition(StateStopping, EventCleanupComplete, StateStopped, nil) -} - -// Start begins the state machine -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) Start() error { - sm.traceLogger.Trace("enter start") - defer sm.traceLogger.Trace("exit start") - sm.SendEvent(EventStart) - return nil -} - -// Stop halts the state machine -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) Stop() error { - sm.traceLogger.Trace("enter stop") - defer sm.traceLogger.Trace("exit stop") -drain: - for { - select { - case <-sm.eventChan: - default: - break drain - } - } - sm.SendEvent(EventStop) - return nil -} - -// SendEvent sends an event to the state machine -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) SendEvent(event Event) { - sm.traceLogger.Trace(fmt.Sprintf("enter sendEvent: %s", event)) - defer sm.traceLogger.Trace(fmt.Sprintf("exit sendEvent: %s", event)) - response := make(chan error, 1) - go func() { - select { - case sm.eventChan <- eventWrapper{event: event, response: response}: - <-response - case <-sm.ctx.Done(): - return - } - }() -} - -// processEvents handles events and transitions -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) processEvents() { - defer func() { - if r := recover(); r != nil { - sm.traceLogger.Error( - "fatal error encountered", - errors.New(fmt.Sprintf("%+v", r)), - ) - sm.Close() - } - }() - - sm.traceLogger.Trace("enter processEvents") - defer sm.traceLogger.Trace("exit processEvents") - for { - select { - case <-sm.ctx.Done(): - return - case wrapper := <-sm.eventChan: - err := sm.handleEvent(wrapper.event) - wrapper.response <- err - } - } -} - -// handleEvent processes a single event -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) handleEvent(event Event) error { - sm.traceLogger.Trace(fmt.Sprintf("enter handleEvent: %s", event)) - defer sm.traceLogger.Trace(fmt.Sprintf("exit handleEvent: %s", event)) - sm.mu.Lock() - - currentState := sm.machineState - transitions, exists := sm.transitions[currentState] - if !exists { - sm.mu.Unlock() - - return errors.Wrap( - fmt.Errorf("no transitions defined for state %s", currentState), - "handle event", - ) - } - - transition, exists := transitions[event] - if !exists { - sm.mu.Unlock() - - return errors.Wrap( - fmt.Errorf( - "no transition for event %s in state %s", - event, - currentState, - ), - "handle event", - ) - } - - // Check guard condition with the actual state - if transition.Guard != nil && !transition.Guard(sm) { - sm.mu.Unlock() - - return errors.Wrap( - fmt.Errorf( - "transition guard failed for %s -> %s on %s", - currentState, - transition.To, - event, - ), - "handle event", - ) - } - - sm.mu.Unlock() - - // Execute transition - sm.executeTransition(currentState, transition.To, event) - return nil -} - -// executeTransition performs the state transition -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - 
CollectedT, -]) executeTransition( - from State, - to State, - event Event, -) { - sm.traceLogger.Trace( - fmt.Sprintf("enter executeTransition: %s -> %s [%s]", from, to, event), - ) - defer sm.traceLogger.Trace( - fmt.Sprintf("exit executeTransition: %s -> %s [%s]", from, to, event), - ) - sm.mu.Lock() - - // Cancel any existing timeout and behavior - if sm.timeoutTimer != nil { - sm.timeoutTimer.Stop() - sm.timeoutTimer = nil - } - - // Cancel existing behavior if any - if sm.behaviorCancel != nil { - sm.behaviorCancel() - sm.behaviorCancel = nil - } - - // Call exit callback for current state - if config, exists := sm.stateConfigs[from]; exists && config.OnExit != nil { - sm.mu.Unlock() - config.OnExit(sm, sm.activeState, event) - sm.mu.Lock() - } - - // Update state - sm.machineState = to - sm.stateStartTime = time.Now() - sm.transitionCount++ - - // Notify listeners - for _, listener := range sm.listeners { - listener.OnTransition(from, to, event) - } - - // Call enter callback for new state - if config, exists := sm.stateConfigs[to]; exists { - if config.OnEnter != nil { - sm.mu.Unlock() - config.OnEnter(sm, sm.activeState, event) - sm.mu.Lock() - } - - // Start state behavior if defined - if config.Behavior != nil { - behaviorCtx, cancel := context.WithCancel(sm.ctx) - sm.behaviorCancel = cancel - sm.mu.Unlock() - config.Behavior(sm, sm.activeState, behaviorCtx) - sm.mu.Lock() - } - - // Set up timeout for new state - if config.Timeout > 0 && config.OnTimeout != "" { - sm.timeoutTimer = time.AfterFunc(config.Timeout, func() { - sm.SendEvent(config.OnTimeout) - }) - } - } - sm.mu.Unlock() -} - -// GetState returns the current state -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) GetState() State { - sm.traceLogger.Trace("enter getstate") - defer sm.traceLogger.Trace("exit getstate") - sm.mu.Lock() - defer sm.mu.Unlock() - return sm.machineState -} - -// Additional methods for compatibility -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) GetStateTime() time.Duration { - sm.traceLogger.Trace("enter getstatetime") - defer sm.traceLogger.Trace("exit getstatetime") - return time.Since(sm.stateStartTime) -} - -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) GetTransitionCount() uint64 { - sm.traceLogger.Trace("enter transitioncount") - defer sm.traceLogger.Trace("exit transitioncount") - return sm.transitionCount -} - -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) AddListener(listener TransitionListener[StateT]) { - sm.traceLogger.Trace("enter addlistener") - defer sm.traceLogger.Trace("exit addlistener") - sm.mu.Lock() - defer sm.mu.Unlock() - sm.listeners = append(sm.listeners, listener) -} - -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) Close() { - sm.traceLogger.Trace("enter close") - defer sm.traceLogger.Trace("exit close") - sm.mu.Lock() - defer sm.mu.Unlock() - sm.cancel() - if sm.timeoutTimer != nil { - sm.timeoutTimer.Stop() - } - if sm.behaviorCancel != nil { - sm.behaviorCancel() - } - sm.machineState = StateStopped -} - -// ReceiveLivenessCheck receives a liveness announcement and captures -// collected mutation operations reported by the peer if relevant. 
-func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) ReceiveLivenessCheck(peer PeerIDT, collected CollectedT) error { - sm.traceLogger.Trace( - fmt.Sprintf( - "enter receivelivenesscheck, peer: %s, rank: %d", - peer.Identity(), - collected.Rank(), - ), - ) - defer sm.traceLogger.Trace("exit receivelivenesscheck") - sm.mu.Lock() - if _, ok := sm.liveness[collected.Rank()]; !ok { - sm.liveness[collected.Rank()] = make(map[Identity]CollectedT) - } - if _, ok := sm.liveness[collected.Rank()][peer.Identity()]; !ok { - sm.liveness[collected.Rank()][peer.Identity()] = collected - } - sm.mu.Unlock() - - sm.SendEvent(EventLivenessCheckReceived) - return nil -} - -// ReceiveProposal receives a proposed new state. -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) ReceiveProposal(peer PeerIDT, proposal *StateT) error { - sm.traceLogger.Trace("enter receiveproposal") - defer sm.traceLogger.Trace("exit receiveproposal") - sm.mu.Lock() - if _, ok := sm.proposals[(*proposal).Rank()]; !ok { - sm.proposals[(*proposal).Rank()] = make(map[Identity]*StateT) - } - if _, ok := sm.proposals[(*proposal).Rank()][peer.Identity()]; !ok { - sm.proposals[(*proposal).Rank()][peer.Identity()] = proposal - } - sm.mu.Unlock() - - sm.SendEvent(EventProposalReceived) - return nil -} - -// ReceiveVote captures a vote. Presumes structural and protocol validity of a -// vote has already been evaluated. -func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) ReceiveVote(proposer PeerIDT, voter PeerIDT, vote *VoteT) error { - sm.traceLogger.Trace("enter receivevote") - defer sm.traceLogger.Trace("exit receivevote") - sm.mu.Lock() - - if _, ok := sm.votes[(*vote).Rank()]; !ok { - sm.votes[(*vote).Rank()] = make(map[Identity]*VoteT) - } - if _, ok := sm.votes[(*vote).Rank()][voter.Identity()]; !ok { - sm.votes[(*vote).Rank()][voter.Identity()] = vote - } else if (*sm.votes[(*vote).Rank()][voter.Identity()]).Identity() != - (*vote).Identity() { - sm.mu.Unlock() - return errors.Wrap(errors.New("received conflicting vote"), "receive vote") - } - sm.mu.Unlock() - - sm.SendEvent(EventVoteReceived) - return nil -} - -// ReceiveConfirmation captures a confirmation. Presumes structural and protocol -// validity of the state has already been evaluated. 
-func (sm *StateMachine[ - StateT, - VoteT, - PeerIDT, - CollectedT, -]) ReceiveConfirmation( - peer PeerIDT, - confirmation *StateT, -) error { - sm.traceLogger.Trace("enter receiveconfirmation") - defer sm.traceLogger.Trace("exit receiveconfirmation") - sm.mu.Lock() - if _, ok := sm.confirmations[(*confirmation).Rank()]; !ok { - sm.confirmations[(*confirmation).Rank()] = make(map[Identity]*StateT) - } - if _, ok := sm.confirmations[(*confirmation).Rank()][peer.Identity()]; !ok { - sm.confirmations[(*confirmation).Rank()][peer.Identity()] = confirmation - } - sm.mu.Unlock() - - sm.SendEvent(EventConfirmationReceived) - return nil -} diff --git a/consensus/state_machine_test.go b/consensus/state_machine_test.go deleted file mode 100644 index a2b1b75..0000000 --- a/consensus/state_machine_test.go +++ /dev/null @@ -1,1055 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - "slices" - "sync" - "testing" - "time" - - "github.com/pkg/errors" -) - -// Test types for the generic state machine -type TestState struct { - Round uint64 - Hash string - Timestamp time.Time - ProposalID string -} - -func (t TestState) Identity() string { - return t.Hash -} - -func (t TestState) Rank() uint64 { - return t.Round -} - -func (t TestState) Clone() Unique { - return TestState{ - Round: t.Round, - Hash: t.Hash, - Timestamp: t.Timestamp, - ProposalID: t.ProposalID, - } -} - -type TestVote struct { - Round uint64 - VoterID string - ProposalID string - Signature string -} - -func (t TestVote) Identity() string { - return t.VoterID -} - -func (t TestVote) Rank() uint64 { - return t.Round -} - -func (t TestVote) Clone() Unique { - return TestVote{ - Round: t.Round, - VoterID: t.VoterID, - ProposalID: t.ProposalID, - Signature: t.Signature, - } -} - -type TestPeerID string - -func (t TestPeerID) Identity() string { - return string(t) -} - -func (t TestPeerID) Clone() Unique { - return t -} - -func (t TestPeerID) Rank() uint64 { - return 0 -} - -type TestCollected struct { - Round uint64 - Data []byte - Timestamp time.Time -} - -func (t TestCollected) Identity() string { - return string(t.Data) -} - -func (t TestCollected) Rank() uint64 { - return t.Round -} - -func (t TestCollected) Clone() Unique { - return TestCollected{ - Round: t.Round, - Data: slices.Clone(t.Data), - Timestamp: t.Timestamp, - } -} - -// Mock implementations -type mockSyncProvider struct { - syncDelay time.Duration - newState *TestState -} - -func (m *mockSyncProvider) Synchronize( - existing *TestState, - ctx context.Context, -) (<-chan *TestState, <-chan error) { - stateCh := make(chan *TestState, 1) - errCh := make(chan error, 1) - - go func() { - select { - case <-time.After(m.syncDelay): - if m.newState != nil { - stateCh <- m.newState - } else if existing != nil { - // Just return existing state - stateCh <- existing - } else { - // Create initial state - stateCh <- &TestState{ - Round: 0, - Hash: "genesis", - Timestamp: time.Now(), - } - } - close(stateCh) - close(errCh) - case <-ctx.Done(): - close(stateCh) - close(errCh) - } - }() - - return stateCh, errCh -} - -type mockVotingProvider struct { - mu sync.Mutex - quorumSize int - sentProposals []*TestState - sentVotes []*TestVote - confirmations []*TestState -} - -func (m *mockVotingProvider) SendProposal(proposal *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentProposals = append(m.sentProposals, proposal) - return nil -} - -func (m *mockVotingProvider) DecideAndSendVote( - proposals map[Identity]*TestState, - ctx context.Context, -) 
(TestPeerID, *TestVote, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Pick first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - vote := &TestVote{ - VoterID: "leader1", - ProposalID: proposal.ProposalID, - Signature: "test-sig", - } - m.sentVotes = append(m.sentVotes, vote) - return TestPeerID(peerID), vote, nil - } - - return "", nil, errors.New("no proposal to vote for") -} - -func (m *mockVotingProvider) SendVote(vote *TestVote, ctx context.Context) (TestPeerID, error) { - return "", nil -} - -func (m *mockVotingProvider) IsQuorum(proposalVotes map[Identity]*TestVote, ctx context.Context) (bool, error) { - totalVotes := 0 - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - } - totalVotes += 1 - - if count >= m.quorumSize { - return true, nil - } - } - if totalVotes >= m.quorumSize { - return false, errors.New("split quorum") - } - return false, nil -} - -func (m *mockVotingProvider) FinalizeVotes( - proposals map[Identity]*TestState, - proposalVotes map[Identity]*TestVote, - ctx context.Context, -) (*TestState, TestPeerID, error) { - // Pick the proposal with the most votes - winnerCount := 0 - var winnerProposal *TestState = nil - var winnerProposer TestPeerID - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - } - } - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - if _, ok := voteCount[proposal.ProposalID]; !ok { - continue - } - if voteCount[proposal.ProposalID] > winnerCount { - winnerCount = voteCount[proposal.ProposalID] - winnerProposal = proposal - winnerProposer = TestPeerID(peerID) - } - } - - if winnerProposal != nil { - // Create new state with incremented round - newState := &TestState{ - Round: winnerProposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, winnerProposer, nil - } - - // Default to first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - newState := &TestState{ - Round: proposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, TestPeerID(peerID), nil - } - - return nil, "", nil -} - -func (m *mockVotingProvider) SendConfirmation(finalized *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.confirmations = append(m.confirmations, finalized) - return nil -} - -type mockLeaderProvider struct { - isLeader bool - leaders []TestPeerID - proveDelay time.Duration - shouldFail bool -} - -func (m *mockLeaderProvider) GetNextLeaders(prior *TestState, ctx context.Context) ([]TestPeerID, error) { - if len(m.leaders) > 0 { - return m.leaders, nil - } - return []TestPeerID{"leader1", "leader2", "leader3"}, nil -} - -func (m *mockLeaderProvider) ProveNextState( - prior *TestState, - collected TestCollected, - ctx context.Context, -) (*TestState, error) { - if m.shouldFail || !m.isLeader { - return nil, context.Canceled - } - - select { - case <-time.After(m.proveDelay): - round := uint64(0) - if prior != nil { - round = prior.Round - } - return &TestState{ - Round: round + 1, - Hash: "proved-hash", - Timestamp: 
time.Now(), - ProposalID: "proposal-" + fmt.Sprintf("%d", round+1), - }, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -type mockLivenessProvider struct { - collectDelay time.Duration - sentLiveness int - mu sync.Mutex -} - -func (m *mockLivenessProvider) Collect(ctx context.Context) (TestCollected, error) { - select { - case <-time.After(m.collectDelay): - return TestCollected{ - Round: 1, - Data: []byte("collected-data"), - Timestamp: time.Now(), - }, nil - case <-ctx.Done(): - return TestCollected{}, ctx.Err() - } -} - -func (m *mockLivenessProvider) SendLiveness(prior *TestState, collected TestCollected, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentLiveness++ - return nil -} - -// MockTransitionListener for tracking state transitions -type MockTransitionListener struct { - mu sync.Mutex - transitions []TransitionRecord -} - -type TransitionRecord struct { - From State - To State - Event Event - Time time.Time -} - -func (m *MockTransitionListener) OnTransition(from State, to State, event Event) { - m.mu.Lock() - defer m.mu.Unlock() - m.transitions = append(m.transitions, TransitionRecord{ - From: from, - To: to, - Event: event, - Time: time.Now(), - }) -} - -func (m *MockTransitionListener) GetTransitions() []TransitionRecord { - m.mu.Lock() - defer m.mu.Unlock() - result := make([]TransitionRecord, len(m.transitions)) - copy(result, m.transitions) - return result -} - -// Helper to create test state machine -func createTestStateMachine( - id TestPeerID, - isLeader bool, -) *StateMachine[TestState, TestVote, TestPeerID, TestCollected] { - leaders := []TestPeerID{"leader1", "leader2", "leader3"} - if isLeader { - leaders[0] = id - } - - // For leader-only tests, set minimumProvers to 1 - minimumProvers := func() uint64 { return uint64(2) } - if isLeader { - minimumProvers = func() uint64 { return uint64(1) } - } - - return NewStateMachine( - id, - &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, - true, // shouldEmitReceiveEventsOnSends - minimumProvers, - &mockSyncProvider{syncDelay: 10 * time.Millisecond}, - &mockVotingProvider{quorumSize: int(minimumProvers())}, - &mockLeaderProvider{ - isLeader: isLeader, - leaders: leaders, - proveDelay: 50 * time.Millisecond, - }, - &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, - nil, - ) -} - -// Helper to wait for a specific state in transition history -func waitForTransition(listener *MockTransitionListener, targetState State, timeout time.Duration) bool { - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - transitions := listener.GetTransitions() - for _, tr := range transitions { - if tr.To == targetState { - return true - } - } - time.Sleep(10 * time.Millisecond) - } - return false -} - -// Helper to check if a state was reached in transition history -func hasReachedState(listener *MockTransitionListener, targetState State) bool { - transitions := listener.GetTransitions() - for _, tr := range transitions { - if tr.To == targetState { - return true - } - } - return false -} - -func TestStateMachineBasicTransitions(t *testing.T) { - sm := createTestStateMachine("test-node", true) - defer sm.Close() - - // Initial state should be stopped - if sm.GetState() != StateStopped { - t.Errorf("Expected initial state to be %s, got %s", StateStopped, sm.GetState()) - } - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Start the state machine - err := sm.Start() - if err != nil { - t.Fatalf("Failed to start state machine: %v", err) - } - - 
time.Sleep(10 * time.Millisecond) - - // Should transition to starting immediately - if sm.GetState() != StateStarting { - t.Errorf("Expected state to be %s after start, got %s", StateStarting, sm.GetState()) - } - - // Wait for automatic transitions - if !waitForTransition(listener, StateLoading, 2*time.Second) { - t.Fatalf("Failed to reach loading state") - } - - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Verify the expected transition sequence - transitions := listener.GetTransitions() - expectedSequence := []State{StateStarting, StateLoading, StateCollecting} - - for i, expected := range expectedSequence { - if i >= len(transitions) { - t.Errorf("Missing transition to %s", expected) - continue - } - if transitions[i].To != expected { - t.Errorf("Expected transition %d to be to %s, got %s", i, expected, transitions[i].To) - } - } -} - -func TestStateMachineLeaderFlow(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Start the machine - err := sm.Start() - if err != nil { - t.Fatalf("Failed to start: %v", err) - } - - // Wait for the leader to progress through states - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Leader should reach proving state - if !waitForTransition(listener, StateProving, 5*time.Second) { - // Debug output if test fails - transitions := listener.GetTransitions() - t.Logf("Current state: %s", sm.GetState()) - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Fatalf("Leader should have entered proving state") - } - - // Verify expected states were reached - if !hasReachedState(listener, StateCollecting) { - t.Error("Leader should have gone through collecting state") - } - if !hasReachedState(listener, StateLivenessCheck) { - t.Error("Leader should have gone through liveness check state") - } - if !hasReachedState(listener, StateProving) { - t.Error("Leader should have entered proving state") - } -} - -func TestStateMachineExternalEvents(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Receive a proposal while collecting - err := sm.ReceiveProposal("external-leader", &TestState{ - Round: 1, - Hash: "external-hash", - Timestamp: time.Now(), - ProposalID: "external-proposal", - }) - if err != nil { - t.Fatalf("Failed to receive proposal: %v", err) - } - - // Should transition to voting - if !waitForTransition(listener, StateVoting, 4*time.Second) { - t.Errorf("Expected to transition to voting after proposal") - } - - // Verify the transition happened - if !hasReachedState(listener, StateVoting) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Error("Should have transitioned to voting state") - } -} - -func TestStateMachineVoting(t *testing.T) { - 
sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Send proposal to trigger voting - sm.ReceiveProposal("leader2", &TestState{ - Round: 1, - Hash: "test-hash", - Timestamp: time.Now(), - ProposalID: "test-proposal", - }) - - // Wait for voting state - if !waitForTransition(listener, StateVoting, 2*time.Second) { - t.Fatalf("Failed to reach voting state") - } - - // Add another vote to reach quorum - err := sm.ReceiveVote("leader1", "leader2", &TestVote{ - Round: 1, - VoterID: "leader2", - ProposalID: "test-proposal", - Signature: "sig2", - }) - if err != nil { - t.Fatalf("Failed to receive vote: %v", err) - } - - // Should eventually progress past voting (to finalizing, verifying, or back to collecting) - time.Sleep(2 * time.Second) - - // Check if we progressed past voting - progressedPastVoting := hasReachedState(listener, StateFinalizing) || - hasReachedState(listener, StateVerifying) || - (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) - - if !progressedPastVoting { - // If still stuck, try manual trigger - sm.SendEvent(EventQuorumReached) - time.Sleep(500 * time.Millisecond) - - progressedPastVoting = hasReachedState(listener, StateFinalizing) || - hasReachedState(listener, StateVerifying) || - (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) - } - - if !progressedPastVoting { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Errorf("Expected to progress past voting with quorum") - } -} - -func TestStateMachineStop(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for any state beyond starting - if !waitForTransition(listener, StateLoading, 2*time.Second) { - t.Fatalf("State machine did not progress from starting") - } - - // Stop from any state - err := sm.Stop() - if err != nil { - t.Fatalf("Failed to stop: %v", err) - } - - // Should transition to stopping - if !waitForTransition(listener, StateStopping, 1*time.Second) { - t.Errorf("Expected to transition to stopping state") - } - - // Should eventually reach stopped - if !waitForTransition(listener, StateStopped, 3*time.Second) { - // Try manual cleanup complete - sm.SendEvent(EventCleanupComplete) - time.Sleep(100 * time.Millisecond) - } - - // Verify we reached stopped state - if !hasReachedState(listener, StateStopped) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Errorf("Expected to reach stopped state") - } -} - -func TestStateMachineLiveness(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting 
state") - } - - // Wait for liveness check state - if !waitForTransition(listener, StateLivenessCheck, 3*time.Second) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Fatalf("Failed to reach liveness check state") - } - - // Receive liveness checks - sm.ReceiveLivenessCheck("peer1", TestCollected{ - Data: []byte("peer1-data"), - Timestamp: time.Now(), - }) - - sm.ReceiveLivenessCheck("peer2", TestCollected{ - Data: []byte("peer2-data"), - Timestamp: time.Now(), - }) - - // Give it a moment to process - time.Sleep(100 * time.Millisecond) - - // Check that liveness data was stored - sm.mu.RLock() - livenessCount := len(sm.liveness) - sm.mu.RUnlock() - - // Should have at least 2 entries (or 3 if self-emit is counted) - if livenessCount < 2 { - t.Errorf("Expected at least 2 liveness entries, got %d", livenessCount) - } -} - -func TestStateMachineMetrics(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - // Initial metrics - if sm.GetTransitionCount() != 0 { - t.Error("Expected initial transition count to be 0") - } - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Make transitions - sm.Start() - - // Wait for a few transitions - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - if sm.GetTransitionCount() == 0 { - t.Error("Expected transition count to be greater than 0") - } - - // Check state time - stateTime := sm.GetStateTime() - if stateTime < 0 { - t.Errorf("Invalid state time: %v", stateTime) - } -} - -func TestStateMachineConfirmations(t *testing.T) { - sm := createTestStateMachine("leader1", true) - sm.id = "leader1" - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Progress to voting state via proposal - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Send proposal to get to voting - sm.ReceiveProposal("leader2", &TestState{ - Round: 1, - Hash: "test-hash", - Timestamp: time.Now(), - ProposalID: "test-proposal", - }) - - // Wait for voting - if !waitForTransition(listener, StateVoting, 2*time.Second) { - t.Fatalf("Failed to reach voting state") - } - - // Wait a bit for auto-progression or trigger manually - time.Sleep(1 * time.Second) - - // Try to progress to finalizing - sm.SendEvent(EventVotingTimeout) - time.Sleep(500 * time.Millisecond) - - // Check if we reached a state that accepts confirmations - currentState := sm.GetState() - canAcceptConfirmation := currentState == StateFinalizing || currentState == StateVerifying - - if !canAcceptConfirmation { - // Check transition history - if hasReachedState(listener, StateFinalizing) || hasReachedState(listener, StateVerifying) { - // We passed through the state already, that's ok - canAcceptConfirmation = true - } else { - transitions := listener.GetTransitions() - t.Logf("Current state: %s", currentState) - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - // Don't fail - just skip the confirmation test - t.Skip("Could not reach a state that accepts confirmations") - } - } - - // 
Send confirmation (should only be accepted in finalizing or verifying) - sm.ReceiveConfirmation("leader2", &TestState{ - Round: 1, - Hash: "confirmed-hash", - Timestamp: time.Now(), - ProposalID: "confirmed", - }) - - // Check that confirmation was stored - sm.mu.RLock() - confirmCount := len(sm.confirmations) - sm.mu.RUnlock() - - if confirmCount != 1 { - t.Errorf("Expected 1 confirmation, got %d", confirmCount) - } -} - -func TestStateMachineConcurrency(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - sm.Start() - time.Sleep(500 * time.Millisecond) - - // Concurrent operations - var wg sync.WaitGroup - errChan := make(chan error, 5) - - // Send multiple events concurrently - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - defer wg.Done() - sm.SendEvent(EventSyncComplete) - }() - } - - // Receive data concurrently - for i := 0; i < 5; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - peerID := TestPeerID(fmt.Sprintf("peer%d", id)) - if err := sm.ReceiveLivenessCheck(peerID, TestCollected{ - Data: []byte("data"), - }); err != nil { - errChan <- err - } - }(i) - } - - wg.Wait() - close(errChan) - - // Some errors are expected due to invalid state transitions - errorCount := 0 - for err := range errChan { - if err != nil { - errorCount++ - } - } - - // As long as we didn't panic, concurrency is handled - t.Logf("Concurrent operations completed with %d errors (expected)", errorCount) -} - -type mockPanickingVotingProvider struct { - mu sync.Mutex - quorumSize int - sentProposals []*TestState - sentVotes []*TestVote - confirmations []*TestState -} - -func (m *mockPanickingVotingProvider) SendProposal(proposal *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentProposals = append(m.sentProposals, proposal) - return nil -} - -func (m *mockPanickingVotingProvider) DecideAndSendVote( - proposals map[Identity]*TestState, - ctx context.Context, -) (TestPeerID, *TestVote, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Pick first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - vote := &TestVote{ - VoterID: "leader1", - ProposalID: proposal.ProposalID, - Signature: "test-sig", - } - m.sentVotes = append(m.sentVotes, vote) - return TestPeerID(peerID), vote, nil - } - - return "", nil, errors.New("no proposal to vote for") -} - -func (m *mockPanickingVotingProvider) IsQuorum(proposalVotes map[Identity]*TestVote, ctx context.Context) (bool, error) { - totalVotes := 0 - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - count = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - count = count + 1 - } - totalVotes += 1 - - if count >= m.quorumSize { - return true, nil - } - } - if totalVotes >= m.quorumSize { - return false, errors.New("split quorum") - } - return false, nil -} - -func (m *mockPanickingVotingProvider) FinalizeVotes( - proposals map[Identity]*TestState, - proposalVotes map[Identity]*TestVote, - ctx context.Context, -) (*TestState, TestPeerID, error) { - // Pick the proposal with the most votes - winnerCount := 0 - var winnerProposal *TestState = nil - var winnerProposer TestPeerID - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - count = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - count += 1 - } - } - for peerID, proposal := 
range proposals { - if proposal == nil { - continue - } - if _, ok := voteCount[proposal.ProposalID]; !ok { - continue - } - if voteCount[proposal.ProposalID] > winnerCount { - winnerCount = voteCount[proposal.ProposalID] - winnerProposal = proposal - winnerProposer = TestPeerID(peerID) - } - } - - if winnerProposal != nil { - // Create new state with incremented round - newState := &TestState{ - Round: winnerProposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, winnerProposer, nil - } - - // Default to first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - newState := &TestState{ - Round: proposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, TestPeerID(peerID), nil - } - - return nil, "", nil -} - -func (m *mockPanickingVotingProvider) SendVote(vote *TestVote, ctx context.Context) (TestPeerID, error) { - return "", nil -} - -func (m *mockPanickingVotingProvider) SendConfirmation(finalized *TestState, ctx context.Context) error { - panic("PANIC HERE") -} - -type printtracer struct{} - -// Error implements TraceLogger. -func (p *printtracer) Error(message string, err error) { - fmt.Println("[error]", message, err) -} - -// Trace implements TraceLogger. -func (p *printtracer) Trace(message string) { - fmt.Println("[trace]", message) -} - -func TestStateMachinePanicRecovery(t *testing.T) { - minimumProvers := func() uint64 { return uint64(1) } - - sm := NewStateMachine( - "leader1", - &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, - true, // shouldEmitReceiveEventsOnSends - minimumProvers, - &mockSyncProvider{syncDelay: 10 * time.Millisecond}, - &mockPanickingVotingProvider{quorumSize: 1}, - &mockLeaderProvider{ - isLeader: true, - leaders: []TestPeerID{"leader1"}, - proveDelay: 50 * time.Millisecond, - }, - &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, - &printtracer{}, - ) - defer sm.Close() - - sm.Start() - time.Sleep(10 * time.Second) - sm.mu.Lock() - if sm.machineState != StateStopped { - sm.mu.Unlock() - t.FailNow() - } - sm.mu.Unlock() - -} diff --git a/consensus/state_machine_viz.go b/consensus/state_machine_viz.go deleted file mode 100644 index 0634589..0000000 --- a/consensus/state_machine_viz.go +++ /dev/null @@ -1,360 +0,0 @@ -package consensus - -import ( - "fmt" - "strings" - "time" -) - -// StateMachineViz provides visualization utilities for the generic state machine -type StateMachineViz[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] struct { - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT] -} - -// NewStateMachineViz creates a new visualizer for the generic state machine -func NewStateMachineViz[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -]( - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT], -) *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT] { - return &StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]{sm: sm} -} - -// GenerateMermaidDiagram generates a Mermaid diagram of the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateMermaidDiagram() string { - var sb strings.Builder - - sb.WriteString("```mermaid\n") - sb.WriteString("stateDiagram-v2\n") - sb.WriteString(" [*] --> Stopped\n") - - // Define states with descriptions - // Use CamelCase for state IDs to avoid underscore issues - stateMap 
:= map[State]string{ - StateStopped: "Stopped", - StateStarting: "Starting", - StateLoading: "Loading", - StateCollecting: "Collecting", - StateLivenessCheck: "LivenessCheck", - StateProving: "Proving", - StatePublishing: "Publishing", - StateVoting: "Voting", - StateFinalizing: "Finalizing", - StateVerifying: "Verifying", - StateStopping: "Stopping", - } - - stateDescriptions := map[State]string{ - StateStopped: "Engine not running", - StateStarting: "Initializing components", - StateLoading: "Syncing with network", - StateCollecting: "Gathering consensus data", - StateLivenessCheck: "Checking prover availability", - StateProving: "Generating cryptographic proof", - StatePublishing: "Broadcasting proposal", - StateVoting: "Participating in consensus", - StateFinalizing: "Aggregating votes", - StateVerifying: "Publishing confirmation", - StateStopping: "Cleaning up resources", - } - - // Add state descriptions - for state, id := range stateMap { - desc := stateDescriptions[state] - sb.WriteString(fmt.Sprintf(" %s : %s\n", id, desc)) - } - - sb.WriteString("\n") - - // Add transitions using mapped state names - transitions := v.getTransitionList() - for _, t := range transitions { - fromID := stateMap[t.From] - toID := stateMap[t.To] - if t.Guard != nil { - sb.WriteString(fmt.Sprintf( - " %s --> %s : %s [guarded]\n", - fromID, toID, t.Event)) - } else { - sb.WriteString(fmt.Sprintf( - " %s --> %s : %s\n", - fromID, toID, t.Event)) - } - } - - // Add special annotations using mapped names - sb.WriteString("\n") - sb.WriteString(" note right of Proving : Leader only\n") - sb.WriteString( - " note right of LivenessCheck : Divergence point\\nfor leader/non-leader\n", - ) - sb.WriteString(" note right of Voting : Convergence point\n") - - sb.WriteString("```\n") - - return sb.String() -} - -// GenerateDotDiagram generates a Graphviz DOT diagram -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateDotDiagram() string { - var sb strings.Builder - - sb.WriteString("digraph ConsensusStateMachine {\n") - sb.WriteString(" rankdir=TB;\n") - sb.WriteString(" node [shape=box, style=rounded];\n") - sb.WriteString(" edge [fontsize=10];\n\n") - - // Define node styles - sb.WriteString(" // State styles\n") - sb.WriteString( - " Stopped [style=\"rounded,filled\", fillcolor=lightgray];\n", - ) - sb.WriteString( - " Starting [style=\"rounded,filled\", fillcolor=lightyellow];\n", - ) - sb.WriteString( - " Loading [style=\"rounded,filled\", fillcolor=lightyellow];\n", - ) - sb.WriteString( - " Collecting [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " LivenessCheck [style=\"rounded,filled\", fillcolor=orange];\n", - ) - sb.WriteString( - " Proving [style=\"rounded,filled\", fillcolor=lightgreen];\n", - ) - sb.WriteString( - " Publishing [style=\"rounded,filled\", fillcolor=lightgreen];\n", - ) - sb.WriteString( - " Voting [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Finalizing [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Verifying [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Stopping [style=\"rounded,filled\", fillcolor=lightcoral];\n\n", - ) - - // Add transitions - sb.WriteString(" // Transitions\n") - transitions := v.getTransitionList() - for _, t := range transitions { - label := string(t.Event) - if t.Guard != nil { - label += " [G]" - } - sb.WriteString(fmt.Sprintf( - " %s -> %s [label=\"%s\"];\n", - t.From, t.To, label)) - } - - // Add legend - 
sb.WriteString("\n // Legend\n") - sb.WriteString(" subgraph cluster_legend {\n") - sb.WriteString(" label=\"Legend\";\n") - sb.WriteString(" style=dotted;\n") - sb.WriteString(" \"[G] = Guarded transition\" [shape=none];\n") - sb.WriteString(" \"Yellow = Initialization\" [shape=none];\n") - sb.WriteString(" \"Blue = Consensus flow\" [shape=none];\n") - sb.WriteString(" \"Green = Leader specific\" [shape=none];\n") - sb.WriteString(" \"Orange = Decision point\" [shape=none];\n") - sb.WriteString(" }\n") - - sb.WriteString("}\n") - - return sb.String() -} - -// GenerateTransitionTable generates a markdown table of all transitions -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateTransitionTable() string { - var sb strings.Builder - - sb.WriteString("| From State | Event | To State | Condition |\n") - sb.WriteString("|------------|-------|----------|----------|\n") - - transitions := v.getTransitionList() - for _, t := range transitions { - condition := "None" - if t.Guard != nil { - condition = "Has guard" - } - sb.WriteString(fmt.Sprintf( - "| %s | %s | %s | %s |\n", - t.From, t.Event, t.To, condition)) - } - - return sb.String() -} - -// getTransitionList extracts all transitions from the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) getTransitionList() []*Transition[StateT, VoteT, PeerIDT, CollectedT] { - var transitions []*Transition[StateT, VoteT, PeerIDT, CollectedT] - - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() - - for _, eventMap := range v.sm.transitions { - for _, transition := range eventMap { - transitions = append(transitions, transition) - } - } - - return transitions -} - -// GetStateStats returns statistics about the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GetStateStats() string { - var sb strings.Builder - - sb.WriteString("State Machine Statistics:\n") - sb.WriteString("========================\n\n") - - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() - - // Count states and transitions - stateCount := 0 - transitionCount := 0 - eventCount := make(map[Event]int) - - for _, eventMap := range v.sm.transitions { - // Only count if we have transitions for this state - if len(eventMap) > 0 { - stateCount++ - } - for event := range eventMap { - transitionCount++ - eventCount[event]++ - } - } - - sb.WriteString(fmt.Sprintf("Total States: %d\n", stateCount)) - sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", transitionCount)) - sb.WriteString(fmt.Sprintf("Current State: %s\n", v.sm.machineState)) - sb.WriteString(fmt.Sprintf("Transitions Made: %d\n", v.sm.transitionCount)) - sb.WriteString( - fmt.Sprintf("Time in Current State: %v\n", v.sm.GetStateTime()), - ) - - // Display current leader info if available - if len(v.sm.nextProvers) > 0 { - sb.WriteString("\nNext Leaders:\n") - for i, leader := range v.sm.nextProvers { - sb.WriteString(fmt.Sprintf(" %d. 
%v\n", i+1, leader)) - } - } - - // Display active state info - if v.sm.activeState != nil { - sb.WriteString(fmt.Sprintf("\nActive State: %+v\n", v.sm.activeState)) - } - - // Display liveness info - sb.WriteString(fmt.Sprintf("\nLiveness Checks: %d\n", len(v.sm.liveness))) - - // Display voting info - sb.WriteString(fmt.Sprintf("Proposals: %d\n", len(v.sm.proposals))) - sb.WriteString(fmt.Sprintf("Votes: %d\n", len(v.sm.votes))) - sb.WriteString(fmt.Sprintf("Confirmations: %d\n", len(v.sm.confirmations))) - - sb.WriteString("\nEvent Usage:\n") - for event, count := range eventCount { - sb.WriteString(fmt.Sprintf(" %s: %d transitions\n", event, count)) - } - - return sb.String() -} - -// GetCurrentStateInfo returns detailed information about the current state -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]) GetCurrentStateInfo() string { - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() - - var sb strings.Builder - - sb.WriteString("Current State Information:\n") - sb.WriteString("=========================\n\n") - sb.WriteString(fmt.Sprintf("State: %s\n", v.sm.machineState)) - sb.WriteString( - fmt.Sprintf("Time in State: %v\n", time.Since(v.sm.stateStartTime)), - ) - sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", v.sm.transitionCount)) - - // State configuration info - if config, exists := v.sm.stateConfigs[v.sm.machineState]; exists { - sb.WriteString("\nState Configuration:\n") - if config.Timeout > 0 { - sb.WriteString(fmt.Sprintf(" Timeout: %v\n", config.Timeout)) - sb.WriteString(fmt.Sprintf(" Timeout Event: %s\n", config.OnTimeout)) - } - if config.Behavior != nil { - sb.WriteString(" Has Behavior: Yes\n") - } - if config.OnEnter != nil { - sb.WriteString(" Has OnEnter Callback: Yes\n") - } - if config.OnExit != nil { - sb.WriteString(" Has OnExit Callback: Yes\n") - } - } - - // Available transitions from current state - sb.WriteString("\nAvailable Transitions:\n") - if transitions, exists := v.sm.transitions[v.sm.machineState]; exists { - for event, transition := range transitions { - guardStr := "" - if transition.Guard != nil { - guardStr = " [guarded]" - } - sb.WriteString( - fmt.Sprintf(" %s -> %s%s\n", event, transition.To, guardStr), - ) - } - } - - return sb.String() -} - -// GenerateEventFlow generates a flow of events that occurred -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateEventFlow() string { - var sb strings.Builder - - sb.WriteString("Event Flow:\n") - sb.WriteString("===========\n\n") - - transitions := v.getTransitionList() - for i, tr := range transitions { - sb.WriteString(fmt.Sprintf( - "%d. %s -> %s [%s]\n", - i+1, tr.From, tr.To, tr.Event, - )) - } - - return sb.String() -} diff --git a/consensus/stateproducer/safety_rules_wrapper.go b/consensus/stateproducer/safety_rules_wrapper.go new file mode 100644 index 0000000..8b2661d --- /dev/null +++ b/consensus/stateproducer/safety_rules_wrapper.go @@ -0,0 +1,128 @@ +package stateproducer + +import ( + "fmt" + + "go.uber.org/atomic" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// safetyRulesConcurrencyWrapper wraps `consensus.SafetyRules` to allow its +// usage in concurrent environments. +// Correctness requirements: +// +// (i) The wrapper's Sign function is called exactly once (wrapper errors on +// repeated Sign calls) +// (ii) SafetyRules is not accessed outside the wrapper concurrently. The +// wrapper cannot enforce this. 
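+// It is the caller's responsibility to uphold requirement (ii).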
+// +// The correctness condition (ii) holds because there is a single dedicated +// thread executing the Event Loop, including the EventHandler, that also runs +// the logic of `StateProducer.MakeStateProposal`. +// +// Concurrency safety: +// +// (a) There is one dedicated thread executing the Event Loop, including the +// EventHandler, that also runs the logic of +// `StateProducer.MakeStateProposal`. Hence, while the 'Event Loop Thread' +// is in `MakeStateProposal`, we are guaranteed the only interactions with +// `SafetyRules` are in `consensus.LeaderProvider.ProveNextState` +// (b) The Event Loop Thread instantiates the variable `signingStatus`. +// Furthermore, the `signer` call first reads `signingStatus`. Therefore, +// all operations in the EventHandler prior to calling +// `Builder.ProveNextState(..)` happen before the call to `signer`. Hence, +// it is guaranteed that the `signer` uses the most recent state of +// `SafetyRules`, even if `Sign` is executed by a different thread. +// (c) Just before the `signer` call returns, it writes `signingStatus`. +// Furthermore, the Event Loop Thread reads `signingStatus` right after +// the `Builder.ProveNextState(..)` call returns. Thereby, Event Loop +// Thread sees the most recent state of `SafetyRules` after completing the +// signing operation. +// +// With the transitivity of the 'Happens Before' relationship (-> go Memory +// Model https://go.dev/ref/mem#atomic), we have proven that concurrent access +// of the wrapped `safetyRules` is safe for the state transition: +// +// instantiate signingStatus to 0 ─► update signingStatus from 0 to 1 → signer → update signingStatus from 1 to 2 ─► confirm signingStatus has value 2 +// +// ╰──────────────┬───────────────╯ ╰──────────────────────────────────────┬─────────────────────────────────────╯ ╰────────────────┬────────────────╯ +// +// Event Loop Thread within the scope of Builder.ProveNextState Event Loop Thread +// +// All state transitions _other_ than the one above yield exceptions without +// modifying `SafetyRules`. +type safetyRulesConcurrencyWrapper[ + StateT models.Unique, + VoteT models.Unique, +] struct { + // signingStatus guarantees concurrency safety and encodes the progress of the + // signing process. We differentiate between 4 different states: + // - value 0: signing is not yet started + // - value 1: one thread has already entered the signing process, which is + // currently ongoing + // - value 2: the thread that set `signingStatus` to value 1 has completed + // the signing + signingStatus atomic.Uint32 + safetyRules consensus.SafetyRules[StateT, VoteT] +} + +func newSafetyRulesConcurrencyWrapper[ + StateT models.Unique, + VoteT models.Unique, +]( + safetyRules consensus.SafetyRules[StateT, VoteT], +) *safetyRulesConcurrencyWrapper[StateT, VoteT] { + return &safetyRulesConcurrencyWrapper[StateT, VoteT]{safetyRules: safetyRules} +} + +// Sign modifies the given unsignedHeader by including the proposer's signature +// date. Safe under concurrent calls. Per convention, this method should be +// called exactly once. Only the first call will succeed, and subsequent calls +// error. The implementation is backed by `SafetyRules` and thereby guarantees +// consensus safety for singing state proposals. +// Error Returns: +// - models.NoVoteError if it is not safe for us to vote (our proposal +// includes our vote) for this rank. This can happen if we have already +// proposed or timed out this rank. 
+// - generic error in case of unexpected failure
+func (w *safetyRulesConcurrencyWrapper[StateT, VoteT]) Sign(
+  unsigned *models.Proposal[StateT],
+) (*VoteT, error) {
+  // the CompareAndSwap fails iff the value of `signingStatus` is something
+  // other than 0, i.e. signing has already commenced
+  if !w.signingStatus.CompareAndSwap(0, 1) {
+    return nil, fmt.Errorf(
+      "signer has already commenced signing; possibly repeated signer call",
+    )
+  }
+
+  // signer is now in state 1, and this thread is the only one ever going to
+  // execute the following logic
+
+  // signature for own state is structurally a vote
+  vote, err := w.safetyRules.SignOwnProposal(unsigned)
+  if err != nil {
+    return nil, fmt.Errorf("could not sign state proposal: %w", err)
+  }
+  // value of `signingStatus` is always 1, i.e. the following check always
+  // succeeds.
+  if !w.signingStatus.CompareAndSwap(1, 2) {
+    // sanity check protects logic from future modifications accidentally
+    // breaking this invariant
+    panic(
+      "signer wrapper completed its work but encountered state other than 1",
+    ) // never happens
+  }
+  return vote, nil
+}
+
+// IsSigningComplete atomically checks whether the Sign logic has concluded, and
+// returns true only in this case. By reading the atomic `signingStatus` and
+// confirming it has the expected value, it is guaranteed that any state changes
+// of `safetyRules` that happened within `Sign` are visible to the Event Loop
+// Thread. No errors are expected during normal operations.
+func (
+  w *safetyRulesConcurrencyWrapper[StateT, VoteT],
+) IsSigningComplete() bool {
+  return w.signingStatus.Load() == 2
+}
diff --git a/consensus/stateproducer/state_producer.go b/consensus/stateproducer/state_producer.go
new file mode 100644
index 0000000..941be4e
--- /dev/null
+++ b/consensus/stateproducer/state_producer.go
@@ -0,0 +1,138 @@
+package stateproducer
+
+import (
+  "context"
+  "fmt"
+
+  "source.quilibrium.com/quilibrium/monorepo/consensus"
+  "source.quilibrium.com/quilibrium/monorepo/consensus/models"
+)
+
+// StateProducer is responsible for producing new state proposals. It is a
+// service component to HotStuff's main state machine (implemented in the
+// EventHandler). The StateProducer's central purpose is to mediate concurrent
+// signing requests to its embedded `consensus.SafetyRules` during state
+// production. The actual work of producing a state proposal is delegated to the
+// embedded `consensus.LeaderProvider`.
+type StateProducer[
+  StateT models.Unique,
+  VoteT models.Unique,
+  PeerIDT models.Unique,
+  CollectedT models.Unique,
+] struct {
+  safetyRules consensus.SafetyRules[StateT, VoteT]
+  committee   consensus.Replicas
+  builder     consensus.LeaderProvider[StateT, PeerIDT, CollectedT]
+}
+
+var _ consensus.StateProducer[*nilUnique, *nilUnique] = (*StateProducer[*nilUnique, *nilUnique, *nilUnique, *nilUnique])(nil)
+
+// NewStateProducer creates a new StateProducer, which mediates concurrent
+// signing requests to the embedded `consensus.SafetyRules` during state
+// production; the actual production work is delegated to
+// `consensus.LeaderProvider`. No errors are expected during normal operation.
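+//
+// Illustrative usage (a minimal sketch; `mySafetyRules`, `myCommittee`,
+// `myBuilder`, `rank` and `qc` are placeholders assumed for this example,
+// not part of this package):
+//
+//	producer, err := NewStateProducer(mySafetyRules, myCommittee, myBuilder)
+//	if err != nil {
+//		return err
+//	}
+//	// pass a nil timeout certificate when the previous rank concluded with a QC
+//	signedProposal, err := producer.MakeStateProposal(rank, qc, nil)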
+func NewStateProducer[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + safetyRules consensus.SafetyRules[StateT, VoteT], + committee consensus.Replicas, + builder consensus.LeaderProvider[StateT, PeerIDT, CollectedT], +) (*StateProducer[StateT, VoteT, PeerIDT, CollectedT], error) { + bp := &StateProducer[StateT, VoteT, PeerIDT, CollectedT]{ + safetyRules: safetyRules, + committee: committee, + builder: builder, + } + return bp, nil +} + +// MakeStateProposal builds a new HotStuff state proposal using the given rank, +// the given quorum certificate for its parent and [optionally] a timeout +// certificate for last rank(could be nil). +// Error Returns: +// - models.NoVoteError if it is not safe for us to vote (our proposal +// includes our vote) for this rank. This can happen if we have already +// proposed or timed out this rank. +// - generic error in case of unexpected failure +func (bp *StateProducer[StateT, VoteT, PeerIDT, CollectedT]) MakeStateProposal( + rank uint64, + qc models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.SignedProposal[StateT, VoteT], error) { + newState, err := bp.builder.ProveNextState( + context.TODO(), + rank, + qc.GetFilter(), + qc.Identity(), + ) + if err != nil { + if models.IsNoVoteError(err) { + return nil, fmt.Errorf( + "unsafe to vote for own proposal on top of %x: %w", + qc.Identity(), + err, + ) + } + return nil, fmt.Errorf( + "could not build state proposal on top of %x: %w", + qc.Identity(), + err, + ) + } + + proposal := models.ProposalFrom( + models.StateFrom(newState, qc), + previousRankTimeoutCert, + ) + + signer := newSafetyRulesConcurrencyWrapper(bp.safetyRules) + vote, err := signer.Sign(proposal) + if err != nil { + return nil, fmt.Errorf( + "could not vote on state proposal on top of %x: %w", + qc.Identity(), + err, + ) + } + + signedProposal := models.SignedProposalFromState(proposal, vote) + + return signedProposal, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/timeoutaggregator/timeout_aggregator.go b/consensus/timeoutaggregator/timeout_aggregator.go new file mode 100644 index 0000000..3bd2698 --- /dev/null +++ b/consensus/timeoutaggregator/timeout_aggregator.go @@ -0,0 +1,271 @@ +package timeoutaggregator + +import ( + "context" + "errors" + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/counters" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// defaultTimeoutAggregatorWorkers number of workers to dispatch events for +// timeout aggregator +const defaultTimeoutAggregatorWorkers = 4 + +// defaultTimeoutQueueCapacity maximum capacity for buffering unprocessed +// timeouts +const defaultTimeoutQueueCapacity = 1000 + +// TimeoutAggregator stores the timeout states and aggregates them into a TC +// when enough TSs have been collected. It's safe to use in concurrent +// environment. +type TimeoutAggregator[VoteT models.Unique] struct { + *lifecycle.ComponentManager + tracer consensus.TraceLogger + lowestRetainedRank counters.StrictMonotonicCounter + collectors consensus.TimeoutCollectors[VoteT] + queuedTimeoutsNotifier chan struct{} + enteringRankNotifier chan struct{} + queuedTimeouts chan *models.TimeoutState[VoteT] + wg sync.WaitGroup +} + +var _ consensus.TimeoutAggregator[*nilUnique] = (*TimeoutAggregator[*nilUnique])(nil) + +// NewTimeoutAggregator creates an instance of timeout aggregator. +// No errors are expected during normal operations. +func NewTimeoutAggregator[VoteT models.Unique]( + tracer consensus.TraceLogger, + lowestRetainedRank uint64, + collectors consensus.TimeoutCollectors[VoteT], +) (*TimeoutAggregator[VoteT], error) { + queuedTimeouts := make( + chan *models.TimeoutState[VoteT], + defaultTimeoutQueueCapacity, + ) + + aggregator := &TimeoutAggregator[VoteT]{ + tracer: tracer, + lowestRetainedRank: counters.NewMonotonicCounter(lowestRetainedRank), + collectors: collectors, + queuedTimeoutsNotifier: make(chan struct{}, 1), + enteringRankNotifier: make(chan struct{}, 1), + queuedTimeouts: queuedTimeouts, + wg: sync.WaitGroup{}, + } + + aggregator.wg.Add(defaultTimeoutAggregatorWorkers + 1) + componentBuilder := lifecycle.NewComponentManagerBuilder() + for i := 0; i < defaultTimeoutAggregatorWorkers; i++ { + // manager for worker routines that process inbound events + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + aggregator.queuedTimeoutsProcessingLoop(ctx) + }) + } + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + aggregator.enteringRankProcessingLoop(ctx) + }) + + aggregator.ComponentManager = componentBuilder.Build() + + return aggregator, nil +} + +// queuedTimeoutsProcessingLoop is the event loop which waits for notification +// about pending work and as soon as there is some it triggers processing. 
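+//
+// The "notification" is a struct{} sent over a channel with capacity 1; senders
+// use a non-blocking send so repeated notifications coalesce into one pending
+// signal (see AddTimeout), and the worker drains the whole queue per signal. A
+// sketch of the send side, for illustration only:
+//
+//	select {
+//	case t.queuedTimeoutsNotifier <- struct{}{}:
+//	default: // a signal is already pending; a running worker will pick up the work
+//	}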
+func ( + t *TimeoutAggregator[VoteT], +) queuedTimeoutsProcessingLoop(ctx lifecycle.SignalerContext) { + defer t.wg.Done() + notifier := t.queuedTimeoutsNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + t.tracer.Trace("notified for queued timeout state") + err := t.processQueuedTimeoutStates(ctx) + if err != nil { + ctx.Throw(fmt.Errorf( + "internal error processing queued timeout events: %w", + err, + )) + return + } + } + } +} + +// processQueuedTimeoutStates sequentially processes items from `queuedTimeouts` +// until the queue returns 'empty'. Only when there are no more queued up +// TimeoutStates, this function call returns. No errors are expected during +// normal operations. +func (t *TimeoutAggregator[VoteT]) processQueuedTimeoutStates( + ctx context.Context, +) error { + for { + select { + case <-ctx.Done(): + return nil + case timeoutState, ok := <-t.queuedTimeouts: + if !ok { + return nil + } + + err := t.processQueuedTimeout(timeoutState) + + if err != nil { + return fmt.Errorf("could not process pending TO: %d: %w", + timeoutState.Rank, + err, + ) + } + + t.tracer.Trace("TimeoutState processed successfully") + default: + // when there is no more messages in the queue, back to the loop to wait + // for the next incoming message to arrive. + return nil + } + } +} + +// processQueuedTimeout performs actual processing of queued timeouts, this +// method is called from multiple concurrent goroutines. No errors are expected +// during normal operation +func (t *TimeoutAggregator[VoteT]) processQueuedTimeout( + timeoutState *models.TimeoutState[VoteT], +) error { + // We create a timeout collector before validating the first TO, so processing + // an invalid TO will result in a collector being added, until the + // corresponding rank is pruned. + collector, _, err := t.collectors.GetOrCreateCollector(timeoutState.Rank) + if err != nil { + if errors.Is(err, models.ErrRankUnknown) { + t.tracer.Error("discarding TO for unknown rank", err) + return nil + } + return fmt.Errorf("could not get collector for rank %d: %w", + timeoutState.Rank, err) + } + + t.tracer.Trace("adding timeout to collector") + err = collector.AddTimeout(timeoutState) + if err != nil { + return fmt.Errorf("could not process TO for rank %d: %w", + timeoutState.Rank, err) + } + return nil +} + +// AddTimeout checks if TO is stale and appends TO to processing queue. +// The actual processing will be done asynchronously by the +// `TimeoutAggregator`'s internal worker routines. +func (t *TimeoutAggregator[VoteT]) AddTimeout( + timeoutState *models.TimeoutState[VoteT], +) { + // drop stale objects + if timeoutState.Rank < t.lowestRetainedRank.Value() { + t.tracer.Trace("drop stale timeouts") + return + } + + select { + case t.queuedTimeouts <- timeoutState: + select { + case t.queuedTimeoutsNotifier <- struct{}{}: + default: + } + default: + // processing pipeline `queuedTimeouts` is full + // It's ok to silently drop timeouts, because we are probably catching up. + t.tracer.Trace("no queue capacity, dropping timeout") + } +} + +// PruneUpToRank deletes all `TimeoutCollector`s _below_ to the given rank, as +// well as related indices. We only retain and process `TimeoutCollector`s, +// whose rank is equal or larger than `lowestRetainedRank`. If +// `lowestRetainedRank` is smaller than the previous value, the previous value +// is kept and the method call is a NoOp. 
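+//
+// For example (illustrative numbers): after PruneUpToRank(100), collectors for
+// ranks 99 and below are dropped, collectors for rank 100 and above are
+// retained, and a subsequent PruneUpToRank(50) is a no-op because 50 < 100.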
+func (t *TimeoutAggregator[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + t.collectors.PruneUpToRank(lowestRetainedRank) +} + +// OnRankChange implements the `OnRankChange` callback from the +// `consensus.Consumer`. We notify the enteringRankProcessingLoop worker, which +// then prunes up to the active rank. CAUTION: the input to this callback is +// treated as trusted; precautions should be taken that messages from external +// nodes cannot be considered as inputs to this function +func (t *TimeoutAggregator[VoteT]) OnRankChange(oldRank, newRank uint64) { + if t.lowestRetainedRank.Set(newRank) { + select { + case t.enteringRankNotifier <- struct{}{}: + default: + } + } +} + +// enteringRankProcessingLoop is a separate goroutine that performs processing +// of entering rank events +func (t *TimeoutAggregator[VoteT]) enteringRankProcessingLoop( + ctx context.Context, +) { + defer t.wg.Done() + notifier := t.enteringRankNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + t.PruneUpToRank(t.lowestRetainedRank.Value()) + } + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/timeoutaggregator/timeout_aggregator_test.go b/consensus/timeoutaggregator/timeout_aggregator_test.go new file mode 100644 index 0000000..d30a5f6 --- /dev/null +++ b/consensus/timeoutaggregator/timeout_aggregator_test.go @@ -0,0 +1,136 @@ +package timeoutaggregator + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +func TestTimeoutAggregator(t *testing.T) { + suite.Run(t, new(TimeoutAggregatorTestSuite)) +} + +// TimeoutAggregatorTestSuite is a test suite for isolated testing of TimeoutAggregator. +// Contains mocked state which is used to verify correct behavior of TimeoutAggregator. +// Automatically starts and stops module.Startable in SetupTest and TearDownTest respectively. 
+type TimeoutAggregatorTestSuite struct { + suite.Suite + + lowestRetainedRank uint64 + highestKnownRank uint64 + aggregator *TimeoutAggregator[*helper.TestVote] + collectors *mocks.TimeoutCollectors[*helper.TestVote] + stopAggregator context.CancelFunc +} + +func (s *TimeoutAggregatorTestSuite) SetupTest() { + var err error + s.collectors = mocks.NewTimeoutCollectors[*helper.TestVote](s.T()) + + s.lowestRetainedRank = 100 + + s.aggregator, err = NewTimeoutAggregator( + helper.Logger(), + s.lowestRetainedRank, + s.collectors, + ) + require.NoError(s.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := unittest.NewMockSignalerContext(s.T(), ctx) + s.stopAggregator = cancel + s.aggregator.Start(signalerCtx) + unittest.RequireCloseBefore(s.T(), s.aggregator.Ready(), 100*time.Millisecond, "should close before timeout") +} + +func (s *TimeoutAggregatorTestSuite) TearDownTest() { + s.stopAggregator() + unittest.RequireCloseBefore(s.T(), s.aggregator.Done(), time.Second, "should close before timeout") +} + +// TestAddTimeout_HappyPath tests a happy path when multiple threads are adding timeouts for processing +// Eventually every timeout has to be processed by TimeoutCollector +func (s *TimeoutAggregatorTestSuite) TestAddTimeout_HappyPath() { + timeoutsCount := 20 + collector := mocks.NewTimeoutCollector[*helper.TestVote](s.T()) + callCount := atomic.NewUint64(0) + collector.On("AddTimeout", mock.Anything).Run(func(mock.Arguments) { + callCount.Add(1) + }).Return(nil).Times(timeoutsCount) + s.collectors.On("GetOrCreateCollector", s.lowestRetainedRank).Return(collector, true, nil).Times(timeoutsCount) + + var start sync.WaitGroup + start.Add(timeoutsCount) + for i := 0; i < timeoutsCount; i++ { + go func() { + timeout := helper.TimeoutStateFixture[*helper.TestVote](helper.WithTimeoutStateRank[*helper.TestVote](s.lowestRetainedRank), helper.WithTimeoutVote(&helper.TestVote{Rank: s.lowestRetainedRank, ID: helper.MakeIdentity()})) + + start.Done() + // Wait for last worker routine to signal ready. Then, + // feed all timeouts into cache + start.Wait() + + s.aggregator.AddTimeout(timeout) + }() + } + + start.Wait() + + require.Eventually(s.T(), func() bool { + return callCount.Load() == uint64(timeoutsCount) + }, time.Second, time.Millisecond*20) +} + +// TestAddTimeout_RankUnknown tests if timeout states targeting unknown rank should be ignored +func (s *TimeoutAggregatorTestSuite) TestAddTimeout_RankUnknown() { + timeout := helper.TimeoutStateFixture(helper.WithTimeoutStateRank[*helper.TestVote](s.lowestRetainedRank), helper.WithTimeoutVote(&helper.TestVote{Rank: s.lowestRetainedRank, ID: helper.MakeIdentity()})) + *s.collectors = *mocks.NewTimeoutCollectors[*helper.TestVote](s.T()) + done := make(chan struct{}) + s.collectors.On("GetOrCreateCollector", timeout.Rank).Return(nil, false, models.ErrRankUnknown).Run(func(args mock.Arguments) { + close(done) + }).Once() + s.aggregator.AddTimeout(timeout) + unittest.AssertClosesBefore(s.T(), done, time.Second) +} + +// TestPruneUpToRank tests that pruning removes collectors lower that retained rank +func (s *TimeoutAggregatorTestSuite) TestPruneUpToRank() { + s.collectors.On("PruneUpToRank", s.lowestRetainedRank+1).Once() + s.aggregator.PruneUpToRank(s.lowestRetainedRank + 1) +} + +// TestOnQuorumCertificateTriggeredRankChange tests if entering rank event gets processed when send through `TimeoutAggregator`. +// Tests the whole processing pipeline. 
+func (s *TimeoutAggregatorTestSuite) TestOnQuorumCertificateTriggeredRankChange() {
+  done := make(chan struct{})
+  s.collectors.On("PruneUpToRank", s.lowestRetainedRank+1).Run(func(args mock.Arguments) {
+    close(done)
+  }).Once()
+  qc := helper.MakeQC(helper.WithQCRank(s.lowestRetainedRank))
+  s.aggregator.OnRankChange(qc.GetRank(), qc.GetRank()+1)
+  unittest.AssertClosesBefore(s.T(), done, time.Second)
+}
+
+// TestOnTimeoutCertificateTriggeredRankChange tests if an entering-rank event gets processed when sent through `TimeoutAggregator`.
+// Tests the whole processing pipeline.
+func (s *TimeoutAggregatorTestSuite) TestOnTimeoutCertificateTriggeredRankChange() {
+  rank := s.lowestRetainedRank + 1
+  done := make(chan struct{})
+  s.collectors.On("PruneUpToRank", rank).Run(func(args mock.Arguments) {
+    close(done)
+  }).Once()
+  tc := helper.MakeTC(helper.WithTCRank(s.lowestRetainedRank))
+  s.aggregator.OnRankChange(tc.GetRank(), tc.GetRank()+1)
+  unittest.AssertClosesBefore(s.T(), done, time.Second)
+}
diff --git a/consensus/timeoutaggregator/timeout_collectors.go b/consensus/timeoutaggregator/timeout_collectors.go
new file mode 100644
index 0000000..7e58936
--- /dev/null
+++ b/consensus/timeoutaggregator/timeout_collectors.go
@@ -0,0 +1,156 @@
+package timeoutaggregator
+
+import (
+  "fmt"
+  "sync"
+
+  "source.quilibrium.com/quilibrium/monorepo/consensus"
+  "source.quilibrium.com/quilibrium/monorepo/consensus/models"
+)
+
+// TimeoutCollectors implements management of multiple timeout collectors
+// indexed by rank. Implements the consensus.TimeoutCollectors interface.
+// Creating a TimeoutCollector for a particular rank is lazy (instances are
+// created on demand). This structure is safe for concurrent use.
+type TimeoutCollectors[VoteT models.Unique] struct {
+  tracer                    consensus.TraceLogger
+  lock                      sync.RWMutex
+  lowestRetainedRank        uint64 // lowest rank, for which we still retain a TimeoutCollector and process timeouts
+  newestRankCachedCollector uint64 // highest rank, for which we have created a TimeoutCollector
+  collectors                map[uint64]consensus.TimeoutCollector[VoteT] // rank -> TimeoutCollector
+  collectorFactory          consensus.TimeoutCollectorFactory[VoteT]     // factory for creating collectors
+}
+
+var _ consensus.TimeoutCollectors[*nilUnique] = (*TimeoutCollectors[*nilUnique])(nil)
+
+func NewTimeoutCollectors[VoteT models.Unique](
+  tracer consensus.TraceLogger,
+  lowestRetainedRank uint64,
+  collectorFactory consensus.TimeoutCollectorFactory[VoteT],
+) *TimeoutCollectors[VoteT] {
+  return &TimeoutCollectors[VoteT]{
+    tracer:                    tracer,
+    lowestRetainedRank:        lowestRetainedRank,
+    newestRankCachedCollector: lowestRetainedRank,
+    collectors:                make(map[uint64]consensus.TimeoutCollector[VoteT]),
+    collectorFactory:          collectorFactory,
+  }
+}
+
+// GetOrCreateCollector retrieves the consensus.TimeoutCollector for the
+// specified rank or creates one if none exists.
+// - (collector, true, nil) if no collector can be found by the rank, and a
+//   new collector was created.
+// - (collector, false, nil) if the collector can be found by the rank
+// - (nil, false, error) if running into any exception creating the timeout
+//   collector state machine
+//
+// Expected error returns during normal operations:
+// - models.BelowPrunedThresholdError if rank is below the pruning threshold
+// - models.ErrRankUnknown if the rank is not yet pruned but no rank containing
+//   the given rank is known; this error can be returned from the factory
+//   method.
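+//
+// Illustrative call-site sketch (the surrounding variables and the error
+// handling policy are assumptions of this example, not prescribed by this
+// package):
+//
+//	collector, created, err := collectors.GetOrCreateCollector(rank)
+//	if err != nil {
+//		// models.BelowPrunedThresholdError and models.ErrRankUnknown are expected
+//		// benign outcomes; anything else should be escalated as an exception
+//		return err
+//	}
+//	if created {
+//		// first collector instantiated for this rank
+//	}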
+func (t *TimeoutCollectors[VoteT]) GetOrCreateCollector(rank uint64) ( + consensus.TimeoutCollector[VoteT], + bool, + error, +) { + cachedCollector, hasCachedCollector, err := t.getCollector(rank) + if err != nil { + return nil, false, err + } + if hasCachedCollector { + return cachedCollector, false, nil + } + + collector, err := t.collectorFactory.Create(rank) + if err != nil { + return nil, false, fmt.Errorf( + "could not create timeout collector for rank %d: %w", + rank, + err, + ) + } + + // Initial check showed that there was no collector. However, it's possible + // that after the initial check but before acquiring the lock to add the + // newly-created collector, another goroutine already added the needed + // collector. Hence, check again after acquiring the lock: + t.lock.Lock() + clr, found := t.collectors[rank] + if found { + t.lock.Unlock() + return clr, false, nil + } + t.collectors[rank] = collector + if t.newestRankCachedCollector < rank { + t.newestRankCachedCollector = rank + } + t.lock.Unlock() + + t.tracer.Trace("timeout collector has been created") + return collector, true, nil +} + +// getCollector retrieves consensus.TimeoutCollector from local cache in +// concurrent safe way. Performs check for lowestRetainedRank. +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (t *TimeoutCollectors[VoteT]) getCollector(rank uint64) ( + consensus.TimeoutCollector[VoteT], + bool, + error, +) { + t.lock.RLock() + defer t.lock.RUnlock() + if rank < t.lowestRetainedRank { + return nil, false, models.NewBelowPrunedThresholdErrorf( + "cannot retrieve collector for pruned rank %d (lowest retained rank %d)", + rank, + t.lowestRetainedRank, + ) + } + + clr, found := t.collectors[rank] + return clr, found, nil +} + +// PruneUpToRank prunes the timeout collectors with ranks _below_ the given +// value, i.e. we only retain and process whose rank is equal or larger than +// `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous +// value, the previous value is kept and the method call is a NoOp. +func (t *TimeoutCollectors[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + t.lock.Lock() + if t.lowestRetainedRank >= lowestRetainedRank { + t.lock.Unlock() + return + } + sizeBefore := len(t.collectors) + if sizeBefore == 0 { + t.lowestRetainedRank = lowestRetainedRank + t.lock.Unlock() + return + } + + // to optimize the pruning of large rank-ranges, we compare: + // * the number of ranks for which we have collectors: len(t.collectors) + // * the number of ranks that need to be pruned: rank-t.lowestRetainedRank + // We iterate over the dimension which is smaller. 
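+  // For example (illustrative numbers): with 3 cached collectors and a pruning
+  // jump of 1000 ranks, we iterate over the 3 map keys rather than the 1000
+  // candidate ranks.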
+ if uint64(sizeBefore) < lowestRetainedRank-t.lowestRetainedRank { + for w := range t.collectors { + if w < lowestRetainedRank { + delete(t.collectors, w) + } + } + } else { + for w := t.lowestRetainedRank; w < lowestRetainedRank; w++ { + delete(t.collectors, w) + } + } + t.lowestRetainedRank = lowestRetainedRank + t.lock.Unlock() + + t.tracer.Trace("pruned timeout collectors") +} diff --git a/consensus/timeoutaggregator/timeout_collectors_test.go b/consensus/timeoutaggregator/timeout_collectors_test.go new file mode 100644 index 0000000..3300cd7 --- /dev/null +++ b/consensus/timeoutaggregator/timeout_collectors_test.go @@ -0,0 +1,176 @@ +package timeoutaggregator + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/gammazero/workerpool" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var factoryError = errors.New("factory error") + +func TestTimeoutCollectors(t *testing.T) { + suite.Run(t, new(TimeoutCollectorsTestSuite)) +} + +// TimeoutCollectorsTestSuite is a test suite for isolated testing of TimeoutCollectors. +// Contains helper methods and mocked state which is used to verify correct behavior of TimeoutCollectors. +type TimeoutCollectorsTestSuite struct { + suite.Suite + + mockedCollectors map[uint64]*mocks.TimeoutCollector[*helper.TestVote] + factoryMethod *mocks.TimeoutCollectorFactory[*helper.TestVote] + collectors *TimeoutCollectors[*helper.TestVote] + lowestRank uint64 + workerPool *workerpool.WorkerPool +} + +func (s *TimeoutCollectorsTestSuite) SetupTest() { + s.lowestRank = 1000 + s.mockedCollectors = make(map[uint64]*mocks.TimeoutCollector[*helper.TestVote]) + s.workerPool = workerpool.New(2) + s.factoryMethod = mocks.NewTimeoutCollectorFactory[*helper.TestVote](s.T()) + s.factoryMethod.On("Create", mock.Anything).Return(func(rank uint64) consensus.TimeoutCollector[*helper.TestVote] { + if collector, found := s.mockedCollectors[rank]; found { + return collector + } + return nil + }, func(rank uint64) error { + if _, found := s.mockedCollectors[rank]; found { + return nil + } + return fmt.Errorf("mocked collector %v not found: %w", rank, factoryError) + }).Maybe() + s.collectors = NewTimeoutCollectors(helper.Logger(), s.lowestRank, s.factoryMethod) +} + +func (s *TimeoutCollectorsTestSuite) TearDownTest() { + s.workerPool.StopWait() +} + +// prepareMockedCollector prepares a mocked collector and stores it in map, later it will be used +// to mock behavior of timeout collectors. +func (s *TimeoutCollectorsTestSuite) prepareMockedCollector(rank uint64) *mocks.TimeoutCollector[*helper.TestVote] { + collector := mocks.NewTimeoutCollector[*helper.TestVote](s.T()) + collector.On("Rank").Return(rank).Maybe() + s.mockedCollectors[rank] = collector + return collector +} + +// TestGetOrCreateCollector_RankLowerThanLowest tests a scenario where caller tries to create a collector with rank +// lower than already pruned one. 
This should result in the sentinel error `BelowPrunedThresholdError`.
+func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_RankLowerThanLowest() {
+  collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank - 10)
+  require.Nil(s.T(), collector)
+  require.False(s.T(), created)
+  require.Error(s.T(), err)
+  require.True(s.T(), models.IsBelowPrunedThresholdError(err))
+}
+
+// TestGetOrCreateCollector_UnknownRank tests a scenario where the caller tries to create a collector for a rank
+// that we don't know about. This should result in the sentinel error `models.ErrRankUnknown`.
+func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_UnknownRank() {
+  *s.factoryMethod = *mocks.NewTimeoutCollectorFactory[*helper.TestVote](s.T())
+  s.factoryMethod.On("Create", mock.Anything).Return(nil, models.ErrRankUnknown)
+  collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank + 100)
+  require.Nil(s.T(), collector)
+  require.False(s.T(), created)
+  require.ErrorIs(s.T(), err, models.ErrRankUnknown)
+}
+
+// TestGetOrCreateCollector_ValidCollector tests a happy path scenario where we first create and then retrieve the cached collector.
+func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_ValidCollector() {
+  rank := s.lowestRank + 10
+  s.prepareMockedCollector(rank)
+  collector, created, err := s.collectors.GetOrCreateCollector(rank)
+  require.NoError(s.T(), err)
+  require.True(s.T(), created)
+  require.Equal(s.T(), rank, collector.Rank())
+
+  cached, cachedCreated, err := s.collectors.GetOrCreateCollector(rank)
+  require.NoError(s.T(), err)
+  require.False(s.T(), cachedCreated)
+  require.Equal(s.T(), collector, cached)
+}
+
+// TestGetOrCreateCollector_FactoryError tests that an error from the factory method is propagated to the caller.
+func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_FactoryError() {
+  // creating a collector without calling prepareMockedCollector will yield factoryError.
+  collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank + 10)
+  require.Nil(s.T(), collector)
+  require.False(s.T(), created)
+  require.ErrorIs(s.T(), err, factoryError)
+}
+
+// TestGetOrCreateCollectors_ConcurrentAccess tests that concurrently accessing GetOrCreateCollector creates
+// only one collector and all other instances are retrieved from the cache.
+func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollectors_ConcurrentAccess() {
+  createdTimes := atomic.NewUint64(0)
+  rank := s.lowestRank + 10
+  s.prepareMockedCollector(rank)
+  var wg sync.WaitGroup
+  for i := 0; i < 10; i++ {
+    wg.Add(1)
+    go func() {
+      defer wg.Done()
+      _, created, err := s.collectors.GetOrCreateCollector(rank)
+      require.NoError(s.T(), err)
+      if created {
+        createdTimes.Add(1)
+      }
+    }()
+  }
+  wg.Wait()
+
+  require.Equal(s.T(), uint64(1), createdTimes.Load())
+}
+
+// TestPruneUpToRank tests that pruning removes items below the pruning height and leaves other items unmodified.
+func (s *TimeoutCollectorsTestSuite) TestPruneUpToRank() { + numberOfCollectors := uint64(10) + prunedRanks := make([]uint64, 0) + for i := uint64(0); i < numberOfCollectors; i++ { + rank := s.lowestRank + i + s.prepareMockedCollector(rank) + _, _, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + prunedRanks = append(prunedRanks, rank) + } + + pruningHeight := s.lowestRank + numberOfCollectors + + expectedCollectors := make([]consensus.TimeoutCollector[*helper.TestVote], 0) + for i := uint64(0); i < numberOfCollectors; i++ { + rank := pruningHeight + i + s.prepareMockedCollector(rank) + collector, _, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + expectedCollectors = append(expectedCollectors, collector) + } + + // after this operation collectors below pruning height should be pruned and everything higher + // should be left unmodified + s.collectors.PruneUpToRank(pruningHeight) + + for _, prunedRank := range prunedRanks { + _, _, err := s.collectors.GetOrCreateCollector(prunedRank) + require.Error(s.T(), err) + require.True(s.T(), models.IsBelowPrunedThresholdError(err)) + } + + for _, collector := range expectedCollectors { + cached, _, _ := s.collectors.GetOrCreateCollector(collector.Rank()) + require.Equal(s.T(), collector, cached) + } +} diff --git a/consensus/timeoutcollector/aggregation.go b/consensus/timeoutcollector/aggregation.go new file mode 100644 index 0000000..78a8610 --- /dev/null +++ b/consensus/timeoutcollector/aggregation.go @@ -0,0 +1,227 @@ +package timeoutcollector + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" +) + +// signerInfo holds information about a signer, its public key and weight +type signerInfo struct { + pk []byte + weight uint64 +} + +// sigInfo holds signature and high QC rank submitted by some signer +type sigInfo struct { + sig []byte + newestQCRank uint64 +} + +// TimeoutSignatureAggregator implements consensus.TimeoutSignatureAggregator. +// It performs timeout specific BLS aggregation over multiple distinct messages. +// We perform timeout signature aggregation for some concrete rank, utilizing +// the protocol specification that timeouts sign the message: +// hash(rank, newestQCRank), where newestQCRank can have different values +// for different replicas. +// Rank and the identities of all authorized replicas are specified when the +// TimeoutSignatureAggregator is instantiated. Each signer is allowed to sign at +// most once. Aggregation uses BLS scheme. Mitigation against rogue attacks is +// done using Proof Of Possession (PoP). Implementation is only safe under the +// assumption that all proofs of possession (PoP) of the public keys are valid. +// This module does not perform the PoPs validity checks, it assumes +// verification was done outside the module. Implementation is thread-safe. 
+type TimeoutSignatureAggregator struct { + lock sync.RWMutex + filter []byte + dsTag []byte + aggregator consensus.SignatureAggregator + idToInfo map[models.Identity]signerInfo // auxiliary map to lookup signer weight and public key (only gets updated by constructor) + idToSignature map[models.Identity]sigInfo // signatures indexed by the signer ID + totalWeight uint64 // total accumulated weight + rank uint64 // rank for which we are aggregating signatures +} + +var _ consensus.TimeoutSignatureAggregator = (*TimeoutSignatureAggregator)(nil) + +// NewTimeoutSignatureAggregator returns a multi message signature aggregator +// initialized with a predefined rank for which we aggregate signatures, list of +// identities, their respective public keys and a domain separation tag. The +// identities represent the list of all authorized signers. The constructor does +// not verify PoPs of input public keys, it assumes verification was done +// outside this module. +// The constructor errors if: +// - the list of identities is empty +// - if one of the keys is not a valid public key. +// +// A multi message sig aggregator is used for aggregating timeouts for a single +// rank only. A new instance should be used for each signature aggregation task +// in the protocol. +func NewTimeoutSignatureAggregator( + aggregator consensus.SignatureAggregator, + filter []byte, + rank uint64, // rank for which we are aggregating signatures + ids []models.WeightedIdentity, // list of all authorized signers + dsTag []byte, // domain separation tag used by the signature +) (*TimeoutSignatureAggregator, error) { + if len(ids) == 0 { + return nil, fmt.Errorf( + "number of participants must be larger than 0, got %d", + len(ids), + ) + } + + // build the internal map for a faster look-up + idToInfo := make(map[models.Identity]signerInfo) + for _, id := range ids { + idToInfo[id.Identity()] = signerInfo{ + pk: id.PublicKey(), + weight: id.Weight(), + } + } + + return &TimeoutSignatureAggregator{ + aggregator: aggregator, + filter: filter, + dsTag: dsTag, + idToInfo: idToInfo, + idToSignature: make(map[models.Identity]sigInfo), + rank: rank, + }, nil +} + +// VerifyAndAdd verifies the signature under the stored public keys and adds +// signature with corresponding newest QC rank to the internal set. Internal set +// and collected weight is modified iff the signer ID is not a duplicate and +// signature _is_ valid. The total weight of all collected signatures (excluding +// duplicates) is returned regardless of any returned error. +// Expected errors during normal operations: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.DuplicatedSignerError if the signer has been already added +// - models.ErrInvalidSignature if signerID is valid but signature is +// cryptographically invalid +// +// The function is thread-safe. 
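+//
+// Call-site sketch (for illustration only; the helper predicates
+// models.IsInvalidSignerError and models.IsDuplicatedSignerError are assumed to
+// exist alongside the error constructors used in this file):
+//
+//	weight, err := agg.VerifyAndAdd(signerID, sig, newestQCRank)
+//	if models.IsInvalidSignerError(err) || models.IsDuplicatedSignerError(err) {
+//		return nil // reject the input, keep collecting
+//	}
+//	if errors.Is(err, models.ErrInvalidSignature) {
+//		return nil // invalid signature, total weight unchanged
+//	}
+//	if err != nil {
+//		return err // unexpected exception
+//	}
+//	// `weight` is the total weight collected so far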
+func (a *TimeoutSignatureAggregator) VerifyAndAdd(
+  signerID models.Identity,
+  sig []byte,
+  newestQCRank uint64,
+) (totalWeight uint64, exception error) {
+  info, ok := a.idToInfo[signerID]
+  if !ok {
+    return a.TotalWeight(), models.NewInvalidSignerErrorf(
+      "%x is not an authorized signer",
+      signerID,
+    )
+  }
+
+  // to avoid expensive signature verification we proceed with a double-checked
+  // locking pattern
+  if a.hasSignature(signerID) {
+    return a.TotalWeight(), models.NewDuplicatedSignerErrorf(
+      "signature from %x was already added",
+      signerID,
+    )
+  }
+
+  msg := verification.MakeTimeoutMessage(a.filter, a.rank, newestQCRank)
+  valid := a.aggregator.VerifySignatureRaw(info.pk, sig, msg, a.dsTag)
+  if !valid {
+    return a.TotalWeight(), fmt.Errorf(
+      "invalid signature from %s: %w",
+      signerID,
+      models.ErrInvalidSignature,
+    )
+  }
+
+  a.lock.Lock()
+  defer a.lock.Unlock()
+
+  if _, duplicate := a.idToSignature[signerID]; duplicate {
+    return a.totalWeight, models.NewDuplicatedSignerErrorf(
+      "signature from %x was already added",
+      signerID,
+    )
+  }
+
+  a.idToSignature[signerID] = sigInfo{
+    sig:          sig,
+    newestQCRank: newestQCRank,
+  }
+  a.totalWeight += info.weight
+
+  return a.totalWeight, nil
+}
+
+func (a *TimeoutSignatureAggregator) hasSignature(
+  signerID models.Identity,
+) bool {
+  a.lock.RLock()
+  defer a.lock.RUnlock()
+  _, found := a.idToSignature[signerID]
+  return found
+}
+
+// TotalWeight returns the total weight presented by the collected signatures.
+// The function is thread-safe.
+func (a *TimeoutSignatureAggregator) TotalWeight() uint64 {
+  a.lock.RLock()
+  defer a.lock.RUnlock()
+  return a.totalWeight
+}
+
+// Rank returns the rank for which aggregation happens.
+// The function is thread-safe.
+func (a *TimeoutSignatureAggregator) Rank() uint64 {
+  return a.rank
+}
+
+// Aggregate aggregates the signatures and returns the aggregated signature.
+// The resulting aggregated signature is guaranteed to be valid, as all
+// individual signatures are pre-validated before their addition.
Expected +// errors during normal operations: +// - models.InsufficientSignaturesError if no signatures have been added yet +// +// This function is thread-safe +func (a *TimeoutSignatureAggregator) Aggregate() ( + []consensus.TimeoutSignerInfo, + models.AggregatedSignature, + error, +) { + a.lock.RLock() + defer a.lock.RUnlock() + + sharesNum := len(a.idToSignature) + signatures := make([][]byte, 0, sharesNum) + publicKeys := make([][]byte, 0, sharesNum) + signersData := make([]consensus.TimeoutSignerInfo, 0, sharesNum) + for id, info := range a.idToSignature { + publicKeys = append(publicKeys, a.idToInfo[id].pk) + signatures = append(signatures, info.sig) + signersData = append(signersData, consensus.TimeoutSignerInfo{ + NewestQCRank: info.newestQCRank, + Signer: id, + }) + } + + if sharesNum == 0 { + return nil, nil, models.NewInsufficientSignaturesErrorf( + "cannot aggregate an empty list of signatures", + ) + } + + aggSignature, err := a.aggregator.Aggregate(publicKeys, signatures) + if err != nil { + // any other error here is a symptom of an internal bug + return nil, nil, fmt.Errorf( + "unexpected internal error during BLS signature aggregation: %w", + err, + ) + } + + return signersData, aggSignature, nil +} diff --git a/consensus/timeoutcollector/factory.go b/consensus/timeoutcollector/factory.go new file mode 100644 index 0000000..b4ac96d --- /dev/null +++ b/consensus/timeoutcollector/factory.go @@ -0,0 +1,174 @@ +package timeoutcollector + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorFactory implements consensus.TimeoutCollectorFactory, it is +// responsible for creating timeout collector for given rank. +type TimeoutCollectorFactory[VoteT models.Unique] struct { + tracer consensus.TraceLogger + notifier consensus.TimeoutAggregationConsumer[VoteT] + processorFactory consensus.TimeoutProcessorFactory[VoteT] +} + +var _ consensus.TimeoutCollectorFactory[*nilUnique] = (*TimeoutCollectorFactory[*nilUnique])(nil) + +// NewTimeoutCollectorFactory creates new instance of TimeoutCollectorFactory. +// No error returns are expected during normal operations. +func NewTimeoutCollectorFactory[VoteT models.Unique]( + tracer consensus.TraceLogger, + notifier consensus.TimeoutAggregationConsumer[VoteT], + createProcessor consensus.TimeoutProcessorFactory[VoteT], +) *TimeoutCollectorFactory[VoteT] { + return &TimeoutCollectorFactory[VoteT]{ + tracer: tracer, + notifier: notifier, + processorFactory: createProcessor, + } +} + +// Create is a factory method to generate a TimeoutCollector for a given rank +// Expected error returns during normal operations: +// - models.ErrRankUnknown if rank is not yet pruned but no rank containing +// the given rank is known +// +// All other errors should be treated as exceptions. +func (f *TimeoutCollectorFactory[VoteT]) Create(rank uint64) ( + consensus.TimeoutCollector[VoteT], + error, +) { + processor, err := f.processorFactory.Create(rank) + if err != nil { + return nil, fmt.Errorf( + "could not create TimeoutProcessor at rank %d: %w", + rank, + err, + ) + } + return NewTimeoutCollector(f.tracer, rank, f.notifier, processor), nil +} + +// TimeoutProcessorFactory implements consensus.TimeoutProcessorFactory, it is +// responsible for creating timeout processor for given rank. 
+type TimeoutProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + tracer consensus.TraceLogger + filter []byte + aggregator consensus.SignatureAggregator + committee consensus.Replicas + notifier consensus.TimeoutCollectorConsumer[VoteT] + validator consensus.Validator[StateT, VoteT] + voting consensus.VotingProvider[StateT, VoteT, PeerIDT] + domainSeparationTag []byte +} + +var _ consensus.TimeoutProcessorFactory[*nilUnique] = (*TimeoutProcessorFactory[*nilUnique, *nilUnique, *nilUnique])(nil) + +// NewTimeoutProcessorFactory creates new instance of TimeoutProcessorFactory. +// No error returns are expected during normal operations. +func NewTimeoutProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + filter []byte, + aggregator consensus.SignatureAggregator, + notifier consensus.TimeoutCollectorConsumer[VoteT], + committee consensus.Replicas, + validator consensus.Validator[StateT, VoteT], + voting consensus.VotingProvider[StateT, VoteT, PeerIDT], + domainSeparationTag []byte, +) *TimeoutProcessorFactory[StateT, VoteT, PeerIDT] { + return &TimeoutProcessorFactory[StateT, VoteT, PeerIDT]{ + tracer: tracer, + filter: filter, + aggregator: aggregator, + committee: committee, + notifier: notifier, + validator: validator, + voting: voting, + domainSeparationTag: domainSeparationTag, + } +} + +// Create is a factory method to generate a TimeoutProcessor for a given rank +// Expected error returns during normal operations: +// - models.ErrRankUnknown no rank containing the given rank is known +// +// All other errors should be treated as exceptions. +func (f *TimeoutProcessorFactory[StateT, VoteT, PeerIDT]) Create(rank uint64) ( + consensus.TimeoutProcessor[VoteT], + error, +) { + allParticipants, err := f.committee.IdentitiesByRank(rank) + if err != nil { + return nil, fmt.Errorf("error retrieving consensus participants: %w", err) + } + + sigAggregator, err := NewTimeoutSignatureAggregator( + f.aggregator, + f.filter, + rank, + allParticipants, + f.domainSeparationTag, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create TimeoutSignatureAggregator at rank %d: %w", + rank, + err, + ) + } + + return NewTimeoutProcessor[StateT, VoteT, PeerIDT]( + f.tracer, + f.committee, + f.validator, + sigAggregator, + f.notifier, + f.voting, + ) +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity {
+  panic("unimplemented")
+}
+
+var _ models.Unique = (*nilUnique)(nil)
diff --git a/consensus/timeoutcollector/timeout_cache.go b/consensus/timeoutcollector/timeout_cache.go
new file mode 100644
index 0000000..9e1fa9e
--- /dev/null
+++ b/consensus/timeoutcollector/timeout_cache.go
@@ -0,0 +1,122 @@
+package timeoutcollector
+
+import (
+  "errors"
+  "sync"
+
+  "source.quilibrium.com/quilibrium/monorepo/consensus/models"
+)
+
+var (
+  // ErrRepeatedTimeout is emitted when we receive an identical timeout state
+  // for the same rank from the same voter multiple times. This error does
+  // _not_ indicate equivocation.
+  ErrRepeatedTimeout            = errors.New("duplicated timeout")
+  ErrTimeoutForIncompatibleRank = errors.New("timeout for incompatible rank")
+)
+
+// TimeoutStatesCache maintains a _concurrency safe_ cache of timeouts for one
+// particular rank. The cache memorizes the order in which the timeouts were
+// received. Timeouts are de-duplicated based on the following rules:
+// - For each voter (i.e. SignerID), we store the _first_ timeout t0.
+// - For any subsequent timeout t, we check whether t equals t0.
+//   If this is the case, we consider the timeout a duplicate and drop it.
+//   If t and t0 have different contents, the voter is equivocating, and
+//   we return a models.DoubleTimeoutError.
+type TimeoutStatesCache[VoteT models.Unique] struct {
+  lock     sync.RWMutex
+  rank     uint64
+  timeouts map[models.Identity]*models.TimeoutState[VoteT] // signerID -> first timeout
+}
+
+// NewTimeoutStatesCache instantiates a TimeoutStatesCache for the given rank
+func NewTimeoutStatesCache[VoteT models.Unique](
+  rank uint64,
+) *TimeoutStatesCache[VoteT] {
+  return &TimeoutStatesCache[VoteT]{
+    rank:     rank,
+    timeouts: make(map[models.Identity]*models.TimeoutState[VoteT]),
+  }
+}
+
+func (vc *TimeoutStatesCache[VoteT]) Rank() uint64 { return vc.rank }
+
+// AddTimeoutState stores a timeout in the cache. The following outcomes are
+// expected during normal operations:
+// - nil: if the timeout was successfully added
+// - models.DoubleTimeoutError is returned if the replica is equivocating
+// - ErrRepeatedTimeout is returned when adding an _identical_ timeout for the
+//   same rank from the same voter multiple times.
+// - ErrTimeoutForIncompatibleRank is returned if the timeout is for a
+//   different rank.
+//
+// When AddTimeoutState returns an error, the timeout is _not_ stored.
+func (vc *TimeoutStatesCache[VoteT]) AddTimeoutState(
+  timeout *models.TimeoutState[VoteT],
+) error {
+  if timeout.Rank != vc.rank {
+    return ErrTimeoutForIncompatibleRank
+  }
+  vc.lock.Lock()
+
+  // De-duplicate timeouts based on the following rules:
+  // * For each voter (i.e. SignerID), we store the _first_ t0.
+  // * For any subsequent timeout t, we check whether t equals t0.
+  //   If this is the case, we consider the timeout a duplicate and drop it.
+  //   If t and t0 have different contents, the voter is equivocating, and
+  //   we return a models.DoubleTimeoutError.
+  firstTimeout, exists := vc.timeouts[(*timeout.Vote).Identity()]
+  if exists {
+    vc.lock.Unlock()
+    if !firstTimeout.Equals(timeout) {
+      return models.NewDoubleTimeoutErrorf(
+        firstTimeout,
+        timeout,
+        "detected timeout equivocation by replica %x at rank: %d",
+        (*timeout.Vote).Identity(),
+        vc.rank,
+      )
+    }
+    return ErrRepeatedTimeout
+  }
+  vc.timeouts[(*timeout.Vote).Identity()] = timeout
+  vc.lock.Unlock()
+
+  return nil
+}
+
+// GetTimeoutState returns the stored timeout for the given `signerID`.
Returns: +// - (timeout, true) if a timeout state from signerID is known +// - (nil, false) no timeout state from signerID is known +func (vc *TimeoutStatesCache[VoteT]) GetTimeoutState( + signerID models.Identity, +) (*models.TimeoutState[VoteT], bool) { + vc.lock.RLock() + timeout, exists := vc.timeouts[signerID] // if signerID is unknown, its `Vote` pointer is nil + vc.lock.RUnlock() + return timeout, exists +} + +// Size returns the number of cached timeout states +func (vc *TimeoutStatesCache[VoteT]) Size() int { + vc.lock.RLock() + s := len(vc.timeouts) + vc.lock.RUnlock() + return s +} + +// All returns all currently cached timeout states. Concurrency safe. +func (vc *TimeoutStatesCache[VoteT]) All() []*models.TimeoutState[VoteT] { + vc.lock.RLock() + defer vc.lock.RUnlock() + return vc.all() +} + +// all returns all currently cached timeout states. NOT concurrency safe +func (vc *TimeoutStatesCache[VoteT]) all() []*models.TimeoutState[VoteT] { + timeoutStates := make([]*models.TimeoutState[VoteT], 0, len(vc.timeouts)) + for _, t := range vc.timeouts { + timeoutStates = append(timeoutStates, t) + } + return timeoutStates +} diff --git a/consensus/timeoutcollector/timeout_cache_test.go b/consensus/timeoutcollector/timeout_cache_test.go new file mode 100644 index 0000000..7fcbceb --- /dev/null +++ b/consensus/timeoutcollector/timeout_cache_test.go @@ -0,0 +1,172 @@ +package timeoutcollector + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TestTimeoutStatesCache_Rank tests that Rank returns same value that was set by constructor +func TestTimeoutStatesCache_Rank(t *testing.T) { + rank := uint64(100) + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + require.Equal(t, rank, cache.Rank()) +} + +// TestTimeoutStatesCache_AddTimeoutStateRepeatedTimeout tests that AddTimeoutState skips duplicated timeouts +func TestTimeoutStatesCache_AddTimeoutStateRepeatedTimeout(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: rank, + }), + ) + + require.NoError(t, cache.AddTimeoutState(timeout)) + err := cache.AddTimeoutState(timeout) + require.ErrorIs(t, err, ErrRepeatedTimeout) + require.Len(t, cache.All(), 1) +} + +// TestTimeoutStatesCache_AddTimeoutStateIncompatibleRank tests that adding timeout with incompatible rank results in error +func TestTimeoutStatesCache_AddTimeoutStateIncompatibleRank(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank+1), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: rank, + }), + ) + err := cache.AddTimeoutState(timeout) + require.ErrorIs(t, err, ErrTimeoutForIncompatibleRank) +} + +// TestTimeoutStatesCache_GetTimeout tests that GetTimeout method returns the first added timeout +// for a given signer, if any timeout has been added. 
+func TestTimeoutStatesCache_GetTimeout(t *testing.T) { + rank := uint64(100) + knownTimeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: rank, + }), + ) + doubleTimeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: rank, + }), + ) + + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + + // unknown timeout + timeout, found := cache.GetTimeoutState(helper.MakeIdentity()) + require.Nil(t, timeout) + require.False(t, found) + + // known timeout + err := cache.AddTimeoutState(knownTimeout) + require.NoError(t, err) + timeout, found = cache.GetTimeoutState((*knownTimeout.Vote).ID) + require.Equal(t, knownTimeout, timeout) + require.True(t, found) + + // for a signer ID with a known timeout, the cache should memorize the _first_ encountered timeout + err = cache.AddTimeoutState(doubleTimeout) + require.True(t, models.IsDoubleTimeoutError[*helper.TestVote](err)) + timeout, found = cache.GetTimeoutState((*doubleTimeout.Vote).ID) + require.Equal(t, knownTimeout, timeout) + require.True(t, found) +} + +// TestTimeoutStatesCache_All tests that All returns previously added timeouts. +func TestTimeoutStatesCache_All(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + expectedTimeouts := make([]*models.TimeoutState[*helper.TestVote], 5) + for i := range expectedTimeouts { + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: fmt.Sprintf("%d", i), + Rank: rank, + }), + ) + expectedTimeouts[i] = timeout + require.NoError(t, cache.AddTimeoutState(timeout)) + } + require.ElementsMatch(t, expectedTimeouts, cache.All()) +} + +// BenchmarkAdd measured the time it takes to add `numberTimeouts` concurrently to the TimeoutStatesCache. +// On MacBook with Intel i7-7820HQ CPU @ 2.90GHz: +// adding 1 million timeouts in total, with 20 threads concurrently, took 0.48s +func BenchmarkAdd(b *testing.B) { + numberTimeouts := 1_000_000 + threads := 20 + + // Setup: create worker routines and timeouts to feed + rank := uint64(10) + cache := NewTimeoutStatesCache[*helper.TestVote](rank) + + var start sync.WaitGroup + start.Add(threads) + var done sync.WaitGroup + done.Add(threads) + + n := numberTimeouts / threads + + for ; threads > 0; threads-- { + go func(i int) { + // create timeouts and signal ready + timeouts := make([]models.TimeoutState[*helper.TestVote], 0, n) + for len(timeouts) < n { + t := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: rank, + }), + ) + timeouts = append(timeouts, *t) + } + + start.Done() + + // Wait for last worker routine to signal ready. 
Then, + // feed all timeouts into cache + start.Wait() + + for _, v := range timeouts { + err := cache.AddTimeoutState(&v) + require.NoError(b, err) + } + done.Done() + }(threads) + } + start.Wait() + t1 := time.Now() + done.Wait() + duration := time.Since(t1) + fmt.Printf("=> adding %d timeouts to Cache took %f seconds\n", cache.Size(), duration.Seconds()) +} diff --git a/consensus/timeoutcollector/timeout_collector.go b/consensus/timeoutcollector/timeout_collector.go new file mode 100644 index 0000000..d972cb0 --- /dev/null +++ b/consensus/timeoutcollector/timeout_collector.go @@ -0,0 +1,152 @@ +package timeoutcollector + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/counters" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollector implements logic for collecting timeout states. Performs +// deduplication, caching and processing of timeouts, delegating those tasks to +// underlying modules. Emits notifications about verified QCs and TCs, if their +// rank is newer than any QC or TC previously known to the TimeoutCollector. +// This module is safe to use in concurrent environment. +type TimeoutCollector[VoteT models.Unique] struct { + tracer consensus.TraceLogger + timeoutsCache *TimeoutStatesCache[VoteT] // cache for tracking double timeout and timeout equivocation + notifier consensus.TimeoutAggregationConsumer[VoteT] + processor consensus.TimeoutProcessor[VoteT] + newestReportedQC counters.StrictMonotonicCounter // rank of newest QC that was reported + newestReportedTC counters.StrictMonotonicCounter // rank of newest TC that was reported +} + +var _ consensus.TimeoutCollector[*nilUnique] = (*TimeoutCollector[*nilUnique])(nil) + +// NewTimeoutCollector creates new instance of TimeoutCollector +func NewTimeoutCollector[VoteT models.Unique]( + tracer consensus.TraceLogger, + rank uint64, + notifier consensus.TimeoutAggregationConsumer[VoteT], + processor consensus.TimeoutProcessor[VoteT], +) *TimeoutCollector[VoteT] { + tc := &TimeoutCollector[VoteT]{ + tracer: tracer, + notifier: notifier, + timeoutsCache: NewTimeoutStatesCache[VoteT](rank), + processor: processor, + newestReportedQC: counters.NewMonotonicCounter(0), + newestReportedTC: counters.NewMonotonicCounter(0), + } + + return tc +} + +// AddTimeout adds a Timeout State to the collector. When TSs from +// strictly more than 1/3 of consensus participants (measured by weight) were +// collected, the callback for partial TC will be triggered. After collecting +// TSs from a supermajority, a TC will be created and passed to the EventLoop. +// Expected error returns during normal operations: +// - timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for +// incompatible rank +// +// All other exceptions are symptoms of potential state corruption. 
+func (c *TimeoutCollector[VoteT]) AddTimeout(
+	timeout *models.TimeoutState[VoteT],
+) error {
+	// cache timeout
+	err := c.timeoutsCache.AddTimeoutState(timeout)
+	if err != nil {
+		if errors.Is(err, ErrRepeatedTimeout) {
+			return nil
+		}
+		doubleTimeoutErr, isDoubleTimeoutErr :=
+			models.AsDoubleTimeoutError[VoteT](err)
+		if isDoubleTimeoutErr {
+			c.notifier.OnDoubleTimeoutDetected(
+				doubleTimeoutErr.FirstTimeout,
+				doubleTimeoutErr.ConflictingTimeout,
+			)
+			return nil
+		}
+		return fmt.Errorf("internal error adding timeout to cache: %d: %w",
+			timeout.Rank,
+			err,
+		)
+	}
+
+	err = c.processTimeout(timeout)
+	if err != nil {
+		return fmt.Errorf("internal error processing TO: %d: %w",
+			timeout.Rank,
+			err,
+		)
+	}
+	return nil
+}
+
+// processTimeout delegates TO processing to TimeoutProcessor and handles the
+// expected sentinel errors, which are reported to the notifier. Notifies
+// listeners about validated QCs and TCs. No errors are expected during normal
+// flow of operations.
+func (c *TimeoutCollector[VoteT]) processTimeout(
+	timeout *models.TimeoutState[VoteT],
+) error {
+	err := c.processor.Process(timeout)
+	if err != nil {
+		if invalidTimeoutErr, ok := models.AsInvalidTimeoutError[VoteT](err); ok {
+			c.notifier.OnInvalidTimeoutDetected(*invalidTimeoutErr)
+			return nil
+		}
+		return fmt.Errorf("internal error while processing timeout: %w", err)
+	}
+
+	// TODO: consider moving OnTimeoutProcessed to TimeoutAggregationConsumer,
+	// need to fix telemetry for this.
+	c.notifier.OnTimeoutProcessed(timeout)
+
+	// In the following, we emit notifications about new QCs, if their rank is
+	// newer than any QC previously known to the TimeoutCollector. Note that our
+	// implementation only provides weak ordering:
+	// * Over larger time scales, the emitted events are for statistically
+	//   increasing ranks.
+	// * However, on short time scales there are _no_ monotonicity guarantees
+	//   w.r.t. the ranks.
+	// Explanation:
+	// While only QCs with strictly monotonically increasing ranks pass the
+	// `if c.newestReportedQC.Set(timeout.NewestQC.Rank)` statement, we emit the
+	// notification in a separate step. Therefore, emitting the notifications is
+	// subject to races, where on very short time-scales the notifications can be
+	// out of order. Nevertheless, we note that notifications are only created for
+	// QCs that are strictly newer than any other known QC at the time we check
+	// via the `if ... Set(..)` statement. Thereby, we implement the desired
+	// filtering behaviour, i.e. that the recipient of the notifications is not
+	// spammed by old (or repeated) QCs. Reasoning for this approach:
+	// The current implementation is completely lock-free without noteworthy risk
+	// of congestion. For the recipient of the notifications, the weak ordering is
+	// of no concern, because it anyway is only interested in the newest QC.
+	// Time-localized disorder is irrelevant, because newer QCs that would arrive
+	// later in a strongly ordered system can only arrive earlier in our weakly
+	// ordered implementation. Hence, if anything, the recipient receives the
+	// desired information _earlier_ but not later.
+	if c.newestReportedQC.Set(timeout.LatestQuorumCertificate.GetRank()) {
+		c.notifier.OnNewQuorumCertificateDiscovered(timeout.LatestQuorumCertificate)
+	}
+	// Same explanation for weak ordering of QCs also applies to TCs.
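+	// For illustration only, a minimal sketch of the gate used for both the QC
+	// notification above and the TC notification below (generic names, not a
+	// literal excerpt):
+	//
+	//	if counter.Set(newRank) { // true only if newRank strictly exceeds the previously reported rank
+	//		notifier.OnNew...Discovered(cert)
+	//	}
+	//
+	// The counter is a counters.StrictMonotonicCounter, so stale or repeated
+	// certificates never trigger a notification.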
+ if timeout.PriorRankTimeoutCertificate != nil { + if c.newestReportedTC.Set(timeout.PriorRankTimeoutCertificate.GetRank()) { + c.notifier.OnNewTimeoutCertificateDiscovered( + timeout.PriorRankTimeoutCertificate, + ) + } + } + + return nil +} + +// Rank returns rank which is associated with this timeout collector +func (c *TimeoutCollector[VoteT]) Rank() uint64 { + return c.timeoutsCache.Rank() +} diff --git a/consensus/timeoutcollector/timeout_collector_test.go b/consensus/timeoutcollector/timeout_collector_test.go new file mode 100644 index 0000000..9fe2f5b --- /dev/null +++ b/consensus/timeoutcollector/timeout_collector_test.go @@ -0,0 +1,230 @@ +package timeoutcollector + +import ( + "errors" + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestTimeoutCollector(t *testing.T) { + suite.Run(t, new(TimeoutCollectorTestSuite)) +} + +// TimeoutCollectorTestSuite is a test suite for testing TimeoutCollector. It stores mocked +// state internally for testing behavior. +type TimeoutCollectorTestSuite struct { + suite.Suite + + rank uint64 + notifier *mocks.TimeoutAggregationConsumer[*helper.TestVote] + processor *mocks.TimeoutProcessor[*helper.TestVote] + collector *TimeoutCollector[*helper.TestVote] +} + +func (s *TimeoutCollectorTestSuite) SetupTest() { + s.rank = 1000 + s.notifier = mocks.NewTimeoutAggregationConsumer[*helper.TestVote](s.T()) + s.processor = mocks.NewTimeoutProcessor[*helper.TestVote](s.T()) + + s.notifier.On("OnNewQuorumCertificateDiscovered", mock.Anything).Maybe() + s.notifier.On("OnNewTimeoutCertificateDiscovered", mock.Anything).Maybe() + + s.collector = NewTimeoutCollector(helper.Logger(), s.rank, s.notifier, s.processor) +} + +// TestRank tests that `Rank` returns the same value that was passed in constructor +func (s *TimeoutCollectorTestSuite) TestRank() { + require.Equal(s.T(), s.rank, s.collector.Rank()) +} + +// TestAddTimeout_HappyPath tests that process in happy path executed by multiple workers deliver expected results +// all operations should be successful, no errors expected +func (s *TimeoutCollectorTestSuite) TestAddTimeout_HappyPath() { + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(1) + go func() { + defer wg.Done() + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: s.rank, + }), + ) + s.notifier.On("OnTimeoutProcessed", timeout).Once() + s.processor.On("Process", timeout).Return(nil).Once() + err := s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + }() + } + + s.processor.AssertExpectations(s.T()) +} + +// TestAddTimeout_DoubleTimeout tests that submitting two different timeouts for same rank ends with reporting +// double timeout to notifier which can be slashed later. 
+func (s *TimeoutCollectorTestSuite) TestAddTimeout_DoubleTimeout() { + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: s.rank, + }), + ) + s.notifier.On("OnTimeoutProcessed", timeout).Once() + s.processor.On("Process", timeout).Return(nil).Once() + err := s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + + otherTimeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: "1", + Rank: s.rank, + }), + ) + + s.notifier.On("OnDoubleTimeoutDetected", timeout, otherTimeout).Once() + + err = s.collector.AddTimeout(otherTimeout) + require.NoError(s.T(), err) + s.notifier.AssertExpectations(s.T()) + s.processor.AssertNumberOfCalls(s.T(), "Process", 1) +} + +// TestAddTimeout_RepeatedTimeout checks that repeated timeouts are silently dropped without any errors. +func (s *TimeoutCollectorTestSuite) TestAddTimeout_RepeatedTimeout() { + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: s.rank, + }), + ) + s.notifier.On("OnTimeoutProcessed", timeout).Once() + s.processor.On("Process", timeout).Return(nil).Once() + err := s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + err = s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + s.processor.AssertNumberOfCalls(s.T(), "Process", 1) +} + +// TestAddTimeout_TimeoutCacheException tests that submitting timeout state for rank which is not designated for this +// collector results in ErrTimeoutForIncompatibleRank. +func (s *TimeoutCollectorTestSuite) TestAddTimeout_TimeoutCacheException() { + // incompatible rank is an exception and not handled by timeout collector + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank+1), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: s.rank + 1, + }), + ) + err := s.collector.AddTimeout(timeout) + require.ErrorIs(s.T(), err, ErrTimeoutForIncompatibleRank) + s.processor.AssertNotCalled(s.T(), "Process") +} + +// TestAddTimeout_InvalidTimeout tests that sentinel errors while processing timeouts are correctly handled and reported +// to notifier, but exceptions are propagated to caller. 
+func (s *TimeoutCollectorTestSuite) TestAddTimeout_InvalidTimeout() { + s.Run("invalid-timeout", func() { + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: s.rank, + }), + ) + s.processor.On("Process", timeout).Return(models.NewInvalidTimeoutErrorf(timeout, "")).Once() + s.notifier.On("OnInvalidTimeoutDetected", mock.Anything).Run(func(args mock.Arguments) { + invalidTimeoutErr := args.Get(0).(models.InvalidTimeoutError[*helper.TestVote]) + require.Equal(s.T(), timeout, invalidTimeoutErr.Timeout) + }).Once() + err := s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + + time.Sleep(100 * time.Millisecond) + s.notifier.AssertCalled(s.T(), "OnInvalidTimeoutDetected", mock.Anything) + }) + s.Run("process-exception", func() { + exception := errors.New("invalid-signature") + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{ + ID: helper.MakeIdentity(), + Rank: s.rank, + }), + ) + s.processor.On("Process", timeout).Return(exception).Once() + err := s.collector.AddTimeout(timeout) + require.ErrorIs(s.T(), err, exception) + }) +} + +// TestAddTimeout_TONotifications tests that TimeoutCollector in happy path reports the newest discovered QC and TC +func (s *TimeoutCollectorTestSuite) TestAddTimeout_TONotifications() { + qcCount := 100 + // generate QCs with increasing rank numbers + if s.rank < uint64(qcCount) { + s.T().Fatal("invalid test configuration") + } + + *s.notifier = *mocks.NewTimeoutAggregationConsumer[*helper.TestVote](s.T()) + + var highestReportedQC models.QuorumCertificate + s.notifier.On("OnNewQuorumCertificateDiscovered", mock.Anything).Run(func(args mock.Arguments) { + qc := args.Get(0).(models.QuorumCertificate) + if highestReportedQC == nil || highestReportedQC.GetRank() < qc.GetRank() { + highestReportedQC = qc + } + }) + + previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.rank - 1)) + s.notifier.On("OnNewTimeoutCertificateDiscovered", previousRankTimeoutCert).Once() + + timeouts := make([]*models.TimeoutState[*helper.TestVote], 0, qcCount) + for i := 0; i < qcCount; i++ { + qc := helper.MakeQC(helper.WithQCRank(uint64(i))) + timeout := helper.TimeoutStateFixture(func(timeout *models.TimeoutState[*helper.TestVote]) { + timeout.Rank = s.rank + timeout.LatestQuorumCertificate = qc + timeout.PriorRankTimeoutCertificate = previousRankTimeoutCert + }, helper.WithTimeoutVote(&helper.TestVote{Rank: s.rank, ID: helper.MakeIdentity()})) + timeouts = append(timeouts, timeout) + s.notifier.On("OnTimeoutProcessed", timeout).Once() + s.processor.On("Process", timeout).Return(nil).Once() + } + + expectedHighestQC := timeouts[len(timeouts)-1].LatestQuorumCertificate + + // shuffle timeouts in random order + rand.Shuffle(len(timeouts), func(i, j int) { + timeouts[i], timeouts[j] = timeouts[j], timeouts[i] + }) + + var wg sync.WaitGroup + wg.Add(len(timeouts)) + for _, timeout := range timeouts { + go func(timeout *models.TimeoutState[*helper.TestVote]) { + defer wg.Done() + err := s.collector.AddTimeout(timeout) + require.NoError(s.T(), err) + }(timeout) + } + wg.Wait() + + require.Equal(s.T(), expectedHighestQC, highestReportedQC) +} diff --git a/consensus/timeoutcollector/timeout_processor.go b/consensus/timeoutcollector/timeout_processor.go new file mode 100644 index 0000000..c20466b --- /dev/null +++ 
b/consensus/timeoutcollector/timeout_processor.go
@@ -0,0 +1,418 @@
+package timeoutcollector
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"go.uber.org/atomic"
+
+	"source.quilibrium.com/quilibrium/monorepo/consensus"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/tracker"
+)
+
+// accumulatedWeightTracker tracks the one-time event of reaching the required
+// weight. Uses an atomic flag to guarantee concurrency safety.
+type accumulatedWeightTracker struct {
+	minRequiredWeight uint64
+	done              atomic.Bool
+}
+
+func (t *accumulatedWeightTracker) Done() bool {
+	return t.done.Load()
+}
+
+// Track returns true if `weight` reaches or exceeds `minRequiredWeight` for the
+// _first time_. All subsequent calls of `Track` (with any value) return false.
+func (t *accumulatedWeightTracker) Track(weight uint64) bool {
+	if weight < t.minRequiredWeight {
+		return false
+	}
+	return t.done.CompareAndSwap(false, true)
+}
+
+// TimeoutProcessor implements the consensus.TimeoutProcessor interface. It
+// processes timeout states broadcast by other replicas of the consensus
+// committee. TimeoutProcessor collects TSs for one rank; once enough timeout
+// states have been contributed, it creates a timeout certificate which can be
+// used to advance the round. Concurrency safe.
+type TimeoutProcessor[
+	StateT models.Unique,
+	VoteT models.Unique,
+	PeerIDT models.Unique,
+] struct {
+	tracer           consensus.TraceLogger
+	rank             uint64
+	validator        consensus.Validator[StateT, VoteT]
+	committee        consensus.Replicas
+	sigAggregator    consensus.TimeoutSignatureAggregator
+	notifier         consensus.TimeoutCollectorConsumer[VoteT]
+	voting           consensus.VotingProvider[StateT, VoteT, PeerIDT]
+	partialTCTracker accumulatedWeightTracker
+	tcTracker        accumulatedWeightTracker
+	newestQCTracker  *tracker.NewestQCTracker
+}
+
+var _ consensus.TimeoutProcessor[*nilUnique] = (*TimeoutProcessor[*nilUnique, *nilUnique, *nilUnique])(nil)
+
+// NewTimeoutProcessor creates a new instance of TimeoutProcessor.
+// Returns the following expected errors for invalid inputs:
+// - models.ErrRankUnknown if no information for the given rank is known
+//
+// All other errors should be treated as exceptions.
+func NewTimeoutProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + committee consensus.Replicas, + validator consensus.Validator[StateT, VoteT], + sigAggregator consensus.TimeoutSignatureAggregator, + notifier consensus.TimeoutCollectorConsumer[VoteT], + voting consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (*TimeoutProcessor[StateT, VoteT, PeerIDT], error) { + rank := sigAggregator.Rank() + qcThreshold, err := committee.QuorumThresholdForRank(rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve QC weight threshold for rank %d: %w", + rank, + err, + ) + } + timeoutThreshold, err := committee.TimeoutThresholdForRank(rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve timeout weight threshold for rank %d: %w", + rank, + err, + ) + } + return &TimeoutProcessor[StateT, VoteT, PeerIDT]{ + tracer: tracer, + rank: rank, + committee: committee, + validator: validator, + notifier: notifier, + partialTCTracker: accumulatedWeightTracker{ + minRequiredWeight: timeoutThreshold, + done: *atomic.NewBool(false), + }, + tcTracker: accumulatedWeightTracker{ + minRequiredWeight: qcThreshold, + done: *atomic.NewBool(false), + }, + sigAggregator: sigAggregator, + newestQCTracker: tracker.NewNewestQCTracker(), + voting: voting, + }, nil +} + +// Process performs processing of timeout state in concurrent safe way. This +// function is implemented to be called by multiple goroutines at the same time. +// Design of this function is event driven, as soon as we collect enough weight +// to create a TC or a partial TC we will immediately do so and submit it +// via callback for further processing. +// Expected error returns during normal operations: +// - ErrTimeoutForIncompatibleRank - submitted timeout for incompatible rank +// - models.InvalidTimeoutError - submitted invalid timeout(invalid structure +// or invalid signature) +// - models.DuplicatedSignerError if a timeout from the same signer was +// previously already added. It does _not necessarily_ imply that the +// timeout is invalid or the sender is equivocating. +// +// All other errors should be treated as exceptions. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) Process( + timeout *models.TimeoutState[VoteT], +) error { + if p.rank != timeout.Rank { + return fmt.Errorf( + "received incompatible timeout, expected %d got %d: %w", + p.rank, + timeout.Rank, + ErrTimeoutForIncompatibleRank, + ) + } + + if p.tcTracker.Done() { + return nil + } + + err := p.validateTimeout(timeout) + if err != nil { + return fmt.Errorf("validating timeout failed: %w", err) + } + if p.tcTracker.Done() { + return nil + } + + // CAUTION: for correctness it is critical that we update the + // `newestQCTracker` first, _before_ we add the TO's signature to + // `sigAggregator`. Reasoning: + // * For a valid TC, we require that the TC includes a QC with + // rank ≥ max{TC.LatestQuorumCertificateRanks}. + // * The `LatestQuorumCertificateRanks` is maintained by `sigAggregator`. + // * Hence, for any rank `v ∈ LatestQuorumCertificateRanks` that + // `sigAggregator` knows, a QC with equal or larger rank is known to + // `newestQCTracker`. This is guaranteed if and only if `newestQCTracker` + // is updated first. 
+	p.newestQCTracker.Track(&timeout.LatestQuorumCertificate)
+
+	totalWeight, err := p.sigAggregator.VerifyAndAdd(
+		(*timeout.Vote).Identity(),
+		(*timeout.Vote).GetSignature(),
+		timeout.LatestQuorumCertificate.GetRank(),
+	)
+	if err != nil {
+		if models.IsInvalidSignerError(err) {
+			return models.NewInvalidTimeoutErrorf(
+				timeout,
+				"invalid signer for timeout: %w",
+				err,
+			)
+		}
+		if errors.Is(err, models.ErrInvalidSignature) {
+			return models.NewInvalidTimeoutErrorf(
+				timeout,
+				"timeout is from valid signer but has cryptographically invalid signature: %w",
+				err,
+			)
+		}
+		// models.DuplicatedSignerError is an expected error and just bubbled up the
+		// call stack. It does _not necessarily_ imply that the timeout is invalid
+		// or the sender is equivocating.
+		return fmt.Errorf("adding signature to aggregator failed: %w", err)
+	}
+	p.tracer.Trace(fmt.Sprintf(
+		"processed timeout, total weight=(%d), required=(%d)",
+		totalWeight,
+		p.tcTracker.minRequiredWeight,
+	))
+
+	if p.partialTCTracker.Track(totalWeight) {
+		qc := p.newestQCTracker.NewestQC()
+		p.notifier.OnPartialTimeoutCertificateCreated(
+			p.rank,
+			*qc,
+			timeout.PriorRankTimeoutCertificate,
+		)
+	}
+
+	// The conditions for building a TC are satisfied when willBuildTC is true:
+	// at this point we have collected enough signatures to build a TC. Another
+	// routine might be at this same point concurrently. To avoid duplicate work,
+	// Track returns true only once.
+	willBuildTC := p.tcTracker.Track(totalWeight)
+	if !willBuildTC {
+		p.tracer.Trace(
+			"insufficient weight to build tc",
+			consensus.Uint64Param("total_weight", totalWeight),
+		)
+		// either we do not have enough timeouts to build a TC, or another thread
+		// has already passed this gate and created a TC
+		return nil
+	}
+
+	tc, err := p.buildTC()
+	if err != nil {
+		return fmt.Errorf("internal error constructing TC: %w", err)
+	}
+	p.notifier.OnTimeoutCertificateConstructedFromTimeouts(*tc)
+	p.tracer.Trace(
+		"timeout constructed from timeouts",
+		consensus.Uint64Param("rank", (*tc).GetRank()),
+	)
+	return nil
+}
+
+// validateTimeout performs validation of a timeout state: it verifies that the
+// timeout is correctly structured and that the included QC and TC are
+// correctly structured and signed. ATTENTION: this function does _not_ check
+// whether the TO's `SignerID` is an authorized node nor whether the signature
+// is valid. These checks happen in the signature aggregator.
+// Expected error returns during normal operations:
+// * models.InvalidTimeoutError - submitted invalid timeout
+// All other errors should be treated as exceptions.
+func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) validateTimeout(
+	timeout *models.TimeoutState[VoteT],
+) error {
+	// 1. check if it's correctly structured
+	// (a) Every TO must contain a QC
+	if timeout.LatestQuorumCertificate == nil {
+		return models.NewInvalidTimeoutErrorf(timeout, "TimeoutState without QC is invalid")
+	}
+
+	if timeout.Rank <= timeout.LatestQuorumCertificate.GetRank() {
+		return models.NewInvalidTimeoutErrorf(
+			timeout,
+			"TO's QC %d cannot be newer than the TO's rank %d",
+			timeout.LatestQuorumCertificate.GetRank(),
+			timeout.Rank,
+		)
+	}
+
+	// (b) If a TC is included, the TC must be for the past round, no matter
+	// whether a QC for the last round is also included. In some edge cases, a
+	// node might observe _both_ QC and TC for the previous round, in which
+	// case it can include both.
+	if timeout.PriorRankTimeoutCertificate != nil {
+		if timeout.Rank != timeout.PriorRankTimeoutCertificate.GetRank()+1 {
+			return models.NewInvalidTimeoutErrorf(
+				timeout,
+				"invalid TC for non-previous rank, expected rank %d, got rank %d",
+				timeout.Rank-1,
+				timeout.PriorRankTimeoutCertificate.GetRank(),
+			)
+		}
+		if timeout.LatestQuorumCertificate.GetRank() <
+			timeout.PriorRankTimeoutCertificate.GetLatestQuorumCert().GetRank() {
+			return models.NewInvalidTimeoutErrorf(
+				timeout,
+				"timeout.LatestQuorumCertificate is older (rank=%d) than the QC in timeout.PriorRankTimeoutCertificate (rank=%d)",
+				timeout.LatestQuorumCertificate.GetRank(),
+				timeout.PriorRankTimeoutCertificate.GetLatestQuorumCert().GetRank(),
+			)
+		}
+	}
+	// (c) The TO must contain a proof that the sender legitimately entered
+	// timeout.Rank. Transitioning to round timeout.Rank is possible either by
+	// observing a QC or a TC for the previous round. If no QC is included, we
+	// require a TC to be present, which by check (1b) must be for the
+	// previous round.
+	lastRankSuccessful := timeout.Rank ==
+		timeout.LatestQuorumCertificate.GetRank()+1
+	if !lastRankSuccessful {
+		// The TO's sender did _not_ observe a QC for round timeout.Rank-1. Hence,
+		// it should include a TC for the previous round. Otherwise, the TO is
+		// invalid.
+		if timeout.PriorRankTimeoutCertificate == nil {
+			return models.NewInvalidTimeoutErrorf(timeout, "timeout must include TC")
+		}
+	}
+
+	// 2. Check if QC is valid
+	err := p.validator.ValidateQuorumCertificate(timeout.LatestQuorumCertificate)
+	if err != nil {
+		if models.IsInvalidQuorumCertificateError(err) {
+			return models.NewInvalidTimeoutErrorf(
+				timeout,
+				"included QC is invalid: %w",
+				err,
+			)
+		}
+		if errors.Is(err, models.ErrRankUnknown) {
+			// We require each replica to be bootstrapped with a QC pointing to a
+			// finalized state. Therefore, we should know the Rank for any QC.Rank
+			// and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is
+			// conceptually impossible, i.e. a symptom of an internal bug or invalid
+			// bootstrapping information.
+			return fmt.Errorf(
+				"no Rank information available for QC that was included in TO; symptom of internal bug or invalid bootstrapping information: %s",
+				err.Error(),
+			)
+		}
+		return fmt.Errorf("unexpected error when validating QC: %w", err)
+	}
+
+	// 3. If TC is included, it must be valid
+	if timeout.PriorRankTimeoutCertificate != nil {
+		err = p.validator.ValidateTimeoutCertificate(
+			timeout.PriorRankTimeoutCertificate,
+		)
+		if err != nil {
+			if models.IsInvalidTimeoutCertificateError(err) {
+				return models.NewInvalidTimeoutErrorf(
+					timeout,
+					"included TC is invalid: %w",
+					err,
+				)
+			}
+			if errors.Is(err, models.ErrRankUnknown) {
+				// We require each replica to be bootstrapped with a QC pointing to a
+				// finalized state. Therefore, we should know the Rank for any QC.Rank
+				// and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is
+				// conceptually impossible, i.e. a symptom of an internal bug or invalid
+				// bootstrapping information.
+				return fmt.Errorf(
+					"no Rank information available for TC that was included in TO; symptom of internal bug or invalid bootstrapping information: %s",
+					err.Error(),
+				)
+			}
+			return fmt.Errorf("unexpected error when validating TC: %w", err)
+		}
+	}
+	return nil
+}
+
+// buildTC performs aggregation of signatures when we have collected enough
+// weight for building TC. This function is run only once by a single worker.
+// Any error should be treated as exception.
+func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) buildTC() ( + *models.TimeoutCertificate, + error, +) { + signersData, aggregatedSig, err := p.sigAggregator.Aggregate() + if err != nil { + return nil, fmt.Errorf( + "could not aggregate multi message signature: %w", + err, + ) + } + + newestQCRanks := make([]uint64, 0, len(signersData)) + for _, data := range signersData { + newestQCRanks = append(newestQCRanks, data.NewestQCRank) + } + + // Note that `newestQC` can have a larger rank than any of the ranks included + // in `newestQCRanks`. This is because for a TO currently being processes + // following two operations are executed in separate steps: + // * updating the `newestQCTracker` with the QC from the TO + // * adding the TO's signature to `sigAggregator` + // Therefore, races are possible, where the `newestQCTracker` already knows of + // a QC with larger rank than the data stored in `sigAggregator`. + newestQC := p.newestQCTracker.NewestQC() + tc, err := p.voting.FinalizeTimeout( + context.TODO(), + p.rank, + *newestQC, + newestQCRanks, + aggregatedSig, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) + } + + return &tc, nil +} + +// signerIndicesFromIdentities encodes identities into signer indices. +// Any error should be treated as exception. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) signerIndicesFromIdentities( + signerIDs []models.WeightedIdentity, +) ([]byte, error) { + allIdentities, err := p.committee.IdentitiesByRank(p.rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve identities for rank %d: %w", + p.rank, + err, + ) + } + + signerSet := map[models.Identity]struct{}{} + for _, signerID := range signerIDs { + signerSet[signerID.Identity()] = struct{}{} + } + + signerIndices := make([]byte, (len(allIdentities)+7)/8) + for i, member := range allIdentities { + if _, ok := signerSet[member.Identity()]; ok { + signerIndices[i/8] |= 1 << (i % 8) + } + } + + return signerIndices, nil +} diff --git a/consensus/timeoutcollector/timeout_processor_test.go b/consensus/timeoutcollector/timeout_processor_test.go new file mode 100644 index 0000000..2a92e01 --- /dev/null +++ b/consensus/timeoutcollector/timeout_processor_test.go @@ -0,0 +1,678 @@ +package timeoutcollector + +import ( + "errors" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/validator" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" + "source.quilibrium.com/quilibrium/monorepo/consensus/votecollector" +) + +func TestTimeoutProcessor(t *testing.T) { + suite.Run(t, new(TimeoutProcessorTestSuite)) +} + +// TimeoutProcessorTestSuite is a test suite that holds mocked state for isolated testing of TimeoutProcessor. 
+type TimeoutProcessorTestSuite struct { + suite.Suite + + participants []models.WeightedIdentity + signer models.WeightedIdentity + rank uint64 + sigWeight uint64 + totalWeight atomic.Uint64 + committee *mocks.Replicas + validator *mocks.Validator[*helper.TestState, *helper.TestVote] + sigAggregator *mocks.TimeoutSignatureAggregator + notifier *mocks.TimeoutCollectorConsumer[*helper.TestVote] + processor *TimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer] + voting *mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer] +} + +func (s *TimeoutProcessorTestSuite) SetupTest() { + var err error + s.sigWeight = 1000 + s.committee = mocks.NewReplicas(s.T()) + s.validator = mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.sigAggregator = mocks.NewTimeoutSignatureAggregator(s.T()) + s.notifier = mocks.NewTimeoutCollectorConsumer[*helper.TestVote](s.T()) + s.participants = helper.WithWeightedIdentityList(11) + s.signer = s.participants[0] + s.rank = (uint64)(rand.Uint32() + 100) + s.totalWeight = *atomic.NewUint64(0) + s.voting = mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](s.T()) + + s.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(8000), nil).Maybe() + s.committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(8000), nil).Maybe() + s.committee.On("IdentityByRank", mock.Anything, mock.Anything).Return(s.signer, nil).Maybe() + s.sigAggregator.On("Rank").Return(s.rank).Maybe() + s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + s.totalWeight.Add(s.sigWeight) + }).Return(func(signerID models.Identity, sig []byte, newestQCRank uint64) uint64 { + return s.totalWeight.Load() + }, func(signerID models.Identity, sig []byte, newestQCRank uint64) error { + return nil + }).Maybe() + s.sigAggregator.On("TotalWeight").Return(func() uint64 { + return s.totalWeight.Load() + }).Maybe() + + s.processor, err = NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer]( + helper.Logger(), + s.committee, + s.validator, + s.sigAggregator, + s.notifier, + s.voting, + ) + require.NoError(s.T(), err) +} + +// TimeoutLastRankSuccessfulFixture creates a valid timeout if last rank has ended with QC. +func (s *TimeoutProcessorTestSuite) TimeoutLastRankSuccessfulFixture(opts ...func(*models.TimeoutState[*helper.TestVote])) *models.TimeoutState[*helper.TestVote] { + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutNewestQC[*helper.TestVote](helper.MakeQC(helper.WithQCRank(s.rank-1))), + helper.WithTimeoutVote(&helper.TestVote{ID: helper.MakeIdentity(), Rank: s.rank}), + helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil), + ) + + for _, opt := range opts { + opt(timeout) + } + + return timeout +} + +// TimeoutLastRankFailedFixture creates a valid timeout if last rank has ended with TC. 
+func (s *TimeoutProcessorTestSuite) TimeoutLastRankFailedFixture(opts ...func(*models.TimeoutState[*helper.TestVote])) *models.TimeoutState[*helper.TestVote] { + newestQC := helper.MakeQC(helper.WithQCRank(s.rank - 10)) + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutNewestQC[*helper.TestVote](newestQC), + helper.WithTimeoutVote(&helper.TestVote{ID: helper.MakeIdentity(), Rank: s.rank}), + helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](helper.MakeTC( + helper.WithTCRank(s.rank-1), + helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(newestQC.GetRank()))))), + ) + + for _, opt := range opts { + opt(timeout) + } + + return timeout +} + +// TestProcess_TimeoutNotForRank tests that TimeoutProcessor accepts only timeouts for the rank it was initialized with +// We expect dedicated sentinel errors for timeouts for different ranks (`ErrTimeoutForIncompatibleRank`). +func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutNotForRank() { + err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.Rank++ + })) + require.ErrorIs(s.T(), err, ErrTimeoutForIncompatibleRank) + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + + s.sigAggregator.AssertNotCalled(s.T(), "Verify") +} + +// TestProcess_TimeoutWithoutQC tests that TimeoutProcessor fails with models.InvalidTimeoutError if +// timeout doesn't contain QC. +func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutWithoutQC() { + err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.LatestQuorumCertificate = nil + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) +} + +// TestProcess_TimeoutNewerHighestQC tests that TimeoutProcessor fails with models.InvalidTimeoutError if +// timeout contains a QC with QC.Rank > timeout.Rank, QC can be only with lower rank than timeout. +func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutNewerHighestQC() { + s.Run("t.Rank == t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank", func() { + err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank = t.Rank + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + }) + s.Run("t.Rank < t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank", func() { + err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank = t.Rank + 1 + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + }) +} + +// TestProcess_PreviousRankTimeoutCertificateWrongRank tests that TimeoutProcessor fails with models.InvalidTimeoutError if +// timeout contains a proof that sender legitimately entered timeout.Rank but it has wrong rank meaning he used TC from previous rounds. 
+func (s *TimeoutProcessorTestSuite) TestProcess_PreviousRankTimeoutCertificateWrongRank() { + // if TC is included it must have timeout.Rank == timeout.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).Rank+1 + err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).Rank = t.Rank - 10 + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) +} + +// TestProcess_LastRankHighestQCInvalidRank tests that TimeoutProcessor fails with models.InvalidTimeoutError if +// timeout contains a proof that sender legitimately entered timeout.Rank but included HighestQC has older rank +// than QC included in TC. For honest nodes this shouldn't happen. +func (s *TimeoutProcessorTestSuite) TestProcess_LastRankHighestQCInvalidRank() { + err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).LatestQuorumCert.(*helper.TestQuorumCertificate).Rank = t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank + 1 // TC contains newer QC than Timeout State + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) +} + +// TestProcess_PreviousRankTimeoutCertificateRequiredButNotPresent tests that TimeoutProcessor fails with models.InvalidTimeoutError if +// timeout must contain a proof that sender legitimately entered timeout.Rank but doesn't have it. +func (s *TimeoutProcessorTestSuite) TestProcess_PreviousRankTimeoutCertificateRequiredButNotPresent() { + // if last rank is not successful(timeout.Rank != timeout.HighestQC.Rank+1) then this + // timeout must contain valid timeout.PriorRankTimeoutCertificate + err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) { + t.PriorRankTimeoutCertificate = nil + })) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) +} + +// TestProcess_IncludedQCInvalid tests that TimeoutProcessor correctly handles validation errors if +// timeout is well-formed but included QC is invalid +func (s *TimeoutProcessorTestSuite) TestProcess_IncludedQCInvalid() { + timeout := s.TimeoutLastRankSuccessfulFixture() + + s.Run("invalid-qc-sentinel", func() { + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(models.InvalidQuorumCertificateError{}).Once() + + err := s.processor.Process(timeout) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.True(s.T(), models.IsInvalidQuorumCertificateError(err)) + }) + s.Run("invalid-qc-exception", func() { + exception := errors.New("validate-qc-failed") + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(exception).Once() + + err := s.processor.Process(timeout) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + }) + s.Run("invalid-qc-err-rank-for-unknown-rank", func() { + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(models.ErrRankUnknown).Once() + + err := s.processor.Process(timeout) + require.False(s.T(), 
models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.NotErrorIs(s.T(), err, models.ErrRankUnknown) + }) +} + +// TestProcess_IncludedTCInvalid tests that TimeoutProcessor correctly handles validation errors if +// timeout is well-formed but included TC is invalid +func (s *TimeoutProcessorTestSuite) TestProcess_IncludedTCInvalid() { + timeout := s.TimeoutLastRankFailedFixture() + + s.Run("invalid-tc-sentinel", func() { + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil) + s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(models.InvalidTimeoutCertificateError{}) + + err := s.processor.Process(timeout) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.True(s.T(), models.IsInvalidTimeoutCertificateError(err)) + }) + s.Run("invalid-tc-exception", func() { + exception := errors.New("validate-tc-failed") + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil) + s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(exception).Once() + + err := s.processor.Process(timeout) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + }) + s.Run("invalid-tc-err-rank-for-unknown-rank", func() { + *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T()) + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil) + s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(models.ErrRankUnknown).Once() + + err := s.processor.Process(timeout) + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.NotErrorIs(s.T(), err, models.ErrRankUnknown) + }) +} + +// TestProcess_ValidTimeout tests that processing a valid timeout succeeds without error +func (s *TimeoutProcessorTestSuite) TestProcess_ValidTimeout() { + s.Run("happy-path", func() { + timeout := s.TimeoutLastRankSuccessfulFixture() + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil).Once() + err := s.processor.Process(timeout) + require.NoError(s.T(), err) + s.sigAggregator.AssertCalled(s.T(), "VerifyAndAdd", (*timeout.Vote).ID, (*timeout.Vote).Signature, timeout.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank) + }) + s.Run("recovery-path", func() { + timeout := s.TimeoutLastRankFailedFixture() + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil).Once() + s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(nil).Once() + err := s.processor.Process(timeout) + require.NoError(s.T(), err) + s.sigAggregator.AssertCalled(s.T(), "VerifyAndAdd", (*timeout.Vote).ID, (*timeout.Vote).Signature, timeout.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank) + }) +} + +// TestProcess_VerifyAndAddFailed tests different scenarios when TimeoutSignatureAggregator fails with error. +// We check all sentinel errors and exceptions in this scenario. 
+func (s *TimeoutProcessorTestSuite) TestProcess_VerifyAndAddFailed() { + timeout := s.TimeoutLastRankSuccessfulFixture() + s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil) + s.Run("invalid-signer", func() { + *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T()) + s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), models.NewInvalidSignerError(fmt.Errorf(""))).Once() + err := s.processor.Process(timeout) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.True(s.T(), models.IsInvalidSignerError(err)) + }) + s.Run("invalid-signature", func() { + *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T()) + s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), models.ErrInvalidSignature).Once() + err := s.processor.Process(timeout) + require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.ErrorIs(s.T(), err, models.ErrInvalidSignature) + }) + s.Run("duplicated-signer", func() { + *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T()) + s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), models.NewDuplicatedSignerErrorf("")).Once() + err := s.processor.Process(timeout) + require.True(s.T(), models.IsDuplicatedSignerError(err)) + // this shouldn't be wrapped in invalid timeout + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + }) + s.Run("verify-exception", func() { + *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T()) + exception := errors.New("verify-exception") + s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), exception).Once() + err := s.processor.Process(timeout) + require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err)) + require.ErrorIs(s.T(), err, exception) + }) +} + +// TestProcess_CreatingTC is a test for happy path single threaded signature aggregation and TC creation +// Each replica commits unique timeout state, this object gets processed by TimeoutProcessor. After collecting +// enough weight we expect a TC to be created. All further operations should be no-op, only one TC should be created. +func (s *TimeoutProcessorTestSuite) TestProcess_CreatingTC() { + // consider next situation: + // last successful rank was N, after this we weren't able to get a proposal with QC for + // len(participants) ranks, but in each rank QC was created(but not distributed). + // In rank N+len(participants) each replica contributes with unique highest QC. 
+ lastSuccessfulQC := helper.MakeQC(helper.WithQCRank(s.rank - uint64(len(s.participants)))) + previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.rank-1), + helper.WithTCNewestQC(lastSuccessfulQC)) + + var highQCRanks []uint64 + var timeouts []*models.TimeoutState[*helper.TestVote] + signers := s.participants[1:] + for i, signer := range signers { + qc := helper.MakeQC(helper.WithQCRank(lastSuccessfulQC.GetRank() + uint64(i+1))) + highQCRanks = append(highQCRanks, qc.GetRank()) + + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutNewestQC[*helper.TestVote](qc), + helper.WithTimeoutVote(&helper.TestVote{ID: signer.Identity(), Rank: s.rank}), + helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](previousRankTimeoutCert), + ) + timeouts = append(timeouts, timeout) + } + + // change tracker to require all except one signer to create TC + s.processor.tcTracker.minRequiredWeight = s.sigWeight * uint64(len(highQCRanks)) + + expectedSigBytes := make([]byte, 74) + expectedSig := &helper.TestAggregatedSignature{ + Signature: expectedSigBytes, + Bitmask: []byte{0b11111111, 0b00000111}, + PublicKey: make([]byte, 585), + } + s.validator.On("ValidateQuorumCertificate", mock.Anything).Return(nil) + s.validator.On("ValidateTimeoutCertificate", mock.Anything).Return(nil) + s.notifier.On("OnPartialTimeoutCertificateCreated", s.rank, mock.Anything, previousRankTimeoutCert).Return(nil).Once() + s.notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Run(func(args mock.Arguments) { + newestQC := timeouts[len(timeouts)-1].LatestQuorumCertificate + tc := args.Get(0).(models.TimeoutCertificate) + // ensure that TC contains correct fields + expectedTC := &helper.TestTimeoutCertificate{ + Rank: s.rank, + LatestRanks: highQCRanks, + LatestQuorumCert: newestQC, + AggregatedSignature: expectedSig, + } + require.Equal(s.T(), expectedTC, tc) + }).Return(nil).Once() + s.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{ + Filter: nil, + Rank: s.rank, + LatestRanks: highQCRanks, + LatestQuorumCert: timeouts[len(timeouts)-1].LatestQuorumCertificate, + AggregatedSignature: &helper.TestAggregatedSignature{ + PublicKey: make([]byte, 585), + Signature: make([]byte, 74), + Bitmask: []byte{0b11111111, 0b00000111}, + }, + }, nil) + + signersData := make([]consensus.TimeoutSignerInfo, 0) + for i, signer := range signers { + signersData = append(signersData, consensus.TimeoutSignerInfo{ + NewestQCRank: highQCRanks[i], + Signer: signer.Identity(), + }) + } + s.sigAggregator.On("Aggregate").Return(signersData, expectedSig, nil) + + for _, timeout := range timeouts { + err := s.processor.Process(timeout) + require.NoError(s.T(), err) + } + s.notifier.AssertExpectations(s.T()) + s.sigAggregator.AssertExpectations(s.T()) + + // add extra timeout, make sure we don't create another TC + // should be no-op + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutNewestQC[*helper.TestVote](helper.MakeQC(helper.WithQCRank(lastSuccessfulQC.GetRank()))), + helper.WithTimeoutVote(&helper.TestVote{ + ID: s.participants[0].Identity(), + Rank: s.rank, + }), + helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil), + ) + err := s.processor.Process(timeout) + require.NoError(s.T(), err) + + s.notifier.AssertExpectations(s.T()) + s.validator.AssertExpectations(s.T()) +} + 
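+// A note on why only one TC can ever be constructed: the gate in
+// TimeoutProcessor.Process is the one-shot accumulatedWeightTracker.Track,
+// which flips an atomic flag exactly once. A minimal sketch of that gate
+// (using the names from timeout_processor.go):
+//
+//	willBuildTC := p.tcTracker.Track(totalWeight) // true for exactly one caller
+//	if willBuildTC {
+//		tc, _ := p.buildTC() // error handling omitted in this sketch
+//		p.notifier.OnTimeoutCertificateConstructedFromTimeouts(*tc)
+//	}
+//
+// This is why the concurrent test below expects exactly one
+// OnTimeoutCertificateConstructedFromTimeouts notification regardless of how
+// many goroutines submit timeouts at the same time.
+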
+// TestProcess_ConcurrentCreatingTC tests a scenario where multiple goroutines process timeout at same time, +// we expect only one TC created in this scenario. +func (s *TimeoutProcessorTestSuite) TestProcess_ConcurrentCreatingTC() { + s.validator.On("ValidateQuorumCertificate", mock.Anything).Return(nil) + s.notifier.On("OnPartialTimeoutCertificateCreated", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + s.notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Return(nil).Once() + + signersData := make([]consensus.TimeoutSignerInfo, 0, len(s.participants)) + for _, signer := range s.participants { + signersData = append(signersData, consensus.TimeoutSignerInfo{ + NewestQCRank: 0, + Signer: signer.Identity(), + }) + } + // don't care about actual data + s.sigAggregator.On("Aggregate").Return(signersData, &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil) + var startupWg, shutdownWg sync.WaitGroup + + newestQC := helper.MakeQC(helper.WithQCRank(s.rank - 1)) + s.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{ + Filter: nil, + Rank: s.rank, + LatestRanks: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + LatestQuorumCert: newestQC, + AggregatedSignature: &helper.TestAggregatedSignature{ + PublicKey: make([]byte, 585), + Signature: make([]byte, 74), + Bitmask: []byte{0b11111111, 0b00000111}, + }, + }, nil) + + startupWg.Add(1) + // prepare goroutines, so they are ready to submit a timeout at roughly same time + for i, signer := range s.participants { + shutdownWg.Add(1) + timeout := helper.TimeoutStateFixture( + helper.WithTimeoutStateRank[*helper.TestVote](s.rank), + helper.WithTimeoutNewestQC[*helper.TestVote](newestQC), + helper.WithTimeoutVote(&helper.TestVote{ + ID: signer.Identity(), + Rank: s.rank, + }), + helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil), + ) + go func(i int, timeout *models.TimeoutState[*helper.TestVote]) { + defer shutdownWg.Done() + startupWg.Wait() + err := s.processor.Process(timeout) + require.NoError(s.T(), err) + }(i, timeout) + } + + startupWg.Done() + + // wait for all routines to finish + shutdownWg.Wait() +} + +// TestTimeoutProcessor_BuildVerifyTC tests a complete path from creating timeouts to collecting timeouts and then +// building & verifying TC. +// This test emulates the most complex scenario where TC consists of TimeoutStates that are structurally different. +// Let's consider a case where at some rank N consensus committee generated both QC and TC, resulting in nodes differently entering rank N+1. +// When constructing TC for rank N+1 some replicas will contribute with TO{Rank:N+1, NewestQC.Rank: N, PreviousRankTimeoutCertificate: nil} +// while others with TO{Rank:N+1, NewestQC.Rank: N-1, PreviousRankTimeoutCertificate: TC{Rank: N, NewestQC.Rank: N-1}}. +// This results in multi-message BLS signature with messages picked from set M={N-1,N}. +// We have to be able to construct a valid TC for rank N+1 and successfully validate it. +// We start by building a valid QC for rank N-1, that will be included in every TimeoutState at rank N. +// Right after we create a valid QC for rank N. We need to have valid QCs since TimeoutProcessor performs complete validation of TimeoutState. +// Then we create a valid cryptographically signed timeout for each signer. 
Created timeouts are feed to TimeoutProcessor +// which eventually creates a TC after seeing processing enough objects. After we verify if TC was correctly constructed +// and if it doesn't violate protocol rules. At this point we have QC for rank N-1, both QC and TC for rank N. +// After constructing valid objects we will repeat TC creation process and create a TC for rank N+1 where replicas contribute +// with structurally different TimeoutStates to make sure that TC is correctly built and can be successfully validated. +func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { + // signers hold objects that are created with private key and can sign votes and proposals + signers := make(map[models.Identity]*verification.Signer[*helper.TestState, *helper.TestVote, *helper.TestPeer]) + // prepare proving signers, each signer has its own private/public key pair + // identities must be in canonical order + provingSigners := helper.WithWeightedIdentityList(11) + leader := provingSigners[0] + rank := uint64(rand.Uint32() + 100) + + state := helper.MakeState(helper.WithStateRank[*helper.TestState](rank-1), + helper.WithStateProposer[*helper.TestState](leader.Identity())) + votingProviders := []*mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]{} + for _, s := range provingSigners { + v := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t) + votingProviders = append(votingProviders, v) + vote := &helper.TestVote{ + ID: s.Identity(), + Rank: rank - 1, + Signature: make([]byte, 74), + Timestamp: uint64(time.Now().UnixMilli()), + StateID: state.Identifier, + } + v.On("SignVote", mock.Anything, mock.Anything).Return(&vote, nil).Once() + signers[s.Identity()] = verification.NewSigner[*helper.TestState, *helper.TestVote, *helper.TestPeer](v) + } + + // utility function which generates a valid timeout for every signer + createTimeouts := func(participants []models.WeightedIdentity, rank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) []*models.TimeoutState[*helper.TestVote] { + timeouts := make([]*models.TimeoutState[*helper.TestVote], 0, len(participants)) + for _, signer := range participants { + timeout, err := signers[signer.Identity()].CreateTimeout(rank, newestQC, previousRankTimeoutCert) + require.NoError(t, err) + timeouts = append(timeouts, timeout) + } + return timeouts + } + + provingSignersSkeleton := provingSigners + + committee := mocks.NewDynamicCommittee(t) + committee.On("IdentitiesByRank", mock.Anything).Return(provingSignersSkeleton, nil) + committee.On("IdentitiesByState", mock.Anything).Return(provingSigners, nil) + committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(8000), nil) + committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(8000), nil) + + // create first QC for rank N-1, this will be our olderQC + olderQC := createRealQC(t, committee, provingSignersSkeleton, signers, state) + // now create a second QC for rank N, this will be our newest QC + nextState := helper.MakeState( + helper.WithStateRank[*helper.TestState](rank), + helper.WithStateProposer[*helper.TestState](leader.Identity()), + helper.WithStateQC[*helper.TestState](olderQC)) + + for i, vp := range votingProviders { + vote := &helper.TestVote{ + ID: provingSigners[i].Identity(), + Rank: rank, + Signature: make([]byte, 74), + Timestamp: uint64(time.Now().UnixMilli()), + StateID: nextState.Identifier, + } + vp.On("SignVote", mock.Anything, mock.Anything).Return(&vote, nil).Once() + 
tvote := &helper.TestVote{ + ID: provingSigners[i].Identity(), + Rank: rank, + Signature: make([]byte, 74), + Timestamp: uint64(time.Now().UnixMilli()), + } + vp.On("SignTimeoutVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&tvote, nil) + } + newestQC := createRealQC(t, committee, provingSignersSkeleton, signers, nextState) + + // At this point we have created two QCs for round N-1 and N. + // Next step is create a TC for rank N. + + // create verifier that will do crypto checks of created TC + verifier := &mocks.Verifier[*helper.TestVote]{} + verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil) + verifier.On("VerifyTimeoutCertificate", mock.Anything).Return(nil) + + // create validator which will do compliance and crypto checks of created TC + validator := validator.NewValidator[*helper.TestState, *helper.TestVote](committee, verifier) + + var previousRankTimeoutCert models.TimeoutCertificate + onTCCreated := func(args mock.Arguments) { + tc := args.Get(0).(models.TimeoutCertificate) + // check if resulted TC is valid + err := validator.ValidateTimeoutCertificate(tc) + require.NoError(t, err) + previousRankTimeoutCert = tc + } + + sigagg := mocks.NewSignatureAggregator(t) + sigagg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true) + sigagg.On("Aggregate", mock.Anything, mock.Anything).Return(&helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil) + + aggregator, err := NewTimeoutSignatureAggregator(sigagg, []byte{}, rank, provingSignersSkeleton, []byte{}) + require.NoError(t, err) + + notifier := mocks.NewTimeoutCollectorConsumer[*helper.TestVote](t) + notifier.On("OnPartialTimeoutCertificateCreated", rank, olderQC, nil).Return().Once() + notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Run(onTCCreated).Return().Once() + voting := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t) + voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{ + Filter: nil, + Rank: rank, + LatestRanks: []uint64{rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1}, + LatestQuorumCert: olderQC, + AggregatedSignature: &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, + }, nil) + processor, err := NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](helper.Logger(), committee, validator, aggregator, notifier, voting) + require.NoError(t, err) + + // last rank was successful, no previousRankTimeoutCert in this case + timeouts := createTimeouts(provingSignersSkeleton, rank, olderQC, nil) + for _, timeout := range timeouts { + err := processor.Process(timeout) + require.NoError(t, err) + } + + notifier.AssertExpectations(t) + + // at this point we have created QCs for rank N-1 and N additionally a TC for rank N, we can create TC for rank N+1 + // with timeout states containing both QC and TC for rank N + + aggregator, err = NewTimeoutSignatureAggregator(sigagg, []byte{}, rank+1, provingSignersSkeleton, []byte{}) + require.NoError(t, err) + + notifier = mocks.NewTimeoutCollectorConsumer[*helper.TestVote](t) + notifier.On("OnPartialTimeoutCertificateCreated", rank+1, newestQC, mock.Anything).Return() + notifier.On("OnTimeoutCertificateConstructedFromTimeouts", 
mock.Anything).Run(onTCCreated).Return().Once() + processor, err = NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](helper.Logger(), committee, validator, aggregator, notifier, voting) + require.NoError(t, err) + + // part of committee will use QC, another part TC, this will result in aggregated signature consisting + // of two types of messages with ranks N-1 and N representing the newest QC known to replicas. + timeoutsWithQC := createTimeouts(provingSignersSkeleton[:len(provingSignersSkeleton)/2], rank+1, newestQC, nil) + timeoutsWithTC := createTimeouts(provingSignersSkeleton[len(provingSignersSkeleton)/2:], rank+1, olderQC, previousRankTimeoutCert) + timeouts = append(timeoutsWithQC, timeoutsWithTC...) + for _, timeout := range timeouts { + err := processor.Process(timeout) + require.NoError(t, err) + } + + notifier.AssertExpectations(t) +} + +// createRealQC is a helper function which generates a properly signed QC with real signatures for given state. +func createRealQC( + t *testing.T, + committee consensus.DynamicCommittee, + signers []models.WeightedIdentity, + signerObjects map[models.Identity]*verification.Signer[*helper.TestState, *helper.TestVote, *helper.TestPeer], + state *models.State[*helper.TestState], +) models.QuorumCertificate { + leader := signers[0] + leaderVote, err := signerObjects[leader.Identity()].CreateVote(state) + require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState(state))), helper.WithVote[*helper.TestState](leaderVote)) + + var createdQC *models.QuorumCertificate + onQCCreated := func(qc models.QuorumCertificate) { + createdQC = &qc + } + + voteProcessorFactory := votecollector.NewVoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer](committee, onQCCreated) + sigagg := mocks.NewSignatureAggregator(t) + sigagg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true) + sigagg.On("Aggregate", mock.Anything, mock.Anything).Return(&helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil) + + votingProvider := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t) + votingProvider.On("FinalizeQuorumCertificate", mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestQuorumCertificate{ + Filter: nil, + Rank: state.Rank, + FrameNumber: state.Rank, + Selector: state.Identifier, + Timestamp: uint64(time.Now().UnixMilli()), + AggregatedSignature: &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, + }, nil) + voteProcessor, err := voteProcessorFactory.Create(helper.Logger(), []byte{}, proposal, []byte{}, sigagg, votingProvider) + require.NoError(t, err) + + for _, signer := range signers[1:] { + vote, err := signerObjects[signer.Identity()].CreateVote(state) + require.NoError(t, err) + err = voteProcessor.Process(vote) + require.NoError(t, err) + } + + require.NotNil(t, createdQC, "vote processor must create a valid QC at this point") + return *createdQC +} diff --git a/consensus/tracker/tracker.go b/consensus/tracker/tracker.go new file mode 100644 index 0000000..f316021 --- /dev/null +++ b/consensus/tracker/tracker.go @@ -0,0 +1,175 @@ +package tracker + +import ( + "unsafe" + + "go.uber.org/atomic" + "source.quilibrium.com/quilibrium/monorepo/consensus" + 
"source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// NewestQCTracker is a helper structure which keeps track of the newest QC +// (by rank) in concurrency safe way. +type NewestQCTracker struct { + newestQC *atomic.UnsafePointer +} + +func NewNewestQCTracker() *NewestQCTracker { + tracker := &NewestQCTracker{ + newestQC: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of NewestQC if the provided instance is newer +// (by rank). Concurrency safe +func (t *NewestQCTracker) Track(qc *models.QuorumCertificate) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestQC := t.NewestQC() + // verify that our update makes sense + if newestQC != nil && (*newestQC).GetRank() >= (*qc).GetRank() { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestQC.CompareAndSwap(unsafe.Pointer(newestQC), unsafe.Pointer(qc)) { + return true + } + } +} + +// NewestQC returns the newest QC(by rank) tracked. +// Concurrency safe. +func (t *NewestQCTracker) NewestQC() *models.QuorumCertificate { + return (*models.QuorumCertificate)(t.newestQC.Load()) +} + +// NewestTCTracker is a helper structure which keeps track of the newest TC (by +// rank) in concurrency safe way. +type NewestTCTracker struct { + newestTC *atomic.UnsafePointer +} + +func NewNewestTCTracker() *NewestTCTracker { + tracker := &NewestTCTracker{ + newestTC: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of NewestTC if the provided instance is newer (by +// rank). Concurrency safe. +func (t *NewestTCTracker) Track(tc *models.TimeoutCertificate) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestTC := t.NewestTC() + // verify that our update makes sense + if newestTC != nil && (*newestTC).GetRank() >= (*tc).GetRank() { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestTC.CompareAndSwap(unsafe.Pointer(newestTC), unsafe.Pointer(tc)) { + return true + } + } +} + +// NewestTC returns the newest TC(by rank) tracked. +// Concurrency safe. +func (t *NewestTCTracker) NewestTC() *models.TimeoutCertificate { + return (*models.TimeoutCertificate)(t.newestTC.Load()) +} + +// NewestStateTracker is a helper structure which keeps track of the newest +// state (by rank) in concurrency safe way. +type NewestStateTracker[StateT models.Unique] struct { + newestState *atomic.UnsafePointer +} + +func NewNewestStateTracker[StateT models.Unique]() *NewestStateTracker[StateT] { + tracker := &NewestStateTracker[StateT]{ + newestState: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of newestState if the provided instance is newer +// (by rank). Concurrency safe. +func (t *NewestStateTracker[StateT]) Track(state *models.State[StateT]) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. 
+ for { + // take a snapshot + newestState := t.NewestState() + // verify that our update makes sense + if newestState != nil && newestState.Rank >= state.Rank { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestState.CompareAndSwap( + unsafe.Pointer(newestState), + unsafe.Pointer(state), + ) { + return true + } + } +} + +// NewestState returns the newest state (by rank) tracked. +// Concurrency safe. +func (t *NewestStateTracker[StateT]) NewestState() *models.State[StateT] { + return (*models.State[StateT])(t.newestState.Load()) +} + +// NewestPartialTimeoutCertificateTracker tracks the newest partial TC (by rank) in a +// concurrency safe way. +type NewestPartialTimeoutCertificateTracker struct { + newestPartialTimeoutCertificate *atomic.UnsafePointer +} + +func NewNewestPartialTimeoutCertificateTracker() *NewestPartialTimeoutCertificateTracker { + tracker := &NewestPartialTimeoutCertificateTracker{ + newestPartialTimeoutCertificate: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of newestPartialTimeoutCertificate if the provided instance is +// newer (by rank). Concurrency safe. +func (t *NewestPartialTimeoutCertificateTracker) Track( + partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated, +) bool { + // To record the newest value that we have ever seen, we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestPartialTimeoutCertificate := t.NewestPartialTimeoutCertificate() + // verify that our partial TC is from a newer rank + if newestPartialTimeoutCertificate != nil && newestPartialTimeoutCertificate.Rank >= partialTimeoutCertificate.Rank { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestPartialTimeoutCertificate.CompareAndSwap( + unsafe.Pointer(newestPartialTimeoutCertificate), + unsafe.Pointer(partialTimeoutCertificate), + ) { + return true + } + } +} + +// NewestPartialTimeoutCertificate returns the newest partial TC (by rank) tracked. +// Concurrency safe. +func ( + t *NewestPartialTimeoutCertificateTracker, +) NewestPartialTimeoutCertificate() *consensus.PartialTimeoutCertificateCreated { + return (*consensus.PartialTimeoutCertificateCreated)(t.newestPartialTimeoutCertificate.Load()) +} diff --git a/consensus/tracker/tracker_test.go b/consensus/tracker/tracker_test.go new file mode 100644 index 0000000..0ca65b3 --- /dev/null +++ b/consensus/tracker/tracker_test.go @@ -0,0 +1,154 @@ +package tracker + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TestNewNewestQCTracker checks that new instance returns nil tracked value. +func TestNewNewestQCTracker(t *testing.T) { + tracker := NewNewestQCTracker() + require.Nil(t, tracker.NewestQC()) +} + +// TestNewestQCTracker_Track this test is needed to make sure that concurrent updates on NewestQCTracker are performed correctly, +// and it always tracks the newest QC, especially in scenario of shared access. This test is structured in a way that it +// starts multiple goroutines that will try to submit their QCs simultaneously to the tracker. 
Once all goroutines are started +// we will use a wait group to execute all operations as concurrent as possible, after that we will observe if resulted value +// is indeed expected. This test will run multiple times. +func TestNewestQCTracker_Track(t *testing.T) { + tracker := NewNewestQCTracker() + samples := 20 // number of concurrent updates per test case + times := 20 // number of times we run the test case + + // setup initial value + initialQC := helper.MakeQC(helper.WithQCRank(0)) + tracker.Track(&initialQC) + + for i := 0; i < times; i++ { + startRank := (*tracker.NewestQC()).GetRank() + var readyWg, startWg, doneWg sync.WaitGroup + startWg.Add(1) + readyWg.Add(samples) + doneWg.Add(samples) + for s := 0; s < samples; s++ { + qc := helper.MakeQC(helper.WithQCRank(startRank + uint64(s+1))) + go func(newestQC *models.QuorumCertificate) { + defer doneWg.Done() + readyWg.Done() + startWg.Wait() + tracker.Track(newestQC) + }(&qc) + } + + // wait for all goroutines to be ready + readyWg.Wait() + // since we have waited for all goroutines to be ready this `Done` will start all goroutines + startWg.Done() + // wait for all of them to finish execution + doneWg.Wait() + + // at this point tracker MUST have the newest QC + require.Equal(t, startRank+uint64(samples), (*tracker.NewestQC()).GetRank()) + } +} + +// TestNewNewestTCTracker checks that new instance returns nil tracked value. +func TestNewNewestTCTracker(t *testing.T) { + tracker := NewNewestTCTracker() + require.Nil(t, tracker.NewestTC()) +} + +// TestNewestTCTracker_Track this test is needed to make sure that concurrent updates on NewestTCTracker are performed correctly, +// and it always tracks the newest TC, especially in scenario of shared access. This test is structured in a way that it +// starts multiple goroutines that will try to submit their TCs simultaneously to the tracker. Once all goroutines are started +// we will use a wait group to execute all operations as concurrent as possible, after that we will observe if resulted value +// is indeed expected. This test will run multiple times. +func TestNewestTCTracker_Track(t *testing.T) { + tracker := NewNewestTCTracker() + samples := 20 + times := 20 + + // setup initial value + initialTc := helper.MakeTC(helper.WithTCRank(0)) + tracker.Track(&initialTc) + + for i := 0; i < times; i++ { + startRank := (*tracker.NewestTC()).GetRank() + var readyWg, startWg, doneWg sync.WaitGroup + startWg.Add(1) + readyWg.Add(samples) + doneWg.Add(samples) + for s := 0; s < samples; s++ { + tc := helper.MakeTC(helper.WithTCRank(startRank + uint64(s+1))) + go func(newestTC *models.TimeoutCertificate) { + defer doneWg.Done() + readyWg.Done() + startWg.Wait() + tracker.Track(newestTC) + }(&tc) + } + + // wait for all goroutines to be ready + readyWg.Wait() + // since we have waited for all goroutines to be ready this `Done` will start all goroutines + startWg.Done() + // wait for all of them to finish execution + doneWg.Wait() + + // at this point tracker MUST have the newest TC + require.Equal(t, startRank+uint64(samples), (*tracker.NewestTC()).GetRank()) + } +} + +// TestNewNewestStateTracker checks that new instance returns nil tracked value. 
+func TestNewNewestStateTracker(t *testing.T) { + tracker := NewNewestStateTracker[*helper.TestState]() + require.Nil(t, tracker.NewestState()) +} + +// TestNewestStateTracker_Track this test is needed to make sure that concurrent updates on NewestStateTracker are performed correctly, +// and it always tracks the newest state, especially in scenario of shared access. This test is structured in a way that it +// starts multiple goroutines that will try to submit their states simultaneously to the tracker. Once all goroutines are started +// we will use a wait group to execute all operations as concurrent as possible, after that we will observe if resulted value +// is indeed expected. This test will run multiple times. +func TestNewestStateTracker_Track(t *testing.T) { + tracker := NewNewestStateTracker[*helper.TestState]() + samples := 20 // number of concurrent updates per test case + times := 20 // number of times we run the test case + + // setup initial value + tracker.Track(helper.MakeState(helper.WithStateRank[*helper.TestState](0))) + + for i := 0; i < times; i++ { + startRank := tracker.NewestState().Rank + var readyWg, startWg, doneWg sync.WaitGroup + startWg.Add(1) + readyWg.Add(samples) + doneWg.Add(samples) + for s := 0; s < samples; s++ { + state := helper.MakeState(helper.WithStateRank[*helper.TestState](startRank + uint64(s+1))) + go func(newestState *models.State[*helper.TestState]) { + defer doneWg.Done() + readyWg.Done() + startWg.Wait() + tracker.Track(newestState) + }(state) + } + + // wait for all goroutines to be ready + readyWg.Wait() + // since we have waited for all goroutines to be ready this `Done` will start all goroutines + startWg.Done() + // wait for all of them to finish execution + doneWg.Wait() + + // at this point tracker MUST have the newest state + require.Equal(t, startRank+uint64(samples), tracker.NewestState().Rank) + } +} diff --git a/consensus/validator/validator.go b/consensus/validator/validator.go new file mode 100644 index 0000000..fa097d1 --- /dev/null +++ b/consensus/validator/validator.go @@ -0,0 +1,566 @@ +package validator + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator is responsible for validating QC, State and Vote +type Validator[StateT models.Unique, VoteT models.Unique] struct { + committee consensus.Replicas + verifier consensus.Verifier[VoteT] +} + +var _ consensus.Validator[*nilUnique, *nilUnique] = (*Validator[*nilUnique, *nilUnique])(nil) + +// New creates a new Validator instance +func NewValidator[StateT models.Unique, VoteT models.Unique]( + committee consensus.Replicas, + verifier consensus.Verifier[VoteT], +) *Validator[StateT, VoteT] { + return &Validator[StateT, VoteT]{ + committee: committee, + verifier: verifier, + } +} + +// ValidateTimeoutCertificate validates the TimeoutCertificate `TC`. +// During normal operations, the following error returns are expected: +// - models.InvalidTCError if the TC is invalid +// - models.ErrRankUnknown if the TC refers unknown rank +// +// Any other error should be treated as exception +func (v *Validator[StateT, VoteT]) ValidateTimeoutCertificate( + tc models.TimeoutCertificate, +) error { + newestQC := tc.GetLatestQuorumCert() + if newestQC == nil { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC must include a QC but found nil"), + ) + } + + // The TC's rank cannot be smaller than the rank of the QC it contains. 
+ // Note: we specifically allow for the TC to have the same rank as the highest + // QC. This is useful as a fallback, because it allows replicas other than the + // designated leader to also collect votes and generate a QC. + if tc.GetRank() < newestQC.GetRank() { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC's QC cannot be newer than the TC's rank"), + ) + } + + // 1. Check if there is super-majority of votes + allParticipants, err := v.committee.IdentitiesByRank(tc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get consensus participants at rank %d: %w", + tc.GetRank(), + err, + ) + } + + signerIDs := []models.WeightedIdentity{} + sigIndices := tc.GetAggregatedSignature().GetBitmask() + totalWeight := uint64(0) + if len(sigIndices) < (len(allParticipants)+7)/8 { + return models.NewInsufficientSignaturesErrorf("insufficient signatures") + } + for i, member := range allParticipants { + if sigIndices[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + signerIDs = append(signerIDs, member) + totalWeight += member.Weight() + } + } + + // determine whether signers reach minimally required weight threshold for + // consensus + threshold, err := v.committee.QuorumThresholdForRank(tc.GetRank()) + if err != nil { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf( + "could not get weight threshold for rank %d: %w", + tc.GetRank(), + err, + ), + ) + } + + if totalWeight < threshold { + return newInvalidTimeoutCertificateError(tc, fmt.Errorf( + "tc signers have insufficient weight of %d (required=%d)", + totalWeight, + threshold, + )) + } + + // Verify multi-message BLS sig of TC, by far the most expensive check + err = v.verifier.VerifyTimeoutCertificate(tc) + if err != nil { + // Considerations about other errors that `VerifyTC` could return: + // * models.InsufficientSignaturesError: we previously checked the total + // weight of all signers meets the supermajority threshold, which is a + // _positive_ number. Hence, there must be at least one signer. Hence, + // receiving this error would be a symptom of a fatal internal bug. + switch { + case models.IsInvalidFormatError(err): + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC's signature data has an invalid structure: %w", err), + ) + case errors.Is(err, models.ErrInvalidSignature): + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC contains invalid signature(s): %w", err), + ) + default: + return fmt.Errorf( + "cannot verify tc's aggregated signature (tc.Rank: %d): %w", + tc.GetRank(), + err, + ) + } + } + + // verifying that tc.NewestQC is the QC with the highest rank. + // Note: A byzantine TC could include `nil` for tc.NewestQCRanks, in which + // case `tc.NewestQCRanks[0]` would panic. Though, per API specification + // `verifier.VerifyTC(…)` should return a `models.InvalidFormatError` if + // `signers` and `tc.NewestQCRanks` have different length. Hence, the + // following code is safe only if it is executed + // 1. _after_ checking the quorum threshold (thereby we guarantee that + // `signers` is not empty); and + // 2. _after_ `verifier.VerifyTC(…)`, which enforces that `signers` and + // `tc.NewestQCRanks` have identical length. + // Only then we can be sure that `tc.NewestQCRanks` cannot be nil. 
+	newestQCRank := tc.GetLatestRanks()[0]
+	for _, rank := range tc.GetLatestRanks() {
+		if newestQCRank < rank {
+			newestQCRank = rank
+		}
+	}
+	if newestQCRank > tc.GetLatestQuorumCert().GetRank() {
+		return newInvalidTimeoutCertificateError(
+			tc,
+			fmt.Errorf(
+				"included QC (rank=%d) must be equal to or higher than the highest contributed rank: %d",
+				tc.GetLatestQuorumCert().GetRank(),
+				newestQCRank,
+			),
+		)
+	}
+
+	// Validate QC
+	err = v.ValidateQuorumCertificate(newestQC)
+	if err != nil {
+		if models.IsInvalidQuorumCertificateError(err) {
+			return newInvalidTimeoutCertificateError(tc, fmt.Errorf(
+				"invalid QC included in TC: %w",
+				err,
+			))
+		}
+		if errors.Is(err, models.ErrRankUnknown) {
+			// We require each replica to be bootstrapped with a QC pointing to a
+			// finalized state. Consensus safety rules guarantee that a QC at least as
+			// new as the root QC must be contained in any TC. This is because the TC
+			// must include signatures from a supermajority of replicas, including at
+			// least one honest replica, which attest to their locally highest known
+			// QC. Hence, any QC included in a TC must be the root QC or newer.
+			// Therefore, we should know the rank for any QC we encounter. Receiving
+			// a `models.ErrRankUnknown` is conceptually impossible, i.e. a symptom of
+			// an internal bug or invalid bootstrapping information.
+			return fmt.Errorf(
+				"no rank information available for QC that was included in TC; symptom of internal bug or invalid bootstrapping information: %s",
+				err.Error(),
+			)
+		}
+		return fmt.Errorf(
+			"unexpected internal error while verifying the QC included in the TC: %w",
+			err,
+		)
+	}
+
+	return nil
+}
+
+// ValidateQuorumCertificate validates the Quorum Certificate `qc`.
+// During normal operations, the following error returns are expected:
+// - models.InvalidQuorumCertificateError if the QC is invalid
+// - models.ErrRankUnknown if the QC refers to an unknown rank
+//
+// Any other error should be treated as an exception.
+func (v *Validator[StateT, VoteT]) ValidateQuorumCertificate(
+	qc models.QuorumCertificate,
+) error {
+	// Retrieve the initial identities of consensus participants for this rank,
+	// and those that signed the QC. IdentitiesByRank contains all nodes that were
+	// authorized to sign during this rank. Ejection and dynamic weight
+	// adjustments are not taken into account here. By using a rank-static set
+	// of authorized signers, we can check QC validity without needing all
+	// ancestor states.
+ allParticipants, err := v.committee.IdentitiesByRank(qc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get consensus participants at rank %d: %w", + qc.GetRank(), + err, + ) + } + + signerIDs := []models.WeightedIdentity{} + sigIndices := qc.GetAggregatedSignature().GetBitmask() + totalWeight := uint64(0) + if len(sigIndices) < (len(allParticipants)+7)/8 { + return newInvalidQuorumCertificateError( + qc, + models.NewInsufficientSignaturesErrorf("insufficient signatures"), + ) + } + for i, member := range allParticipants { + if sigIndices[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + signerIDs = append(signerIDs, member) + totalWeight += member.Weight() + } + } + + // determine whether signers reach minimally required weight threshold for + // consensus + threshold, err := v.committee.QuorumThresholdForRank(qc.GetRank()) + if err != nil { + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf( + "could not get weight threshold for rank %d: %w", + qc.GetRank(), + err, + ), + ) + } + + if totalWeight < threshold { + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf( + "QC signers have insufficient weight of %d (required=%d)", + totalWeight, + threshold, + ), + ) + } + + // verify whether the signature bytes are valid for the QC + err = v.verifier.VerifyQuorumCertificate(qc) + if err != nil { + // Considerations about other errors that `VerifyQC` could return: + // * models.InvalidSignerError + // * models.InsufficientSignaturesError: we previously checked the total + // weight of all signers meets the supermajority threshold, which is a + // _positive_ number. Hence, there must be at least one signer. Hence, + // receiving this error would be a symptom of a fatal internal bug. + switch { + case models.IsInvalidFormatError(err): + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf("QC's signature data has an invalid structure: %w", err), + ) + case errors.Is(err, models.ErrInvalidSignature): + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf("QC contains invalid signature(s): %w", err), + ) + case errors.Is(err, models.ErrRankUnknown): + // We have earlier queried the Identities for the QC's rank, which must + // have returned proper values, otherwise, we wouldn't reach this code. + // Therefore, it should be impossible for `verifier.VerifyQC` to return + // ErrRankUnknown. To avoid confusion with expected sentinel errors, we + // only preserve the error messages here, but not the error types. + return fmt.Errorf( + "internal error, as querying identities for rank %d succeeded earlier but now the rank supposedly belongs to an unknown rank: %s", + qc.GetRank(), + err.Error(), + ) + default: + return fmt.Errorf( + "cannot verify qc's aggregated signature (qc.Identifier: %x): %w", + qc.Identity(), + err, + ) + } + } + + return nil +} + +// ValidateProposal validates the state proposal +// A state is considered as valid if it's a valid extension of existing forks. 
+// Note that it doesn't check whether it conflicts with the finalized state.
+// During normal operations, the following error returns are expected:
+// - models.InvalidProposalError if the state is invalid
+// - models.ErrRankUnknown if the proposal refers to an unknown rank
+//
+// Any other error should be treated as an exception.
+func (v *Validator[StateT, VoteT]) ValidateProposal(
+	proposal *models.SignedProposal[StateT, VoteT],
+) error {
+	qc := proposal.State.ParentQuorumCertificate
+	state := proposal.State
+
+	// validate the proposer's vote and get their identity
+	vote, err := proposal.ProposerVote()
+	if err != nil {
+		return fmt.Errorf("could not get vote from proposer vote: %w", err)
+	}
+	_, err = v.ValidateVote(vote)
+	if models.IsInvalidVoteError[VoteT](err) {
+		return models.NewInvalidProposalErrorf(
+			proposal,
+			"invalid proposer signature: %w",
+			err,
+		)
+	}
+	if err != nil {
+		return fmt.Errorf(
+			"error verifying leader signature for state %x: %w",
+			state.Identifier,
+			err,
+		)
+	}
+
+	// check that the proposer is the leader for the proposed state's rank
+	leader, err := v.committee.LeaderForRank(state.Rank)
+	if err != nil {
+		return fmt.Errorf(
+			"error determining leader for state %x: %w",
+			state.Identifier,
+			err,
+		)
+	}
+	if leader != state.ProposerID {
+		return models.NewInvalidProposalErrorf(
+			proposal,
+			"proposer %s is not the leader (%s) for rank %d",
+			state.ProposerID,
+			leader,
+			state.Rank,
+		)
+	}
+
+	// The State must contain a proof that the primary legitimately entered the
+	// respective rank. Transitioning to proposal.State.Rank is possible either by
+	// observing a QC or a TC for the previous rank. If and only if the QC is
+	// _not_ for the previous rank, we require a TC for the previous rank to be
+	// present.
+	lastRankSuccessful := proposal.State.Rank ==
+		proposal.State.ParentQuorumCertificate.GetRank()+1
+	if !lastRankSuccessful {
+		// check if the proposal is correctly structured
+		if proposal.PreviousRankTimeoutCertificate == nil {
+			return models.NewInvalidProposalErrorf(
+				proposal,
+				"QC in state is not for previous rank, so expecting a TC but none is included in state",
+			)
+		}
+
+		// check if the included TC is for the previous rank
+		if proposal.State.Rank !=
+			proposal.PreviousRankTimeoutCertificate.GetRank()+1 {
+			return models.NewInvalidProposalErrorf(
+				proposal,
+				"QC in state is not for previous rank, so expecting a TC for rank %d but got TC for rank %d",
+				proposal.State.Rank-1,
+				proposal.PreviousRankTimeoutCertificate.GetRank(),
+			)
+		}
+
+		// Check if the proposal extends either the newest QC specified in the TC,
+		// or a newer QC. In edge cases a leader may construct a TC and a QC
+		// concurrently, such that the TC contains an older QC - in these cases we
+		// still want to build on the newest QC, so this case is allowed.
+		if proposal.State.ParentQuorumCertificate.GetRank() <
+			proposal.PreviousRankTimeoutCertificate.GetLatestQuorumCert().GetRank() {
+			return models.NewInvalidProposalErrorf(
+				proposal,
+				"TC in state contains a newer QC than the state itself, which is a protocol violation",
+			)
+		}
+	} else if proposal.PreviousRankTimeoutCertificate != nil {
+		// last rank ended with a QC; including a TC is a protocol violation
+		return models.NewInvalidProposalErrorf(
+			proposal,
+			"last rank has ended with QC but proposal includes PreviousRankTimeoutCertificate",
+		)
+	}
+
+	// Check signatures, keeping the most expensive check for last
+
+	// check if the included QC is valid
+	err = v.ValidateQuorumCertificate(qc)
+	if err != nil {
+		if models.IsInvalidQuorumCertificateError(err) {
+			return models.NewInvalidProposalErrorf(proposal, "invalid qc included: %w", err)
+		}
+		if errors.Is(err, models.ErrRankUnknown) {
+			// We require each replica to be bootstrapped with a QC pointing to a
+			// finalized state. Therefore, receiving a `models.ErrRankUnknown` is
+			// conceptually impossible, i.e. a symptom of an internal bug or invalid
+			// bootstrapping information.
+			return fmt.Errorf(
+				"no rank information available for QC that was included in proposal; symptom of internal bug or invalid bootstrapping information: %s",
+				err.Error(),
+			)
+		}
+		return fmt.Errorf("unexpected error verifying qc: %w", err)
+	}
+
+	if !lastRankSuccessful {
+		// check if the included TC is valid
+		err = v.ValidateTimeoutCertificate(proposal.PreviousRankTimeoutCertificate)
+		if err != nil {
+			if models.IsInvalidTimeoutCertificateError(err) {
+				return models.NewInvalidProposalErrorf(
+					proposal,
+					"proposal's TC is not valid: %w",
+					err,
+				)
+			}
+			if errors.Is(err, models.ErrRankUnknown) {
+				// We require each replica to be bootstrapped with a QC pointing to a
+				// finalized state. Therefore, we should know the rank for any QC.Rank
+				// and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is
+				// conceptually impossible, i.e. a symptom of an internal bug or invalid
+				// bootstrapping information.
+				return fmt.Errorf(
+					"no rank information available for QC that was included in TC; symptom of internal bug or invalid bootstrapping information: %s",
+					err.Error(),
+				)
+			}
+			return fmt.Errorf(
+				"unexpected internal error while verifying the TC included in the state: %w",
+				err,
+			)
+		}
+	}
+
+	return nil
+}
+
+// ValidateVote validates the given vote and returns the identity of the voter
+// who signed it.
+// During normal operations, the following error returns are expected:
+// - models.InvalidVoteError for invalid votes
+// - models.ErrRankUnknown if the vote refers to an unknown rank
+//
+// Any other error should be treated as an exception.
+func (v *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) (
+	*models.WeightedIdentity,
+	error,
+) {
+	voter, err := v.committee.IdentityByRank(
+		(*vote).GetRank(),
+		(*vote).Identity(),
+	)
+	if models.IsInvalidSignerError(err) {
+		return nil, newInvalidVoteError(vote, err)
+	}
+	if err != nil {
+		return nil, fmt.Errorf(
+			"error retrieving voter Identity at rank %d: %w",
+			(*vote).GetRank(),
+			err,
+		)
+	}
+
+	// check whether the signature data is valid for the vote in the hotstuff
+	// context
+	err = v.verifier.VerifyVote(vote)
+	if err != nil {
+		// Theoretically, `VerifyVote` could also return a
+		// `models.InvalidSignerError`.
+ if models.IsInvalidFormatError(err) || + errors.Is(err, models.ErrInvalidSignature) { + return nil, newInvalidVoteError(vote, err) + } + if errors.Is(err, models.ErrRankUnknown) { + return nil, fmt.Errorf( + "no rank information available for vote; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return nil, fmt.Errorf( + "cannot verify signature for vote (%x): %w", + (*vote).Identity(), + err, + ) + } + + return &voter, nil +} + +func newInvalidQuorumCertificateError( + qc models.QuorumCertificate, + err error, +) error { + return models.InvalidQuorumCertificateError{ + Identifier: qc.Identity(), + Rank: qc.GetRank(), + Err: err, + } +} + +func newInvalidTimeoutCertificateError( + tc models.TimeoutCertificate, + err error, +) error { + return models.InvalidTimeoutCertificateError{ + Rank: tc.GetRank(), + Err: err, + } +} + +func newInvalidVoteError[VoteT models.Unique](vote *VoteT, err error) error { + return models.InvalidVoteError[VoteT]{ + Vote: vote, + Err: err, + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/validator/validator_test.go b/consensus/validator/validator_test.go new file mode 100644 index 0000000..b8d16cd --- /dev/null +++ b/consensus/validator/validator_test.go @@ -0,0 +1,933 @@ +package validator + +import ( + "errors" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestValidateProposal(t *testing.T) { + suite.Run(t, new(ProposalSuite)) +} + +type ProposalSuite struct { + suite.Suite + participants []models.WeightedIdentity + indices []byte + leader models.WeightedIdentity + finalized uint64 + parent *models.State[*helper.TestState] + state *models.State[*helper.TestState] + voters []models.WeightedIdentity + proposal *models.SignedProposal[*helper.TestState, *helper.TestVote] + vote *helper.TestVote + voter models.WeightedIdentity + committee *mocks.Replicas + verifier *mocks.Verifier[*helper.TestVote] + validator *Validator[*helper.TestState, *helper.TestVote] +} + +func (ps *ProposalSuite) SetupTest() { + // the leader is a random node for now + ps.finalized = uint64(rand.Uint32() + 1) + ps.participants = helper.WithWeightedIdentityList(8) + ps.leader = ps.participants[0] + + // the parent is the last finalized state, followed directly by a state from the leader + ps.parent = helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.finalized), + ) + + var 
err error + + ps.indices = []byte{0b11111111} + + ps.state = helper.MakeState( + helper.WithStateRank[*helper.TestState](ps.finalized+1), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentState(ps.parent), + helper.WithParentSigners[*helper.TestState](ps.indices), + ) + + ps.voters = ps.participants + vt := &helper.TestVote{ + Rank: ps.state.Rank, + ID: ps.leader.Identity(), + Signature: make([]byte, 74), + StateID: ps.state.Identifier, + } + ps.proposal = helper.MakeSignedProposal( + helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState(ps.state))), + helper.WithVote[*helper.TestState, *helper.TestVote](&vt), + ) + vote, err := ps.proposal.ProposerVote() + require.NoError(ps.T(), err) + ps.vote = *vote + ps.voter = ps.leader + + // set up the mocked hotstuff Replicas state + ps.committee = &mocks.Replicas{} + ps.committee.On("LeaderForRank", ps.state.Rank).Return(ps.leader.Identity(), nil) + ps.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(8000), nil) + ps.committee.On("IdentitiesByRank", mock.Anything).Return( + func(_ uint64) []models.WeightedIdentity { + return ps.participants + }, + nil, + ) + for _, participant := range ps.participants { + ps.committee.On("IdentityByRank", mock.Anything, participant.Identity()).Return(participant, nil) + } + + // set up the mocked verifier + ps.verifier = &mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(nil).Maybe() + ps.verifier.On("VerifyVote", &ps.vote).Return(nil).Maybe() + + // set up the validator with the mocked dependencies + ps.validator = NewValidator[*helper.TestState, *helper.TestVote](ps.committee, ps.verifier) +} + +func (ps *ProposalSuite) TestProposalOK() { + err := ps.validator.ValidateProposal(ps.proposal) + assert.NoError(ps.T(), err, "a valid proposal should be accepted") +} + +func (ps *ProposalSuite) TestProposalSignatureError() { + + // change the verifier to error on signature validation with unspecific error + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(nil) + ps.verifier.On("VerifyVote", &ps.vote).Return(errors.New("dummy error")) + + // check that validation now fails + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err, "a proposal should be rejected if signature check fails") + + // check that the error is not one that leads to invalid + assert.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if signature check fails, we should not receive an ErrorInvalidState") +} + +func (ps *ProposalSuite) TestProposalSignatureInvalidFormat() { + + // change the verifier to fail signature validation with InvalidFormatError error + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(nil) + ps.verifier.On("VerifyVote", &ps.vote).Return(models.NewInvalidFormatErrorf("")) + + // check that validation now fails + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err, "a proposal with an invalid signature should be rejected") + + // check that the error is an invalid proposal error to allow creating slashing challenge + assert.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if signature is invalid, we should generate an invalid error") +} + +func (ps *ProposalSuite) 
TestProposalSignatureInvalid() { + + // change the verifier to fail signature validation + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(nil) + ps.verifier.On("VerifyVote", &ps.vote).Return(models.ErrInvalidSignature) + + // check that validation now fails + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err, "a proposal with an invalid signature should be rejected") + + // check that the error is an invalid proposal error to allow creating slashing challenge + assert.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if signature is invalid, we should generate an invalid error") +} + +func (ps *ProposalSuite) TestProposalWrongLeader() { + + // change the consensus.Replicas to return a different leader + *ps.committee = mocks.Replicas{} + ps.committee.On("LeaderForRank", ps.state.Rank).Return(ps.participants[1].Identity(), nil) + for _, participant := range ps.participants { + ps.committee.On("IdentityByRank", mock.Anything, participant.Identity()).Return(participant, nil) + } + + // check that validation fails now + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err, "a proposal from the wrong proposer should be rejected") + + // check that the error is an invalid proposal error to allow creating slashing challenge + assert.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if the proposal has wrong proposer, we should generate a invalid error") +} + +// TestProposalQCInvalid checks that Validator handles the verifier's error returns correctly. +// In case of `models.InvalidFormatError` and models.ErrInvalidSignature`, we expect the Validator +// to recognize those as an invalid QC, i.e. returns an `models.InvalidProposalError`. +// In contrast, unexpected exceptions and `models.InvalidSignerError` should _not_ be +// interpreted as a sign of an invalid QC. 
+func (ps *ProposalSuite) TestProposalQCInvalid() { + ps.Run("invalid-signature", func() { + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return( + fmt.Errorf("invalid qc: %w", models.ErrInvalidSignature)) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails and the failure case is recognized as an invalid state + err := ps.validator.ValidateProposal(ps.proposal) + assert.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if the state's QC signature is invalid, an ErrorInvalidState error should be raised") + }) + + ps.Run("invalid-format", func() { + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(models.NewInvalidFormatErrorf("invalid qc")) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails and the failure case is recognized as an invalid state + err := ps.validator.ValidateProposal(ps.proposal) + assert.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if the state's QC has an invalid format, an ErrorInvalidState error should be raised") + }) + + ps.Run("invalid-signer", func() { + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return( + fmt.Errorf("invalid qc: %w", models.NewInvalidSignerErrorf(""))) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails and the failure case is recognized as an invalid state + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err) + assert.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + }) + + ps.Run("unknown-exception", func() { + exception := errors.New("exception") + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(exception) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails and the failure case is recognized as an invalid state + err := ps.validator.ValidateProposal(ps.proposal) + assert.ErrorIs(ps.T(), err, exception) + assert.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + }) + + ps.Run("verify-qc-err-rank-for-unknown-rank", func() { + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(models.ErrRankUnknown) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails and the failure is considered internal exception and NOT an InvalidProposal error + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err) + assert.NotErrorIs(ps.T(), err, models.ErrRankUnknown) + assert.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + }) +} + +func (ps *ProposalSuite) TestProposalQCError() { + + // change verifier to fail on QC validation + *ps.verifier = mocks.Verifier[*helper.TestVote]{} + ps.verifier.On("VerifyQuorumCertificate", ps.state.ParentQuorumCertificate).Return(fmt.Errorf("some exception")) + ps.verifier.On("VerifyVote", &ps.vote).Return(nil) + + // check that validation fails now + err := ps.validator.ValidateProposal(ps.proposal) + assert.Error(ps.T(), err, "a proposal with an invalid QC should be rejected") + + // check that the error 
is an invalid proposal error to allow creating slashing challenge + assert.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), "if we can't verify the QC, we should not generate a invalid error") +} + +// TestProposalWithPreviousRankTimeoutCertificate tests different scenarios where last rank has ended with TC +// this requires including a valid PreviousRankTimeoutCertificate. +func (ps *ProposalSuite) TestProposalWithPreviousRankTimeoutCertificate() { + // assume all proposals are created by valid leader + ps.verifier.On("VerifyVote", mock.Anything).Return(nil) + ps.committee.On("LeaderForRank", mock.Anything).Return(ps.leader.Identity(), nil) + + ps.Run("happy-path", func() { + state := helper.MakeState( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: ps.state.Rank + 2, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(ps.state.ParentQuorumCertificate))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + ps.verifier.On("VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate).Return(nil).Once() + err := ps.validator.ValidateProposal(proposal) + require.NoError(ps.T(), err) + }) + ps.Run("no-tc", func() { + state := helper.MakeState( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: ps.state.Rank + 2, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + // in this case proposal without PreviousRankTimeoutCertificate is considered invalid + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + ps.verifier.AssertNotCalled(ps.T(), "VerifyQuorumCertificate") + ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.Run("tc-for-wrong-rank", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: ps.state.Rank + 2, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + 
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+10), // PreviousRankTimeoutCertificate.Rank must be equal to State.Rank-1 + helper.WithTCNewestQC(ps.state.ParentQuorumCertificate))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + ps.verifier.AssertNotCalled(ps.T(), "VerifyQuorumCertificate") + ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.Run("proposal-not-safe-to-extend", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + // proposal is not safe to extend because included QC.Rank is higher that State.QC.Rank + helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(ps.state.Rank+1))))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + ps.verifier.AssertNotCalled(ps.T(), "VerifyQuorumCertificate") + ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.Run("included-tc-highest-qc-not-highest", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(ps.state.ParentQuorumCertificate), + )), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + ps.verifier.On("VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate).Return(nil).Once() + + // this is considered an invalid TC, because highest QC's rank is not equal to max{NewestQCRanks} + proposal.PreviousRankTimeoutCertificate.(*helper.TestTimeoutCertificate).LatestRanks[0] = proposal.PreviousRankTimeoutCertificate.GetLatestQuorumCert().GetRank() + 1 + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err) && models.IsInvalidTimeoutCertificateError(err)) + 
ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.Run("included-tc-threshold-not-reached", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + // TC is signed by only one signer - insufficient to reach weight threshold + insufficientSignerIndices := []byte{0b00000001} + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(insufficientSignerIndices), // one signer is not enough to reach threshold + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(ps.state.ParentQuorumCertificate), + )), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err) && models.IsInvalidTimeoutCertificateError(err)) + ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.Run("included-tc-highest-qc-invalid", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + // QC included in TC has rank below QC included in proposal + qc := helper.MakeQC( + helper.WithQCRank(ps.state.ParentQuorumCertificate.GetRank()-1), + helper.WithQCSigners(ps.indices)) + + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(qc))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + ps.verifier.On("VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate).Return(nil).Once() + ps.verifier.On("VerifyQuorumCertificate", qc).Return(models.ErrInvalidSignature).Once() + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err) && models.IsInvalidTimeoutCertificateError(err)) + }) + ps.Run("verify-qc-err-rank-for-unknown-rank", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + newestQC := helper.MakeQC( + helper.WithQCRank(ps.state.ParentQuorumCertificate.GetRank()-2), + helper.WithQCSigners(ps.indices)) + vote := &helper.TestVote{ + Rank: 
state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(newestQC))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + ps.verifier.On("VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate).Return(nil).Once() + // Validating QC included in TC returns ErrRankUnknown + ps.verifier.On("VerifyQuorumCertificate", newestQC).Return(models.ErrRankUnknown).Once() + err := ps.validator.ValidateProposal(proposal) + require.Error(ps.T(), err) + require.False(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + require.False(ps.T(), models.IsInvalidTimeoutCertificateError(err)) + require.NotErrorIs(ps.T(), err, models.ErrRankUnknown) + }) + ps.Run("included-tc-invalid-sig", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.state.Rank+2), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithStateQC[*helper.TestState](ps.state.ParentQuorumCertificate)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC( + helper.WithTCSigners(ps.indices), + helper.WithTCRank(ps.state.Rank+1), + helper.WithTCNewestQC(ps.state.ParentQuorumCertificate))), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + ps.verifier.On("VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate).Return(models.ErrInvalidSignature).Once() + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err) && models.IsInvalidTimeoutCertificateError(err)) + ps.verifier.AssertCalled(ps.T(), "VerifyTimeoutCertificate", proposal.PreviousRankTimeoutCertificate) + }) + ps.Run("last-rank-successful-but-includes-tc", func() { + state := helper.MakeState[*helper.TestState]( + helper.WithStateRank[*helper.TestState](ps.finalized+1), + helper.WithStateProposer[*helper.TestState](ps.leader.Identity()), + helper.WithParentSigners[*helper.TestState](ps.indices), + helper.WithParentState(ps.parent)) + vote := &helper.TestVote{ + Rank: state.Rank, + ID: ps.leader.Identity(), + StateID: state.Identifier, + Signature: make([]byte, 74), + } + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState(state), + helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC()), + )), helper.WithVote[*helper.TestState, *helper.TestVote](&vote)) + err := ps.validator.ValidateProposal(proposal) + require.True(ps.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + ps.verifier.AssertNotCalled(ps.T(), "VerifyTimeoutCertificate") + }) + ps.verifier.AssertExpectations(ps.T()) +} + 
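Taken together, these subtests pin down the error contract of ValidateProposal: byzantine inputs must surface as models.InvalidProposalError (and, where a bad TC is the cause, also satisfy models.IsInvalidTimeoutCertificateError), while internal exceptions and leaked sentinels such as models.ErrRankUnknown must be escalated unchanged rather than misclassified. The sketch below illustrates how calling code might branch on that contract; it is an illustrative assumption and not part of this patch — the handleProposal name is hypothetical, and only ValidateProposal and the models error predicates come from the code under test.

// handleProposal is a hypothetical caller-side sketch (not part of this patch)
// showing the error handling the tests above imply.
func handleProposal(
	validator *Validator[*helper.TestState, *helper.TestVote],
	proposal *models.SignedProposal[*helper.TestState, *helper.TestVote],
) error {
	err := validator.ValidateProposal(proposal)
	switch {
	case err == nil:
		return nil // proposal may be processed further
	case models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err):
		// Expected failure for byzantine input: drop (and optionally report)
		// the proposal. Callers could additionally check
		// models.IsInvalidTimeoutCertificateError(err) to attribute the
		// rejection to a bad TC. This is not an internal error of the node.
		return nil
	default:
		// Unexpected exception, e.g. a leaked sentinel such as
		// models.ErrRankUnknown: escalate unchanged to the surrounding logic.
		return fmt.Errorf("could not validate proposal: %w", err)
	}
}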
+func TestValidateVote(t *testing.T) { + suite.Run(t, new(VoteSuite)) +} + +type VoteSuite struct { + suite.Suite + signer models.WeightedIdentity + state *models.State[*helper.TestState] + vote *helper.TestVote + verifier *mocks.Verifier[*helper.TestVote] + committee *mocks.Replicas + validator *Validator[*helper.TestState, *helper.TestVote] +} + +func (vs *VoteSuite) SetupTest() { + + // create a random signing identity + vs.signer = helper.WithWeightedIdentityList(1)[0] + + // create a state that should be signed + vs.state = helper.MakeState[*helper.TestState]() + + // create a vote for this state + vs.vote = &helper.TestVote{ + Rank: vs.state.Rank, + ID: vs.signer.Identity(), + StateID: vs.state.Identifier, + Signature: []byte{}, + } + + // set up the mocked verifier + vs.verifier = &mocks.Verifier[*helper.TestVote]{} + vs.verifier.On("VerifyVote", &vs.vote).Return(nil) + + // the leader for the state rank is the correct one + vs.committee = &mocks.Replicas{} + vs.committee.On("IdentityByRank", mock.Anything, vs.signer.Identity()).Return(vs.signer, nil) + + // set up the validator with the mocked dependencies + vs.validator = NewValidator[*helper.TestState, *helper.TestVote](vs.committee, vs.verifier) +} + +// TestVoteOK checks the happy case, which is the default for the suite +func (vs *VoteSuite) TestVoteOK() { + _, err := vs.validator.ValidateVote(&vs.vote) + assert.NoError(vs.T(), err, "a valid vote should be accepted") +} + +// TestVoteSignatureError checks that the Validator does not misinterpret +// unexpected exceptions for invalid votes. +func (vs *VoteSuite) TestVoteSignatureError() { + *vs.verifier = mocks.Verifier[*helper.TestVote]{} + vs.verifier.On("VerifyVote", &vs.vote).Return(fmt.Errorf("some exception")) + + // check that the vote is no longer validated + _, err := vs.validator.ValidateVote(&vs.vote) + assert.Error(vs.T(), err, "a vote with error on signature validation should be rejected") + assert.False(vs.T(), models.IsInvalidVoteError[*helper.TestVote](err), "internal exception should not be interpreted as invalid vote") +} + +// TestVoteVerifyVote_ErrRankUnknown tests if ValidateVote correctly handles VerifyVote's ErrRankUnknown sentinel error +// Validator shouldn't return a sentinel error here because this behavior is a symptom of internal bug, this behavior is not expected. +func (vs *VoteSuite) TestVoteVerifyVote_ErrRankUnknown() { + *vs.verifier = mocks.Verifier[*helper.TestVote]{} + vs.verifier.On("VerifyVote", &vs.vote).Return(models.ErrRankUnknown) + + // check that the vote is no longer validated + _, err := vs.validator.ValidateVote(&vs.vote) + assert.Error(vs.T(), err) + assert.False(vs.T(), models.IsInvalidVoteError[*helper.TestVote](err), "internal exception should not be interpreted as invalid vote") + assert.NotErrorIs(vs.T(), err, models.ErrRankUnknown, "we don't expect a sentinel error here") +} + +// TestVoteInvalidSignerID checks that the Validator correctly handles a vote +// with a SignerID that does not correspond to a valid consensus participant. +// In this case, the `consensus.DynamicCommittee` returns a `models.InvalidSignerError`, +// which the Validator should recognize as a symptom for an invalid vote. +// Hence, we expect the validator to return a `models.InvalidVoteError`. 
+func (vs *VoteSuite) TestVoteInvalidSignerID() { + *vs.committee = mocks.Replicas{} + vs.committee.On("IdentityByRank", vs.state.Rank, vs.vote.ID).Return(nil, models.NewInvalidSignerErrorf("")) + + // A `models.InvalidSignerError` from the committee should be interpreted as + // the Vote being invalid, i.e. we expect an InvalidVoteError to be returned + _, err := vs.validator.ValidateVote(&vs.vote) + assert.Error(vs.T(), err, "a vote with unknown SignerID should be rejected") + assert.True(vs.T(), models.IsInvalidVoteError[*helper.TestVote](err), "a vote with unknown SignerID should be rejected") +} + +// TestVoteSignatureInvalid checks that the Validator correctly handles votes +// with cryptographically invalid consensus. In this case, the `consensus.Verifier` +// returns a `models.ErrInvalidSignature`, which the Validator should recognize as +// a symptom for an invalid vote. +// Hence, we expect the validator to return a `models.InvalidVoteError`. +func (vs *VoteSuite) TestVoteSignatureInvalid() { + *vs.verifier = mocks.Verifier[*helper.TestVote]{} + vs.verifier.On("VerifyVote", &vs.vote).Return(fmt.Errorf("staking sig is invalid: %w", models.ErrInvalidSignature)) + + // A `models.ErrInvalidSignature` from the `consensus.Verifier` should be interpreted as + // the Vote being invalid, i.e. we expect an InvalidVoteError to be returned + _, err := vs.validator.ValidateVote(&vs.vote) + assert.Error(vs.T(), err, "a vote with an invalid signature should be rejected") + assert.True(vs.T(), models.IsInvalidVoteError[*helper.TestVote](err), "a vote with an invalid signature should be rejected") +} + +func TestValidateQuorumCertificate(t *testing.T) { + suite.Run(t, new(QCSuite)) +} + +type QCSuite struct { + suite.Suite + participants []models.WeightedIdentity + signers []models.WeightedIdentity + state *models.State[*helper.TestState] + qc models.QuorumCertificate + committee *mocks.Replicas + verifier *mocks.Verifier[*helper.TestVote] + validator *Validator[*helper.TestState, *helper.TestVote] +} + +func (qs *QCSuite) SetupTest() { + // create a list of 10 nodes with 1-weight each + qs.participants = helper.WithWeightedIdentityList(10) + + // signers are a qualified majority at 7 + qs.signers = qs.participants[:7] + + // create a state that has the signers in its QC + qs.state = helper.MakeState[*helper.TestState]() + indices := []byte{0b01111111, 0b00000000} + + qs.qc = helper.MakeQC(helper.WithQCState[*helper.TestState](qs.state), helper.WithQCSigners(indices)) + + // return the correct participants and identities from rank state + qs.committee = &mocks.Replicas{} + qs.committee.On("IdentitiesByRank", mock.Anything).Return( + func(_ uint64) []models.WeightedIdentity { + return qs.participants + }, + nil, + ) + qs.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(7000), nil) + + // set up the mocked verifier to verify the QC correctly + qs.verifier = &mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return(nil) + + // set up the validator with the mocked dependencies + qs.validator = NewValidator[*helper.TestState, *helper.TestVote](qs.committee, qs.verifier) +} + +// TestQCOK verifies the default happy case +func (qs *QCSuite) TestQCOK() { + + // check the default happy case passes + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.NoError(qs.T(), err, "a valid QC should be accepted") +} + +// TestQCRetrievingParticipantsError tests that validation errors if: +// there is an error retrieving identities of consensus 
participants +func (qs *QCSuite) TestQCRetrievingParticipantsError() { + // change the consensus.DynamicCommittee to fail on retrieving participants + *qs.committee = mocks.Replicas{} + qs.committee.On("IdentitiesByRank", mock.Anything).Return(qs.participants, errors.New("FATAL internal error")) + + // verifier should escalate unspecific internal error to surrounding logic, but NOT as ErrorInvalidQC + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.Error(qs.T(), err, "unspecific error when retrieving consensus participants should be escalated to surrounding logic") + assert.False(qs.T(), models.IsInvalidQuorumCertificateError(err), "unspecific internal errors should not result in ErrorInvalidQC error") +} + +// TestQCSignersError tests that a qc fails validation if: +// QC signer's have insufficient weight (but are all valid consensus participants otherwise) +func (qs *QCSuite) TestQCInsufficientWeight() { + // signers only have weight 6 out of 10 total (NOT have a supermajority) + qs.signers = qs.participants[:6] + indices := []byte{0b00111111, 0b00000000} + + qs.qc = helper.MakeQC(helper.WithQCState[*helper.TestState](qs.state), helper.WithQCSigners(indices)) + + // the QC should not be validated anymore + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.Error(qs.T(), err, "a QC should be rejected if it has insufficient voted weight") + + // we should get a threshold error to bubble up for extra info + assert.True(qs.T(), models.IsInvalidQuorumCertificateError(err), "if there is insufficient voted weight, an invalid state error should be raised") +} + +// TestQCSignatureError tests that validation errors if: +// there is an unspecific internal error while validating the signature +func (qs *QCSuite) TestQCSignatureError() { + + // set up the verifier to fail QC verification + *qs.verifier = mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return(errors.New("dummy error")) + + // verifier should escalate unspecific internal error to surrounding logic, but NOT as ErrorInvalidQC + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.Error(qs.T(), err, "unspecific sig verification error should be escalated to surrounding logic") + assert.False(qs.T(), models.IsInvalidQuorumCertificateError(err), "unspecific internal errors should not result in ErrorInvalidQC error") +} + +// TestQCSignatureInvalid verifies that the Validator correctly handles the models.ErrInvalidSignature. +// This error return from `Verifier.VerifyQuorumCertificate` is an expected failure case in case of a byzantine input, where +// one of the signatures in the QC is broken. Hence, the Validator should wrap it as InvalidProposalError. +func (qs *QCSuite) TestQCSignatureInvalid() { + // change the verifier to fail the QC signature + *qs.verifier = mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return(fmt.Errorf("invalid qc: %w", models.ErrInvalidSignature)) + + // the QC should no longer pass validation + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.True(qs.T(), models.IsInvalidQuorumCertificateError(err), "if the signature is invalid an ErrorInvalidQC error should be raised") +} + +// TestQCVerifyQuorumCertificate_ErrRankUnknown tests if ValidateQuorumCertificate correctly handles VerifyQuorumCertificate's ErrRankUnknown sentinel error +// Validator shouldn't return a sentinel error here because this behavior is a symptom of internal bug, this behavior is not expected. 
+func (qs *QCSuite) TestQCVerifyQuorumCertificate_ErrRankUnknown() { + *qs.verifier = mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return(models.ErrRankUnknown) + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.Error(qs.T(), err) + assert.False(qs.T(), models.IsInvalidQuorumCertificateError(err), "we don't expect a sentinel error here") + assert.NotErrorIs(qs.T(), err, models.ErrRankUnknown, "we don't expect a sentinel error here") +} + +// TestQCSignatureInvalidFormat verifies that the Validator correctly handles the models.InvalidFormatError. +// This error return from `Verifier.VerifyQuorumCertificate` is an expected failure case in case of a byzantine input, where +// some binary vector (e.g. `sigData`) is broken. Hence, the Validator should wrap it as InvalidProposalError. +func (qs *QCSuite) TestQCSignatureInvalidFormat() { + // change the verifier to fail the QC signature + *qs.verifier = mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return(models.NewInvalidFormatErrorf("invalid sigType")) + + // the QC should no longer pass validation + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.True(qs.T(), models.IsInvalidQuorumCertificateError(err), "if the signature has an invalid format, an ErrorInvalidQC error should be raised") +} + +// TestQCEmptySigners verifies that the Validator correctly handles the models.InsufficientSignaturesError: +// In the validator, we previously checked the total weight of all signers meets the supermajority threshold, +// which is a _positive_ number. Hence, there must be at least one signer. Hence, `Verifier.VerifyQuorumCertificate` +// returning this error would be a symptom of a fatal internal bug. The Validator should _not_ interpret +// this error as an invalid QC / invalid state, i.e. it should _not_ return an `InvalidProposalError`. 
+func (qs *QCSuite) TestQCEmptySigners() { + *qs.verifier = mocks.Verifier[*helper.TestVote]{} + qs.verifier.On("VerifyQuorumCertificate", qs.qc).Return( + fmt.Errorf("%w", models.NewInsufficientSignaturesErrorf(""))) + + // the Validator should _not_ interpret this as a invalid QC, but as an internal error + err := qs.validator.ValidateQuorumCertificate(qs.qc) + assert.True(qs.T(), models.IsInsufficientSignaturesError(err)) // unexpected error should be wrapped and propagated upwards + assert.False(qs.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err), err, "should _not_ interpret this as a invalid QC, but as an internal error") +} + +func TestValidateTimeoutCertificate(t *testing.T) { + suite.Run(t, new(TCSuite)) +} + +type TCSuite struct { + suite.Suite + participants []models.WeightedIdentity + signers []models.WeightedIdentity + indices []byte + state *models.State[*helper.TestState] + tc models.TimeoutCertificate + committee *mocks.DynamicCommittee + verifier *mocks.Verifier[*helper.TestVote] + validator *Validator[*helper.TestState, *helper.TestVote] +} + +func (s *TCSuite) SetupTest() { + + // create a list of 10 nodes with 1-weight each + s.participants = helper.WithWeightedIdentityList(10) + + // signers are a qualified majority at 7 + s.signers = s.participants[:7] + + var err error + s.indices = []byte{0b01111111, 0b00000000} + require.NoError(s.T(), err) + + rank := uint64(int(rand.Uint32()) + len(s.participants)) + + highQCRanks := make([]uint64, 0, len(s.signers)) + for i := range s.signers { + highQCRanks = append(highQCRanks, rank-uint64(i)-1) + } + + rand.Shuffle(len(highQCRanks), func(i, j int) { + highQCRanks[i], highQCRanks[j] = highQCRanks[j], highQCRanks[i] + }) + + // create a state that has the signers in its QC + parent := helper.MakeState[*helper.TestState](helper.WithStateRank[*helper.TestState](rank - 1)) + s.state = helper.MakeState[*helper.TestState](helper.WithStateRank[*helper.TestState](rank), + helper.WithParentState(parent), + helper.WithParentSigners[*helper.TestState](s.indices)) + s.tc = helper.MakeTC(helper.WithTCNewestQC(s.state.ParentQuorumCertificate), + helper.WithTCRank(rank+1), + helper.WithTCSigners(s.indices), + helper.WithTCHighQCRanks(highQCRanks)) + + // return the correct participants and identities from rank state + s.committee = &mocks.DynamicCommittee{} + s.committee.On("IdentitiesByRank", mock.Anything, mock.Anything).Return( + func(rank uint64) []models.WeightedIdentity { + return s.participants + }, + nil, + ) + s.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(7000), nil) + + s.verifier = &mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyQuorumCertificate", s.state.ParentQuorumCertificate).Return(nil) + + // set up the validator with the mocked dependencies + s.validator = NewValidator[*helper.TestState, *helper.TestVote](s.committee, s.verifier) +} + +// TestTCOk tests if happy-path returns correct result +func (s *TCSuite) TestTCOk() { + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(nil).Once() + + // check the default happy case passes + err := s.validator.ValidateTimeoutCertificate(s.tc) + assert.NoError(s.T(), err, "a valid TC should be accepted") +} + +// TestTCNewestQCFromFuture tests if correct error is returned when included QC is higher than TC's rank +func (s *TCSuite) TestTCNewestQCFromFuture() { + // highest QC from future rank + s.tc.(*helper.TestTimeoutCertificate).LatestQuorumCert.(*helper.TestQuorumCertificate).Rank = s.tc.GetRank() + 1 + err := 
s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if NewestQC.Rank > TC.Rank, an ErrorInvalidTC error should be raised") +} + +// TestTCNewestQCIsNotHighest tests if correct error is returned when included QC is not highest +func (s *TCSuite) TestTCNewestQCIsNotHighest() { + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(nil).Once() + + // highest QC rank is not equal to max(TONewestQCRanks) + s.tc.(*helper.TestTimeoutCertificate).LatestRanks[0] = s.tc.GetLatestQuorumCert().GetRank() + 1 + err := s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if max(highQCRanks) != NewestQC.Rank, an ErrorInvalidTC error should be raised") +} + +// TestTCInvalidSigners tests if correct error is returned when signers are invalid +func (s *TCSuite) TestTCInvalidSigners() { + s.participants = s.participants[:6] // remove participant[6+] from the list of valid consensus participant + err := s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if some signers are invalid consensus participants, an ErrorInvalidTC error should be raised") +} + +// TestTCThresholdNotReached tests if correct error is returned when TC's singers don't have enough weight +func (s *TCSuite) TestTCThresholdNotReached() { + // signers only have weight 1 out of 10 total (NOT have a supermajority) + s.signers = s.participants[:1] + indices := []byte{0b00000001, 0b00000000} + + s.tc.(*helper.TestTimeoutCertificate).AggregatedSignature.(*helper.TestAggregatedSignature).Bitmask = indices + + // adjust signers to be less than total weight + err := s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if signers don't have enough weight, an ErrorInvalidTC error should be raised") +} + +// TestTCInvalidNewestQC tests if correct error is returned when included highest QC is invalid +func (s *TCSuite) TestTCInvalidNewestQC() { + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(nil).Once() + s.verifier.On("VerifyQuorumCertificate", s.tc.GetLatestQuorumCert()).Return(models.NewInvalidFormatErrorf("invalid qc")).Once() + err := s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if included QC is invalid, an ErrorInvalidTC error should be raised") +} + +// TestTCVerifyQuorumCertificate_ErrRankUnknown tests if ValidateTimeoutCertificate correctly handles VerifyQuorumCertificate's ErrRankUnknown sentinel error +// Validator shouldn't return a sentinel error here because this behavior is a symptom of internal bug, this behavior is not expected. 
+func (s *TCSuite) TestTCVerifyQuorumCertificate_ErrRankUnknown() { + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(nil).Once() + s.verifier.On("VerifyQuorumCertificate", s.tc.GetLatestQuorumCert()).Return(models.ErrRankUnknown).Once() + err := s.validator.ValidateTimeoutCertificate(s.tc) // the QC should not be validated anymore + assert.Error(s.T(), err) + assert.False(s.T(), models.IsInvalidTimeoutCertificateError(err), "we don't expect a sentinel error here") + assert.NotErrorIs(s.T(), err, models.ErrRankUnknown, "we don't expect a sentinel error here") +} + +// TestTCInvalidSignature tests a few scenarios when the signature is invalid or TC signers is malformed +func (s *TCSuite) TestTCInvalidSignature() { + s.Run("insufficient-signatures", func() { + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil).Once() + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(models.NewInsufficientSignaturesErrorf("")).Once() + + // the Validator should _not_ interpret this as an invalid TC, but as an internal error + err := s.validator.ValidateTimeoutCertificate(s.tc) + assert.True(s.T(), models.IsInsufficientSignaturesError(err)) // unexpected error should be wrapped and propagated upwards + assert.False(s.T(), models.IsInvalidTimeoutCertificateError(err), "should _not_ interpret this as a invalid TC, but as an internal error") + }) + s.Run("invalid-format", func() { + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil).Once() + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(models.NewInvalidFormatErrorf("")).Once() + err := s.validator.ValidateTimeoutCertificate(s.tc) + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if included TC's inputs are invalid, an ErrorInvalidTC error should be raised") + }) + s.Run("invalid-signature", func() { + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil).Once() + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(models.ErrInvalidSignature).Once() + err := s.validator.ValidateTimeoutCertificate(s.tc) + assert.True(s.T(), models.IsInvalidTimeoutCertificateError(err), "if included TC's signature is invalid, an ErrorInvalidTC error should be raised") + }) + s.Run("verify-sig-exception", func() { + exception := errors.New("verify-sig-exception") + *s.verifier = mocks.Verifier[*helper.TestVote]{} + s.verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil).Once() + s.verifier.On("VerifyTimeoutCertificate", s.tc).Return(exception).Once() + err := s.validator.ValidateTimeoutCertificate(s.tc) + assert.ErrorIs(s.T(), err, exception, "if included TC's signature is invalid, an exception should be propagated") + assert.False(s.T(), models.IsInvalidTimeoutCertificateError(err)) + }) +} diff --git a/consensus/verification/common.go b/consensus/verification/common.go new file mode 100644 index 0000000..fa79301 --- /dev/null +++ b/consensus/verification/common.go @@ -0,0 +1,128 @@ +package verification + +import ( + "encoding/binary" + "fmt" + "slices" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// MakeVoteMessage generates the message we have to sign in order to be able +// to verify signatures without having the full state. 
To that effect, each data +// structure that is signed contains the sometimes redundant rank number and +// state ID; this allows us to create the signed message and verify the signed +// message without having the full state contents. +func MakeVoteMessage( + filter []byte, + rank uint64, + stateID models.Identity, +) []byte { + return slices.Concat( + filter, + binary.BigEndian.AppendUint64( + slices.Clone([]byte(stateID)), + rank, + ), + ) +} + +// MakeTimeoutMessage generates the message we have to sign in order to be able +// to contribute to Active Pacemaker protocol. Each replica signs with the +// highest QC rank known to that replica. +func MakeTimeoutMessage( + filter []byte, + rank uint64, + newestQCRank uint64, +) []byte { + msg := make([]byte, 16) + binary.BigEndian.PutUint64(msg[:8], rank) + binary.BigEndian.PutUint64(msg[8:], newestQCRank) + + return slices.Concat(filter, msg) +} + +// verifyAggregatedSignatureOneMessage encapsulates the logic of verifying an +// aggregated signature under the same message. Proofs of possession of all +// input keys are assumed to be valid (checked by the protocol). This logic is +// commonly used across the different implementations of `consensus.Verifier`. +// In this context, all signatures apply to states. +// Return values: +// - nil if `aggregatedSig` is valid against the public keys and message. +// - models.InsufficientSignaturesError if `pubKeys` is empty or nil. +// - models.ErrInvalidSignature if the signature is invalid against the public +// keys and message. +// - unexpected errors should be treated as symptoms of bugs or uncovered +// edge cases in the logic (i.e. as fatal) +func verifyAggregatedSignatureOneMessage( + validator consensus.SignatureAggregator, + aggregatedSig models.AggregatedSignature, + dsTag []byte, + msg []byte, // message to verify against +) error { + valid := validator.VerifySignatureRaw( + aggregatedSig.GetPubKey(), + aggregatedSig.GetSignature(), + msg, + dsTag, + ) + if !valid { + return fmt.Errorf( + "invalid aggregated signature: %w", + models.ErrInvalidSignature, + ) + } + return nil +} + +// verifyTCSignatureManyMessages checks cryptographic validity of the TC's +// signature w.r.t. multiple messages and public keys. Proofs of possession of +// all input keys are assumed to be valid (checked by the protocol). This logic +// is commonly used across the different implementations of +// `consensus.Verifier`. It is the responsibility of the calling code to ensure +// that all `pks` are authorized, without duplicates. The caller must also make +// sure the `hasher` passed is non nil and has 128-bytes outputs. +// Return values: +// - nil if `sigData` is cryptographically valid +// - models.InsufficientSignaturesError if `pks` is empty. +// - models.InvalidFormatError if `pks`/`highQCRanks` have differing lengths +// - models.ErrInvalidSignature if a signature is invalid +// - unexpected errors should be treated as symptoms of bugs or uncovered +// edge cases in the logic (i.e. 
as fatal) +func verifyTCSignatureManyMessages( + validator consensus.SignatureAggregator, + filter []byte, + pks [][]byte, + sigData []byte, + rank uint64, + highQCRanks []uint64, + dsTag []byte, +) error { + if len(pks) != len(highQCRanks) { + return models.NewInvalidFormatErrorf("public keys and highQCRanks mismatch") + } + + messages := make([][]byte, 0, len(pks)) + for i := 0; i < len(pks); i++ { + messages = append( + messages, + MakeTimeoutMessage(filter, rank, highQCRanks[i]), + ) + } + + valid := validator.VerifySignatureMultiMessage( + pks, + sigData, + messages, + dsTag, + ) + if !valid { + return fmt.Errorf( + "invalid aggregated TC signature for rank %d: %w", + rank, + models.ErrInvalidSignature, + ) + } + return nil +} diff --git a/consensus/verification/signer.go b/consensus/verification/signer.go new file mode 100644 index 0000000..cb90b28 --- /dev/null +++ b/consensus/verification/signer.go @@ -0,0 +1,120 @@ +package verification + +import ( + "context" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer creates votes for the collector clusters consensus. When a +// participant votes for a state, it _always_ provide the proving signature +// as part of their vote. Signer is responsible for creating correctly +// signed proposals and votes. +type Signer[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + voter consensus.VotingProvider[StateT, VoteT, PeerIDT] +} + +var _ consensus.Signer[*nilUnique, *nilUnique] = (*Signer[*nilUnique, *nilUnique, *nilUnique])(nil) + +// NewSigner instantiates a Signer, which signs votes and +// proposals with the proving key. The generated signatures are aggregatable. +func NewSigner[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + voter consensus.VotingProvider[StateT, VoteT, PeerIDT], +) *Signer[StateT, VoteT, PeerIDT] { + + sc := &Signer[StateT, VoteT, PeerIDT]{ + voter: voter, + } + return sc +} + +// CreateVote will create a vote with a proving signature for the given state. +func (c *Signer[StateT, VoteT, PeerIDT]) CreateVote( + state *models.State[StateT], +) (*VoteT, error) { + + // create the signature data + vote, err := c.voter.SignVote(context.TODO(), state) + if err != nil { + return nil, fmt.Errorf("could not create signature: %w", err) + } + + return vote, nil +} + +// CreateTimeout will create a signed timeout state for the given rank. +func (c *Signer[StateT, VoteT, PeerIDT]) CreateTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.TimeoutState[VoteT], error) { + // create timeout state specific message + vote, err := c.voter.SignTimeoutVote( + context.TODO(), + newestQC.GetFilter(), + curRank, + newestQC.GetRank(), + ) + if err != nil { + return nil, fmt.Errorf( + "could not generate signature for timeout state at rank %d: %w", + curRank, + err, + ) + } + + timeout := &models.TimeoutState[VoteT]{ + Rank: curRank, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + Vote: vote, + TimeoutTick: 0, + } + + return timeout, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. 
+func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/vote_aggregator.go b/consensus/vote_aggregator.go new file mode 100644 index 0000000..eb1b083 --- /dev/null +++ b/consensus/vote_aggregator.go @@ -0,0 +1,40 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// VoteAggregator verifies and aggregates votes to build QC. When enough votes +// have been collected, it builds a QC and send it to the EventLoop. +// VoteAggregator also detects protocol violation, including invalid votes, +// double voting etc, and notifies a HotStuff consumer for slashing. +type VoteAggregator[StateT models.Unique, VoteT models.Unique] interface { + lifecycle.Component + + // AddVote verifies and aggregates a vote. The voting state could either be + // known or unknown. If the voting state is unknown, the vote won't be + // processed until AddState is called with the state. This method can be + // called concurrently, votes will be queued and processed asynchronously. + AddVote(vote *VoteT) + + // AddState notifies the VoteAggregator that it should start processing votes + // for the given state. The input state is queued internally within the + // `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's + // internal worker routines. + // CAUTION: we expect that the input state's validity has been confirmed prior + // to calling AddState, including the proposer's consensus. Otherwise, + // VoteAggregator might crash or exhibit undefined behaviour. + AddState(state *models.SignedProposal[StateT, VoteT]) + + // InvalidState notifies the VoteAggregator about an invalid proposal, so that + // it can process votes for the invalid state and slash the voters. No errors + // are expected during normal operations. + InvalidState(state *models.SignedProposal[StateT, VoteT]) error + + // PruneUpToRank deletes all votes _below_ to the given rank, as well as + // related indices. We only retain and process whose rank is equal or larger + // than `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the + // previous value, the previous value is kept and the method call is a NoOp. + PruneUpToRank(rank uint64) +} diff --git a/consensus/vote_collector.go b/consensus/vote_collector.go new file mode 100644 index 0000000..7caeeb7 --- /dev/null +++ b/consensus/vote_collector.go @@ -0,0 +1,146 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteConsumer consumes all votes for one specific rank. It is registered with +// the `VoteCollector` for the respective rank. Upon registration, the +// `VoteCollector` feeds votes into the consumer in the order they are received +// (already cached votes as well as votes received in the future). Only votes +// that pass de-duplication and equivocation detection are passed on. 
CAUTION, + VoteConsumer implementations must be + // - NON-BLOCKING and consume the votes without noteworthy delay, and + // - CONCURRENCY SAFE +type VoteConsumer[VoteT models.Unique] func(vote *VoteT) + +// OnQuorumCertificateCreated is a callback which will be used by VoteCollector +// to submit a QuorumCertificate when it's able to create it +type OnQuorumCertificateCreated func(models.QuorumCertificate) + +// VoteCollectorStatus indicates the VoteCollector's status. +// It has three different statuses. +type VoteCollectorStatus int + +const ( + // VoteCollectorStatusCaching is for the status when the state has not been + // received. The vote collector in this status will cache all the votes + // without verifying them. + VoteCollectorStatusCaching VoteCollectorStatus = iota + + // VoteCollectorStatusVerifying is for the status when the state has been + // received, and is able to process all votes for it. + VoteCollectorStatusVerifying + + // VoteCollectorStatusInvalid is for the status when the state has been + // verified and is invalid. All votes to this state will be collected to slash + // the voter. + VoteCollectorStatusInvalid +) + +// collectorStatusNames maps each VoteCollectorStatus to its human-readable +// name, used by String below. +var collectorStatusNames = [...]string{"VoteCollectorStatusCaching", + "VoteCollectorStatusVerifying", + "VoteCollectorStatusInvalid"} + +func (ps VoteCollectorStatus) String() string { + if ps < 0 || int(ps) >= len(collectorStatusNames) { + return "UNKNOWN" + } + return collectorStatusNames[ps] +} + +// VoteCollector collects all votes for a specified rank. On the happy path, it +// generates a QuorumCertificate when enough votes have been collected. +// The VoteCollector internally delegates the vote-format specific processing +// to the VoteProcessor. +type VoteCollector[StateT models.Unique, VoteT models.Unique] interface { + // ProcessState performs validation of the state signature and processes the + // state with the respective collector. Calling this function will mark + // conflicting collectors as stale and change the state of valid collectors. + // It returns nil if the state is valid, models.InvalidProposalError if the + // state is invalid, and any other error if an exception occurred while + // processing the state. + ProcessState(state *models.SignedProposal[StateT, VoteT]) error + + // AddVote adds a vote to the collector. When enough votes have been added to + // produce a QuorumCertificate, the QuorumCertificate will be created + // asynchronously, and passed to EventLoop through a callback. No errors are + // expected during normal operations. + AddVote(vote *VoteT) error + + // RegisterVoteConsumer registers a VoteConsumer. Upon registration, the + // collector feeds all cached votes into the consumer in the order they + // arrived. + // CAUTION, VoteConsumer implementations must be + // * NON-BLOCKING and consume the votes without noteworthy delay, and + // * CONCURRENCY SAFE + RegisterVoteConsumer(consumer VoteConsumer[VoteT]) + + // Rank returns the rank that this instance is collecting votes for. + // This method is useful when adding the newly created vote collector to the + // vote collectors map. + Rank() uint64 + + // Status returns the status of the vote collector + Status() VoteCollectorStatus +} + +// VoteProcessor processes votes. It implements the vote-format specific +// processing logic. 
Depending on their implementation, a VoteProcessor might +// drop votes or attempt to construct a QuorumCertificate. +type VoteProcessor[VoteT models.Unique] interface { + // Process performs processing of single vote. This function is safe to call + // from multiple goroutines. + // Expected error returns during normal operations: + // * VoteForIncompatibleStateError - submitted vote for incompatible state + // * VoteForIncompatibleRankError - submitted vote for incompatible rank + // * models.InvalidVoteError - submitted vote with invalid signature + // * models.DuplicatedSignerError - vote from a signer whose vote was + // previously already processed + // All other errors should be treated as exceptions. + Process(vote *VoteT) error + + // Status returns the status of the vote processor + Status() VoteCollectorStatus +} + +// VerifyingVoteProcessor is a VoteProcessor that attempts to construct a +// QuorumCertificate for the given state. +type VerifyingVoteProcessor[ + StateT models.Unique, + VoteT models.Unique, +] interface { + VoteProcessor[VoteT] + + // State returns which state that will be used to collector votes for. + // Transition to VerifyingVoteCollector can occur only when we have received + // state proposal so this information has to be available. + State() *models.State[StateT] +} + +// VoteProcessorFactory is a factory that can be used to create a verifying vote +// processors for a specific proposal. Depending on factory implementation it +// will return processors for consensus or collection clusters +type VoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] interface { + // Create instantiates a VerifyingVoteProcessor for processing votes for a + // specific proposal. Caller can be sure that proposal vote was successfully + // verified and processed. Expected error returns during normal operations: + // * models.InvalidProposalError - proposal has invalid proposer vote + Create( + tracer TraceLogger, + filter []byte, + proposal *models.SignedProposal[StateT, VoteT], + dsTag []byte, + aggregator SignatureAggregator, + votingProvider VotingProvider[StateT, VoteT, PeerIDT], + ) ( + VerifyingVoteProcessor[StateT, VoteT], + error, + ) +} diff --git a/consensus/vote_collectors.go b/consensus/vote_collectors.go new file mode 100644 index 0000000..5cefc2f --- /dev/null +++ b/consensus/vote_collectors.go @@ -0,0 +1,58 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// VoteCollectors is an interface which allows VoteAggregator to interact with +// collectors structured by rank. +// Implementations of this interface are responsible for state transitions of +// `VoteCollector`s and pruning of stale and outdated collectors by rank. +type VoteCollectors[StateT models.Unique, VoteT models.Unique] interface { + lifecycle.Component + + // GetOrCreateCollector retrieves the consensus.VoteCollector for the specified + // rank or creates one if none exists. + // It returns: + // - (collector, true, nil) if no collector can be found by the rank, and a + // new collector was created. 
+ // - (collector, false, nil) if the collector can be found by the rank + // - (nil, false, error) if running into any exception creating the vote + // collector state machine + // Expected error returns during normal operations: + // * models.BelowPrunedThresholdError - in case rank is lower than last + // pruned rank + GetOrCreateCollector(rank uint64) ( + collector VoteCollector[StateT, VoteT], + created bool, + err error, + ) + + // PruneUpToRank prunes the vote collectors with ranks _below_ the given + // value, i.e. we only retain and process whose rank is equal or larger than + // `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous + // value, the previous value is kept and the method call is a NoOp. + PruneUpToRank(lowestRetainedRank uint64) +} + +// Workers queues and processes submitted tasks. We explicitly do not +// expose any functionality to terminate the worker pool. +type Workers interface { + // Submit enqueues a function for a worker to execute. Submit will not block + // regardless of the number of tasks submitted. Each task is immediately + // given to an available worker or queued otherwise. Tasks are processed in + // FiFO order. + Submit(task func()) +} + +// Workerpool adds the functionality to terminate the workers to the +// Workers interface. +type Workerpool interface { + Workers + + // StopWait stops the worker pool and waits for all queued tasks to + // complete. No additional tasks may be submitted, but all pending tasks are + // executed by workers before this function returns. + StopWait() +} diff --git a/consensus/voteaggregator/pending_status.go b/consensus/voteaggregator/pending_status.go new file mode 100644 index 0000000..be77691 --- /dev/null +++ b/consensus/voteaggregator/pending_status.go @@ -0,0 +1,54 @@ +package voteaggregator + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// PendingVotes stores all the pending votes for different state proposals +type PendingVotes[VoteT models.Unique] struct { + // maps state ID to pending status for that state + votes map[models.Identity]*PendingStatus[VoteT] +} + +// PendingStatus keeps track of pending votes for the same state +type PendingStatus[VoteT models.Unique] struct { + // When receiving missing state, first received votes will be accumulated + orderedVotes []*VoteT + // For avoiding duplicate votes + voteMap map[models.Identity]struct{} +} + +// AddVote adds a vote as a pending vote +// returns true if it can be added to a PendingStatus successfully +// returns false otherwise +func (pv *PendingVotes[VoteT]) AddVote(vote *VoteT) bool { + status, exists := pv.votes[(*vote).Source()] + if !exists { + status = NewPendingStatus[VoteT]() + pv.votes[(*vote).Source()] = status + } + return status.AddVote(vote) +} + +// AddVote adds a vote as a pending vote +// returns false if it has been added before +// returns true otherwise +func (ps *PendingStatus[VoteT]) AddVote(vote *VoteT) bool { + _, exists := ps.voteMap[(*vote).Identity()] + if exists { + return false + } + ps.voteMap[(*vote).Identity()] = struct{}{} + ps.orderedVotes = append(ps.orderedVotes, vote) + return true +} + +// NewPendingVotes creates a PendingVotes instance +func NewPendingVotes[VoteT models.Unique]() *PendingVotes[VoteT] { + return &PendingVotes[VoteT]{ + votes: make(map[models.Identity]*PendingStatus[VoteT]), + } +} + +// NewPendingStatus creates a PendingStatus instance +func NewPendingStatus[VoteT models.Unique]() *PendingStatus[VoteT] { + return &PendingStatus[VoteT]{voteMap: 
make(map[models.Identity]struct{})} +} diff --git a/consensus/voteaggregator/vote_aggregator.go b/consensus/voteaggregator/vote_aggregator.go new file mode 100644 index 0000000..9a8cc8b --- /dev/null +++ b/consensus/voteaggregator/vote_aggregator.go @@ -0,0 +1,474 @@ +package voteaggregator + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/sync/errgroup" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/counters" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// defaultVoteAggregatorWorkers number of workers to dispatch events for vote +// aggregators +const defaultVoteAggregatorWorkers = 8 + +// defaultVoteQueueCapacity maximum capacity of buffering unprocessed votes +const defaultVoteQueueCapacity = 1000 + +// defaultStateQueueCapacity maximum capacity of buffering unprocessed states +const defaultStateQueueCapacity = 1000 + +// VoteAggregator stores the votes and aggregates them into a QC when enough +// votes have been collected. +type VoteAggregator[StateT models.Unique, VoteT models.Unique] struct { + *lifecycle.ComponentManager + tracer consensus.TraceLogger + notifier consensus.VoteAggregationViolationConsumer[ + StateT, + VoteT, + ] + lowestRetainedRank counters.StrictMonotonicCounter // lowest rank, for which we still process votes + collectors consensus.VoteCollectors[StateT, VoteT] + queuedMessagesNotifier chan struct{} + finalizationEventsNotifier chan struct{} + finalizedRank counters.StrictMonotonicCounter // cache the last finalized rank to queue up the pruning work, and unstate the caller who's delivering the finalization event. + queuedVotes chan *VoteT + queuedStates chan *models.SignedProposal[StateT, VoteT] + wg errgroup.Group +} + +var _ consensus.VoteAggregator[*nilUnique, *nilUnique] = (*VoteAggregator[*nilUnique, *nilUnique])(nil) + +// NewVoteAggregator creates an instance of vote aggregator +func NewVoteAggregator[StateT models.Unique, VoteT models.Unique]( + tracer consensus.TraceLogger, + notifier consensus.VoteAggregationViolationConsumer[StateT, VoteT], + lowestRetainedRank uint64, + collectors consensus.VoteCollectors[StateT, VoteT], +) (*VoteAggregator[StateT, VoteT], error) { + + queuedVotes := make(chan *VoteT, defaultVoteQueueCapacity) + queuedStates := make( + chan *models.SignedProposal[StateT, VoteT], + defaultStateQueueCapacity, + ) + + aggregator := &VoteAggregator[StateT, VoteT]{ + tracer: tracer, + notifier: notifier, + lowestRetainedRank: counters.NewMonotonicCounter( + lowestRetainedRank, + ), + finalizedRank: counters.NewMonotonicCounter( + lowestRetainedRank, + ), + collectors: collectors, + queuedVotes: queuedVotes, + queuedStates: queuedStates, + queuedMessagesNotifier: make(chan struct{}, 1), + finalizationEventsNotifier: make(chan struct{}, 1), + wg: errgroup.Group{}, + } + + componentBuilder := lifecycle.NewComponentManagerBuilder() + var wg sync.WaitGroup + wg.Add(defaultVoteAggregatorWorkers) + for i := 0; i < defaultVoteAggregatorWorkers; i++ { + // manager for worker routines that process inbound messages + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + defer wg.Done() + ready() + aggregator.queuedMessagesProcessingLoop(ctx) + }) + } + componentBuilder.AddWorker(func( + parentCtx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + // Create new context which is not connected to parent. 
We need to + // ensure that our internal workers stop before asking vote collectors + // to stop. We want to avoid delivering events to already stopped vote + // collectors. + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx, errCh := lifecycle.WithSignaler(ctx) + + // start vote collectors + aggregator.collectors.Start(signalerCtx) + + // Handle the component lifecycle in a separate goroutine so we can + // capture any errors thrown during initialization in the main + // goroutine. + go func() { + if err := lifecycle.WaitClosed( + parentCtx, + aggregator.collectors.Ready(), + ); err == nil { + // only signal ready when collectors are ready, but always handle + // shutdown + ready() + } + + // wait for internal workers to stop, then signal vote collectors to stop + wg.Wait() + cancel() + }() + + // since we are breaking the connection between parentCtx and signalerCtx, + // we need to explicitly rethrow any errors from signalerCtx to parentCtx, + // otherwise they are dropped. Handle errors in the main worker goroutine to + // guarantee that they are rethrown to the parent before the component is + // marked done. + if err := lifecycle.WaitError( + errCh, + aggregator.collectors.Done(), + ); err != nil { + parentCtx.Throw(err) + } + }) + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + aggregator.finalizationProcessingLoop(ctx) + }) + + aggregator.ComponentManager = componentBuilder.Build() + return aggregator, nil +} + +func (va *VoteAggregator[StateT, VoteT]) queuedMessagesProcessingLoop( + ctx lifecycle.SignalerContext, +) { + notifier := va.queuedMessagesNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + err := va.processQueuedMessages(ctx) + if err != nil { + ctx.Throw(fmt.Errorf( + "internal error processing queued messages: %w", + err, + )) + return + } + } + } +} + +// processQueuedMessages is a function which dispatches previously queued +// messages on worker thread. This function is called whenever we have queued +// messages ready to be dispatched. No errors are expected during normal +// operations. +func (va *VoteAggregator[StateT, VoteT]) processQueuedMessages( + ctx context.Context, +) error { + for { + select { + case <-ctx.Done(): + return nil + + case state, ok := <-va.queuedStates: + if ok { + err := va.processQueuedState(state) + if err != nil { + return fmt.Errorf( + "could not process pending state %x: %w", + state.State.Identifier, + err, + ) + } + + continue + } + + case vote, ok := <-va.queuedVotes: + if ok { + err := va.processQueuedVote(vote) + + if err != nil { + return fmt.Errorf( + "could not process pending vote %x for state %x: %w", + (*vote).Identity(), + (*vote).Source(), + err, + ) + } + + continue + } + + default: + } + + // when there is no more messages in the queue, back to the loop to wait + // for the next incoming message to arrive. + return nil + } +} + +// processQueuedVote performs actual processing of queued votes, this method is +// called from multiple concurrent goroutines. 
+func (va *VoteAggregator[StateT, VoteT]) processQueuedVote(vote *VoteT) error { + collector, created, err := va.collectors.GetOrCreateCollector( + (*vote).GetRank(), + ) + if err != nil { + // ignore if our routine is outdated and some other one has pruned + // collectors + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not get collector for rank %d: %w", + (*vote).GetRank(), + err, + ) + } + if created { + va.tracer.Trace("vote collector is created by processing vote") + } + + err = collector.AddVote(vote) + if err != nil { + return fmt.Errorf( + "could not process vote for rank %d, stateID %x: %w", + (*vote).GetRank(), + (*vote).Source(), + err, + ) + } + + va.tracer.Trace("vote has been processed successfully") + + return nil +} + +// processQueuedState performs actual processing of queued state proposals, this +// method is called from multiple concurrent goroutines. +// CAUTION: we expect that the input state's validity has been confirmed prior +// to calling AddState, including the proposer's consensus. Otherwise, +// VoteAggregator might crash or exhibit undefined behaviour. No errors are +// expected during normal operation. +func (va *VoteAggregator[StateT, VoteT]) processQueuedState( + state *models.SignedProposal[StateT, VoteT], +) error { + // check if the state is for a rank that has already been pruned (and is thus + // stale) + if state.State.Rank < va.lowestRetainedRank.Value() { + return nil + } + + collector, created, err := va.collectors.GetOrCreateCollector( + state.State.Rank, + ) + if err != nil { + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not get or create collector for state %x: %w", + state.State.Identifier, + err, + ) + } + if created { + va.tracer.Trace("vote collector is created by processing state") + } + + err = collector.ProcessState(state) + if err != nil { + if models.IsInvalidProposalError[StateT, VoteT](err) { + // We are attempting process a state which is invalid + // This should never happen, because any component that feeds states into + // VoteAggregator needs to make sure that it's submitting for processing + // ONLY valid states. + return fmt.Errorf( + "received invalid state for processing %x at rank %d: %+w", + state.State.Identifier, + state.State.Rank, + err, + ) + } + return fmt.Errorf( + "could not process state: %x, %w", + state.State.Identifier, + err, + ) + } + + va.tracer.Trace("state has been processed successfully") + + return nil +} + +// AddVote checks if vote is stale and appends vote into processing queue +// actual vote processing will be called in other dispatching goroutine. +func (va *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) { + // drop stale votes + if (*vote).GetRank() < va.lowestRetainedRank.Value() { + va.tracer.Trace("drop stale votes") + return + } + + // It's ok to silently drop votes in case our processing pipeline is full. + // It means that we are probably catching up. + select { + case va.queuedVotes <- vote: + select { + case va.queuedMessagesNotifier <- struct{}{}: + default: + } + default: + va.tracer.Trace("no queue capacity, dropping vote") + } +} + +// AddState notifies the VoteAggregator that it should start processing votes +// for the given state. The input state is queued internally within the +// `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's +// internal worker routines. 
+// CAUTION: we expect that the input state's validity has been confirmed prior +// to calling AddState, including the proposer's consensus. Otherwise, +// VoteAggregator might crash or exhibit undefined behaviour. +func (va *VoteAggregator[StateT, VoteT]) AddState( + state *models.SignedProposal[StateT, VoteT], +) { + // It's ok to silently drop states in case our processing pipeline is full. + // It means that we are probably catching up. + select { + case va.queuedStates <- state: + select { + case va.queuedMessagesNotifier <- struct{}{}: + default: + } + default: + va.tracer.Trace(fmt.Sprintf( + "dropping state %x because queue is full", + state.State.Identifier, + )) + } +} + +// InvalidState notifies the VoteAggregator about an invalid proposal, so that +// it can process votes for the invalid state and slash the voters. +// No errors are expected during normal operations +func (va *VoteAggregator[StateT, VoteT]) InvalidState( + proposal *models.SignedProposal[StateT, VoteT], +) error { + slashingVoteConsumer := func(vote *VoteT) { + if proposal.State.Identifier == (*vote).Source() { + va.notifier.OnVoteForInvalidStateDetected(vote, proposal) + } + } + + state := proposal.State + collector, _, err := va.collectors.GetOrCreateCollector(state.Rank) + if err != nil { + // ignore if our routine is outdated and some other one has pruned + // collectors + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not retrieve vote collector for rank %d: %w", + state.Rank, + err, + ) + } + + // registering vote consumer will deliver all previously cached votes in + // strict order and will keep delivering votes if more are collected + collector.RegisterVoteConsumer(slashingVoteConsumer) + return nil +} + +// PruneUpToRank deletes all votes _below_ to the given rank, as well as +// related indices. We only retain and process whose rank is equal or larger +// than `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the +// previous value, the previous value is kept and the method call is a NoOp. +func (va *VoteAggregator[StateT, VoteT]) PruneUpToRank( + lowestRetainedRank uint64, +) { + if va.lowestRetainedRank.Set(lowestRetainedRank) { + va.collectors.PruneUpToRank(lowestRetainedRank) + } +} + +// OnFinalizedState implements the `OnFinalizedState` callback from the +// `consensus.FinalizationConsumer`. It informs sealing.Core about finalization +// of respective state. +// +// CAUTION: the input to this callback is treated as trusted; precautions should +// be taken that messages from external nodes cannot be considered as inputs to +// this function +func (va *VoteAggregator[StateT, VoteT]) OnFinalizedState( + state *models.State[StateT], +) { + if va.finalizedRank.Set(state.Rank) { + select { + case va.finalizationEventsNotifier <- struct{}{}: + default: + } + } +} + +// finalizationProcessingLoop is a separate goroutine that performs processing +// of finalization events +func (va *VoteAggregator[StateT, VoteT]) finalizationProcessingLoop( + ctx context.Context, +) { + finalizationNotifier := va.finalizationEventsNotifier + for { + select { + case <-ctx.Done(): + return + case <-finalizationNotifier: + va.PruneUpToRank(va.finalizedRank.Value()) + } + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. 
+func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/voteaggregator/vote_aggregator_test.go b/consensus/voteaggregator/vote_aggregator_test.go new file mode 100644 index 0000000..11d3f5d --- /dev/null +++ b/consensus/voteaggregator/vote_aggregator_test.go @@ -0,0 +1,108 @@ +package voteaggregator + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +func TestVoteAggregator(t *testing.T) { + ts := new(VoteAggregatorTestSuite) + ts.errs = make(chan error, 1) + suite.Run(t, ts) +} + +// VoteAggregatorTestSuite is a test suite for isolated testing of VoteAggregator. +// Contains mocked state which is used to verify correct behavior of VoteAggregator. +// Automatically starts and stops module.Startable in SetupTest and TearDownTest respectively. +type VoteAggregatorTestSuite struct { + suite.Suite + + aggregator *VoteAggregator[*helper.TestState, *helper.TestVote] + collectors *mocks.VoteCollectors[*helper.TestState, *helper.TestVote] + consumer *mocks.VoteAggregationConsumer[*helper.TestState, *helper.TestVote] + stopAggregator context.CancelFunc + errs <-chan error +} + +func (s *VoteAggregatorTestSuite) SetupTest() { + var err error + s.collectors = mocks.NewVoteCollectors[*helper.TestState, *helper.TestVote](s.T()) + s.consumer = mocks.NewVoteAggregationConsumer[*helper.TestState, *helper.TestVote](s.T()) + + s.collectors.On("Start", mock.Anything).Return(nil).Once() + unittest.Componentify(&s.collectors.Mock) + s.aggregator, err = NewVoteAggregator( + helper.Logger(), + s.consumer, + 0, + s.collectors, + ) + require.NoError(s.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx, errs := lifecycle.WithSignaler(ctx) + s.stopAggregator = cancel + s.errs = errs + s.aggregator.Start(signalerCtx) + unittest.RequireCloseBefore(s.T(), s.aggregator.Ready(), 100*time.Millisecond, "should close before timeout") +} + +func (s *VoteAggregatorTestSuite) TearDownTest() { + s.stopAggregator() + unittest.RequireCloseBefore(s.T(), s.aggregator.Done(), 10*time.Second, "should close before timeout") +} + +// TestOnFinalizedState tests if finalized state gets processed when send through `VoteAggregator`. +// Tests the whole processing pipeline. 
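The SetupTest/TearDownTest pair above waits on the component's Ready() and Done() channels with a deadline. As a rough, standard-library-only illustration of that idiom (the requireCloseBefore helper here is a stand-in, not the unittest package's implementation):

package main

import (
	"fmt"
	"time"
)

// requireCloseBefore waits until ch is closed or the timeout elapses.
func requireCloseBefore(ch <-chan struct{}, timeout time.Duration) error {
	select {
	case <-ch:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("channel was not closed within %s", timeout)
	}
}

func main() {
	ready := make(chan struct{})
	go func() {
		// simulate component startup finishing
		time.Sleep(10 * time.Millisecond)
		close(ready)
	}()
	fmt.Println(requireCloseBefore(ready, 100*time.Millisecond)) // <nil>
}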
+func (s *VoteAggregatorTestSuite) TestOnFinalizedState() { + finalizedState := helper.MakeState(helper.WithStateRank[*helper.TestState](100)) + done := make(chan struct{}) + s.collectors.On("PruneUpToRank", uint64(100)).Run(func(args mock.Arguments) { + close(done) + }).Once() + s.aggregator.OnFinalizedState(finalizedState) + unittest.AssertClosesBefore(s.T(), done, time.Second) +} + +// TestProcessInvalidState tests that processing invalid state results in exception, when given as +// an input to AddState (only expects _valid_ states per API contract). +// The exception should be propagated to the VoteAggregator's internal `ComponentManager`. +func (s *VoteAggregatorTestSuite) TestProcessInvalidState() { + state := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal( + helper.WithState( + helper.MakeState( + helper.WithStateRank[*helper.TestState](100), + ), + ), + ))) + processed := make(chan struct{}) + collector := mocks.NewVoteCollector[*helper.TestState, *helper.TestVote](s.T()) + collector.On("ProcessState", state).Run(func(_ mock.Arguments) { + close(processed) + }).Return(models.InvalidProposalError[*helper.TestState, *helper.TestVote]{}) + s.collectors.On("GetOrCreateCollector", state.State.Rank).Return(collector, true, nil).Once() + + // submit state for processing + s.aggregator.AddState(state) + unittest.RequireCloseBefore(s.T(), processed, 100*time.Millisecond, "should close before timeout") + + // expect a thrown error + select { + case err := <-s.errs: + require.Error(s.T(), err) + require.True(s.T(), models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + case <-time.After(100 * time.Millisecond): + s.T().Fatalf("expected error but haven't received anything") + } +} diff --git a/consensus/voteaggregator/vote_collectors.go b/consensus/voteaggregator/vote_collectors.go new file mode 100644 index 0000000..383081d --- /dev/null +++ b/consensus/voteaggregator/vote_collectors.go @@ -0,0 +1,172 @@ +package voteaggregator + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// NewCollectorFactoryMethod is a factory method to generate a VoteCollector for +// concrete rank +type NewCollectorFactoryMethod[StateT models.Unique, VoteT models.Unique] = func( + rank uint64, + workers consensus.Workers, +) (consensus.VoteCollector[StateT, VoteT], error) + +// VoteCollectors implements management of multiple vote collectors indexed by +// rank. Implements consensus.VoteCollectors interface. Creating a VoteCollector +// for a particular rank is lazy (instances are created on demand). +// This structure is concurrency safe. 
+type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct { + *lifecycle.ComponentManager + tracer consensus.TraceLogger + lock sync.RWMutex + lowestRetainedRank uint64 // lowest rank, for which we still retain a VoteCollector and process votes + collectors map[uint64]consensus.VoteCollector[StateT, VoteT] // rank -> VoteCollector + workerPool consensus.Workerpool // for processing votes that are already cached in VoteCollectors and waiting for respective state + createCollector NewCollectorFactoryMethod[StateT, VoteT] // factory method for creating collectors +} + +var _ consensus.VoteCollectors[*nilUnique, *nilUnique] = (*VoteCollectors[*nilUnique, *nilUnique])(nil) + +func NewVoteCollectors[StateT models.Unique, VoteT models.Unique]( + tracer consensus.TraceLogger, + lowestRetainedRank uint64, + workerPool consensus.Workerpool, + factoryMethod NewCollectorFactoryMethod[StateT, VoteT], +) *VoteCollectors[StateT, VoteT] { + v := &VoteCollectors[StateT, VoteT]{ + tracer: tracer, + lowestRetainedRank: lowestRetainedRank, + collectors: make(map[uint64]consensus.VoteCollector[StateT, VoteT]), + workerPool: workerPool, + createCollector: factoryMethod, + } + // Component manager for wrapped worker pool + componentBuilder := lifecycle.NewComponentManagerBuilder() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + <-ctx.Done() // wait for parent context to signal shutdown + v.workerPool.StopWait() // wait till all workers exit + }) + v.ComponentManager = componentBuilder.Build() + return v +} + +// GetOrCreateCollector retrieves the consensus.VoteCollector for the specified +// rank or creates one if none exists. +// - (collector, true, nil) if no collector can be found by the rank, and a +// new collector was created. +// - (collector, false, nil) if the collector can be found by the rank +// - (nil, false, error) if running into any exception creating the vote +// collector state machine +// +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (v *VoteCollectors[StateT, VoteT]) GetOrCreateCollector(rank uint64) ( + consensus.VoteCollector[StateT, VoteT], + bool, + error, +) { + cachedCollector, hasCachedCollector, err := v.getCollector(rank) + if err != nil { + return nil, false, err + } + + if hasCachedCollector { + return cachedCollector, false, nil + } + + collector, err := v.createCollector(rank, v.workerPool) + if err != nil { + return nil, false, fmt.Errorf( + "could not create vote collector for rank %d: %w", + rank, + err, + ) + } + + // Initial check showed that there was no collector. However, it's possible + // that after the initial check but before acquiring the lock to add the + // newly-created collector, another goroutine already added the needed + // collector. Hence, check again after acquiring the lock: + v.lock.Lock() + defer v.lock.Unlock() + + clr, found := v.collectors[rank] + if found { + return clr, false, nil + } + + v.collectors[rank] = collector + return collector, true, nil +} + +// getCollector retrieves consensus.VoteCollector from local cache in +// concurrency safe way. Performs check for lowestRetainedRank. 
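GetOrCreateCollector above follows a check, create, re-check pattern: an optimistic read under the read lock, construction outside the lock, and a second lookup under the write lock before inserting, so a concurrently created collector wins. A condensed sketch of the same idea with illustrative types:

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.RWMutex
	items map[uint64]*string
}

// getOrCreate returns the cached item for key, or builds and stores a new one.
// The second return value reports whether this call created the item.
func (r *registry) getOrCreate(key uint64, build func() *string) (*string, bool) {
	r.mu.RLock()
	existing, ok := r.items[key]
	r.mu.RUnlock()
	if ok {
		return existing, false // cached
	}

	candidate := build() // constructed outside the lock; may be discarded

	r.mu.Lock()
	defer r.mu.Unlock()
	// Re-check: another goroutine may have inserted while we were building.
	if winner, ok := r.items[key]; ok {
		return winner, false
	}
	r.items[key] = candidate
	return candidate, true
}

func main() {
	r := &registry{items: map[uint64]*string{}}
	v, created := r.getOrCreate(7, func() *string { s := "collector-7"; return &s })
	fmt.Println(*v, created) // collector-7 true
}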
+// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (v *VoteCollectors[StateT, VoteT]) getCollector(rank uint64) ( + consensus.VoteCollector[StateT, VoteT], + bool, + error, +) { + v.lock.RLock() + defer v.lock.RUnlock() + if rank < v.lowestRetainedRank { + return nil, false, models.NewBelowPrunedThresholdErrorf( + "cannot retrieve collector for pruned rank %d (lowest retained rank %d)", + rank, + v.lowestRetainedRank, + ) + } + + clr, found := v.collectors[rank] + + return clr, found, nil +} + +// PruneUpToRank prunes the vote collectors with ranks _below_ the given value, +// i.e. we only retain and process whose rank is equal or larger than +// `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous +// value, the previous value is kept and the method call is a NoOp. +func (v *VoteCollectors[StateT, VoteT]) PruneUpToRank( + lowestRetainedRank uint64, +) { + v.lock.Lock() + defer v.lock.Unlock() + if v.lowestRetainedRank >= lowestRetainedRank { + return + } + if len(v.collectors) == 0 { + v.lowestRetainedRank = lowestRetainedRank + return + } + + // to optimize the pruning of large rank-ranges, we compare: + // * the number of ranks for which we have collectors: len(v.collectors) + // * the number of ranks that need to be pruned: rank-v.lowestRetainedRank + // We iterate over the dimension which is smaller. + if uint64(len(v.collectors)) < lowestRetainedRank-v.lowestRetainedRank { + for w := range v.collectors { + if w < lowestRetainedRank { + delete(v.collectors, w) + } + } + } else { + for w := v.lowestRetainedRank; w < lowestRetainedRank; w++ { + delete(v.collectors, w) + } + } + + v.lowestRetainedRank = lowestRetainedRank + v.tracer.Trace("pruned vote collectors") +} diff --git a/consensus/voteaggregator/vote_collectors_test.go b/consensus/voteaggregator/vote_collectors_test.go new file mode 100644 index 0000000..db78991 --- /dev/null +++ b/consensus/voteaggregator/vote_collectors_test.go @@ -0,0 +1,158 @@ +package voteaggregator + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/gammazero/workerpool" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var factoryError = errors.New("factory error") + +func TestVoteCollectors(t *testing.T) { + suite.Run(t, new(VoteCollectorsTestSuite)) +} + +// VoteCollectorsTestSuite is a test suite for isolated testing of VoteCollectors. +// Contains helper methods and mocked state which is used to verify correct behavior of VoteCollectors. 
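PruneUpToRank above iterates over whichever dimension is smaller, the set of live collectors or the range of ranks to prune. A small sketch of that choice on a plain map, with ranks as keys and illustrative names:

package main

import "fmt"

// pruneBelow deletes all keys < threshold, iterating over the cheaper dimension,
// and returns the new lowest retained rank.
func pruneBelow(m map[uint64]struct{}, lowest, threshold uint64) uint64 {
	if threshold <= lowest {
		return lowest // nothing to do; never move the threshold backwards
	}
	if uint64(len(m)) < threshold-lowest {
		// few entries, wide prune range: scan the map
		for k := range m {
			if k < threshold {
				delete(m, k)
			}
		}
	} else {
		// narrow prune range: walk the ranks directly
		for k := lowest; k < threshold; k++ {
			delete(m, k)
		}
	}
	return threshold
}

func main() {
	m := map[uint64]struct{}{10: {}, 11: {}, 500: {}}
	lowest := pruneBelow(m, 10, 100)
	fmt.Println(lowest, len(m)) // 100 1
}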
+type VoteCollectorsTestSuite struct { + suite.Suite + + mockedCollectors map[uint64]*mocks.VoteCollector[*helper.TestState, *helper.TestVote] + factoryMethod NewCollectorFactoryMethod[*helper.TestState, *helper.TestVote] + collectors *VoteCollectors[*helper.TestState, *helper.TestVote] + lowestLevel uint64 + workerPool *workerpool.WorkerPool +} + +func (s *VoteCollectorsTestSuite) SetupTest() { + s.lowestLevel = 1000 + s.mockedCollectors = make(map[uint64]*mocks.VoteCollector[*helper.TestState, *helper.TestVote]) + s.workerPool = workerpool.New(2) + s.factoryMethod = func(rank uint64, _ consensus.Workers) (consensus.VoteCollector[*helper.TestState, *helper.TestVote], error) { + if collector, found := s.mockedCollectors[rank]; found { + return collector, nil + } + return nil, fmt.Errorf("mocked collector %v not found: %w", rank, factoryError) + } + s.collectors = NewVoteCollectors(helper.Logger(), s.lowestLevel, s.workerPool, s.factoryMethod) +} + +func (s *VoteCollectorsTestSuite) TearDownTest() { + s.workerPool.StopWait() +} + +// prepareMockedCollector prepares a mocked collector and stores it in map, later it will be used +// to mock behavior of vote collectors. +func (s *VoteCollectorsTestSuite) prepareMockedCollector(rank uint64) *mocks.VoteCollector[*helper.TestState, *helper.TestVote] { + collector := &mocks.VoteCollector[*helper.TestState, *helper.TestVote]{} + collector.On("Rank").Return(rank).Maybe() + s.mockedCollectors[rank] = collector + return collector +} + +// TestGetOrCreatorCollector_RankLowerThanLowest tests a scenario where caller tries to create a collector with rank +// lower than already pruned one. This should result in sentinel error `BelowPrunedThresholdError` +func (s *VoteCollectorsTestSuite) TestGetOrCreatorCollector_RankLowerThanLowest() { + collector, created, err := s.collectors.GetOrCreateCollector(s.lowestLevel - 10) + require.Nil(s.T(), collector) + require.False(s.T(), created) + require.Error(s.T(), err) + require.True(s.T(), models.IsBelowPrunedThresholdError(err)) +} + +// TestGetOrCreateCollector_ValidCollector tests a happy path scenario where we try first to create and then retrieve cached collector. +func (s *VoteCollectorsTestSuite) TestGetOrCreateCollector_ValidCollector() { + rank := s.lowestLevel + 10 + s.prepareMockedCollector(rank) + collector, created, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + require.True(s.T(), created) + require.Equal(s.T(), rank, collector.Rank()) + + cached, cachedCreated, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + require.False(s.T(), cachedCreated) + require.Equal(s.T(), collector, cached) +} + +// TestGetOrCreateCollector_FactoryError tests that error from factory method is propagated to caller. +func (s *VoteCollectorsTestSuite) TestGetOrCreateCollector_FactoryError() { + // creating collector without calling prepareMockedCollector will yield factoryError. + collector, created, err := s.collectors.GetOrCreateCollector(s.lowestLevel + 10) + require.Nil(s.T(), collector) + require.False(s.T(), created) + require.ErrorIs(s.T(), err, factoryError) +} + +// TestGetOrCreateCollectors_ConcurrentAccess tests that concurrently accessing of GetOrCreateCollector creates +// only one collector and all other instances are retrieved from cache. 
+func (s *VoteCollectorsTestSuite) TestGetOrCreateCollectors_ConcurrentAccess() { + createdTimes := atomic.NewUint64(0) + rank := s.lowestLevel + 10 + s.prepareMockedCollector(rank) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + _, created, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + if created { + createdTimes.Add(1) + } + wg.Done() + }() + } + + wg.Wait() + require.Equal(s.T(), uint64(1), createdTimes.Load()) +} + +// TestPruneUpToRank tests pruning removes item below pruning height and leaves unmodified other items. +func (s *VoteCollectorsTestSuite) TestPruneUpToRank() { + numberOfCollectors := uint64(10) + prunedRanks := make([]uint64, 0) + for i := uint64(0); i < numberOfCollectors; i++ { + rank := s.lowestLevel + i + s.prepareMockedCollector(rank) + _, _, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + prunedRanks = append(prunedRanks, rank) + } + + pruningHeight := s.lowestLevel + numberOfCollectors + + expectedCollectors := make([]consensus.VoteCollector[*helper.TestState, *helper.TestVote], 0) + for i := uint64(0); i < numberOfCollectors; i++ { + rank := pruningHeight + i + s.prepareMockedCollector(rank) + collector, _, err := s.collectors.GetOrCreateCollector(rank) + require.NoError(s.T(), err) + expectedCollectors = append(expectedCollectors, collector) + } + + // after this operation collectors below pruning height should be pruned and everything higher + // should be left unmodified + s.collectors.PruneUpToRank(pruningHeight) + + for _, prunedRank := range prunedRanks { + _, _, err := s.collectors.GetOrCreateCollector(prunedRank) + require.Error(s.T(), err) + require.True(s.T(), models.IsBelowPrunedThresholdError(err)) + } + + for _, collector := range expectedCollectors { + cached, _, _ := s.collectors.GetOrCreateCollector(collector.Rank()) + require.Equal(s.T(), collector, cached) + } +} diff --git a/consensus/votecollector/common.go b/consensus/votecollector/common.go new file mode 100644 index 0000000..24d2da2 --- /dev/null +++ b/consensus/votecollector/common.go @@ -0,0 +1,68 @@ +package votecollector + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var ( + VoteForIncompatibleRankError = errors.New("vote for incompatible rank") + VoteForIncompatibleStateError = errors.New("vote for incompatible state") +) + +/******************************* NoopProcessor *******************************/ + +// NoopProcessor implements consensus.VoteProcessor. It drops all votes. +type NoopProcessor[VoteT models.Unique] struct { + status consensus.VoteCollectorStatus +} + +func NewNoopCollector[VoteT models.Unique]( + status consensus.VoteCollectorStatus, +) *NoopProcessor[VoteT] { + return &NoopProcessor[VoteT]{status} +} + +func (c *NoopProcessor[VoteT]) Process(*VoteT) error { + return nil +} + +func (c *NoopProcessor[VoteT]) Status() consensus.VoteCollectorStatus { + return c.status +} + +/************************ enforcing vote is for state ************************/ + +// EnsureVoteForState verifies that the vote is for the given state. 
+// Returns nil on success and sentinel errors: +// - models.VoteForIncompatibleRankError if the vote is from a different rank +// than state +// - models.VoteForIncompatibleStateError if the vote is from the same rank as +// state but for a different stateID +func EnsureVoteForState[StateT models.Unique, VoteT models.Unique]( + vote *VoteT, + state *models.State[StateT], +) error { + if (*vote).GetRank() != state.Rank { + return fmt.Errorf( + "vote %x has rank %d while state's rank is %d: %w ", + (*vote).Identity(), + (*vote).GetRank(), + state.Rank, + VoteForIncompatibleRankError, + ) + } + if (*vote).Source() != state.Identifier { + return fmt.Errorf( + "expecting only votes for state %x, but vote %x is for state %x: %w ", + state.Identifier, + (*vote).Identity(), + (*vote).Source(), + VoteForIncompatibleStateError, + ) + } + return nil +} diff --git a/consensus/votecollector/factory.go b/consensus/votecollector/factory.go new file mode 100644 index 0000000..a39cee6 --- /dev/null +++ b/consensus/votecollector/factory.go @@ -0,0 +1,181 @@ +package votecollector + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// baseFactory instantiates VerifyingVoteProcessors. Depending on the specific +// signing scheme, a different baseFactory can be used. +// CAUTION: the baseFactory creates the VerifyingVoteProcessor for the given +// state. It does _not_ check the proposer's vote for its own state. The API +// reflects this by expecting a `models.State` as input (which does _not_ +// contain the proposer vote) as opposed to `models.SignedProposal` (combines +// state with proposer's vote). Therefore, baseFactory does _not_ implement +// `consensus.VoteProcessorFactory` by itself. The VoteProcessorFactory adds the +// missing logic to verify the proposer's vote, by wrapping the baseFactory +// (decorator pattern). +type baseFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] func( + tracer consensus.TraceLogger, + filter []byte, + state *models.State[StateT], + dsTag []byte, + aggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) + +// VoteProcessorFactory implements `consensus.VoteProcessorFactory`. Its main +// purpose is to construct instances of VerifyingVoteProcessors for a given +// state proposal. +// VoteProcessorFactory +// * delegates the creation of the actual instances to baseFactory +// * adds the logic to verify the proposer's vote for its own state +// Thereby, VoteProcessorFactory guarantees that only proposals with valid +// proposer vote are accepted (as per API specification). Otherwise, an +// `models.InvalidProposalError` is returned. +type VoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + baseFactory baseFactory[StateT, VoteT, PeerIDT] +} + +var _ consensus.VoteProcessorFactory[*nilUnique, *nilUnique, *nilUnique] = (*VoteProcessorFactory[*nilUnique, *nilUnique, *nilUnique])(nil) + +// Create instantiates a VerifyingVoteProcessor for the given state proposal. +// A VerifyingVoteProcessor are only created for proposals with valid proposer +// votes. 
Expected error returns during normal operations: +// * models.InvalidProposalError - proposal has invalid proposer vote +func (f *VoteProcessorFactory[StateT, VoteT, PeerIDT]) Create( + tracer consensus.TraceLogger, + filter []byte, + proposal *models.SignedProposal[StateT, VoteT], + dsTag []byte, + aggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + processor, err := f.baseFactory( + tracer, + filter, + proposal.State, + dsTag, + aggregator, + votingProvider, + ) + if err != nil { + return nil, fmt.Errorf( + "instantiating vote processor for state %x failed: %w", + proposal.State.Identifier, + err, + ) + } + + vote, err := proposal.ProposerVote() + if err != nil { + return nil, fmt.Errorf("could not get vote from proposer vote: %w", err) + } + + err = processor.Process(vote) + if err != nil { + if models.IsInvalidVoteError[VoteT](err) { + return nil, models.NewInvalidProposalErrorf( + proposal, + "invalid proposer vote: %w", + err, + ) + } + return nil, fmt.Errorf( + "processing proposer's vote for state %x failed: %w", + proposal.State.Identifier, + err, + ) + } + return processor, nil +} + +// NewVoteProcessorFactory implements consensus.VoteProcessorFactory. +func NewVoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + committee consensus.DynamicCommittee, + onQCCreated consensus.OnQuorumCertificateCreated, +) *VoteProcessorFactory[StateT, VoteT, PeerIDT] { + base := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{ + committee: committee, + onQCCreated: onQCCreated, + } + return &VoteProcessorFactory[StateT, VoteT, PeerIDT]{ + baseFactory: base.Create, + } +} + +/* ***************************** VerifyingVoteProcessor constructors for bootstrapping ***************************** */ + +// NewBootstrapVoteProcessor directly creates a `VoteProcessor`, +// suitable for the collector's local cluster consensus. +// Intended use: only for bootstrapping. +// UNSAFE: the proposer vote for `state` is _not_ validated or included +func NewBootstrapVoteProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + filter []byte, + committee consensus.DynamicCommittee, + state *models.State[StateT], + onQCCreated consensus.OnQuorumCertificateCreated, + dsTag []byte, + aggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + factory := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{ + committee: committee, + onQCCreated: onQCCreated, + } + return factory.Create(tracer, filter, state, dsTag, aggregator, votingProvider) +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. 
+func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/votecollector/factory_test.go b/consensus/votecollector/factory_test.go new file mode 100644 index 0000000..b0e86bb --- /dev/null +++ b/consensus/votecollector/factory_test.go @@ -0,0 +1,118 @@ +package votecollector + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TestVoteProcessorFactory_CreateWithValidProposal checks if +// VoteProcessorFactory checks the proposer vote based on submitted proposal +func TestVoteProcessorFactory_CreateWithValidProposal(t *testing.T) { + mockedFactory := mocks.VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{} + + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + mockedProcessor := &mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote]{} + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(nil).Once() + mockedFactory.On("Create", helper.Logger(), []byte{}, proposal, mock.Anything, mock.Anything, mock.Anything).Return(mockedProcessor, nil).Once() + + voteProcessorFactory := &VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{ + baseFactory: func(log consensus.TraceLogger, filter []byte, state *models.State[*helper.TestState], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + return mockedFactory.Create(log, filter, proposal, dsTag, aggregator, votingProvider) + }, + } + + processor, err := voteProcessorFactory.Create(helper.Logger(), []byte{}, proposal, []byte{}, mocks.NewSignatureAggregator(t), mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)) + require.NoError(t, err) + require.NotNil(t, processor) + + mockedProcessor.AssertExpectations(t) + mockedFactory.AssertExpectations(t) +} + +// TestVoteProcessorFactory_CreateWithInvalidVote tests that processing proposal with invalid vote doesn't return +// vote processor and returns correct error(sentinel or exception). 
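The factory tests in this file separate expected sentinel errors (checked with typed predicates such as models.IsInvalidProposalError) from unexpected exceptions (checked with errors.Is against the concrete error). A generic sketch of that classification using only the standard errors package; the sentinel name is illustrative:

package main

import (
	"errors"
	"fmt"
)

// errInvalidInput plays the role of an expected, documented sentinel.
var errInvalidInput = errors.New("invalid input")

func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, errInvalidInput):
		return "sentinel: reject the input, keep running"
	default:
		return "exception: propagate up the call stack"
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(fmt.Errorf("wrapping: %w", errInvalidInput)))
	fmt.Println(classify(errors.New("unexpected failure")))
}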
+func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { + mockedFactory := mocks.VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{} + + t.Run("invalid-vote", func(t *testing.T) { + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + mockedProcessor := &mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote]{} + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(models.NewInvalidVoteErrorf(vote, "")).Once() + mockedFactory.On("Create", helper.Logger(), []byte{}, proposal, mock.Anything, mock.Anything, mock.Anything).Return(mockedProcessor, nil).Once() + + voteProcessorFactory := &VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{ + baseFactory: func(log consensus.TraceLogger, filter []byte, state *models.State[*helper.TestState], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + return mockedFactory.Create(log, filter, proposal, dsTag, aggregator, votingProvider) + }, + } + + processor, err := voteProcessorFactory.Create(helper.Logger(), []byte{}, proposal, []byte{}, mocks.NewSignatureAggregator(t), mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)) + require.Error(t, err) + require.Nil(t, processor) + require.True(t, models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + + mockedProcessor.AssertExpectations(t) + }) + t.Run("process-vote-exception", func(t *testing.T) { + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + mockedProcessor := &mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote]{} + exception := errors.New("process-exception") + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(exception).Once() + + mockedFactory.On("Create", helper.Logger(), []byte{}, proposal, mock.Anything, mock.Anything, mock.Anything).Return(mockedProcessor, nil).Once() + + voteProcessorFactory := &VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{ + baseFactory: func(log consensus.TraceLogger, filter []byte, state *models.State[*helper.TestState], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + return mockedFactory.Create(log, filter, proposal, dsTag, aggregator, votingProvider) + }, + } + + processor, err := voteProcessorFactory.Create(helper.Logger(), []byte{}, proposal, []byte{}, mocks.NewSignatureAggregator(t), mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)) + require.ErrorIs(t, err, exception) + require.Nil(t, processor) + // an unexpected exception should _not_ be interpreted as the state being invalid + require.False(t, models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + + mockedProcessor.AssertExpectations(t) + }) + + mockedFactory.AssertExpectations(t) +} + +// TestVoteProcessorFactory_CreateProcessException tests that VoteProcessorFactory correctly handles exception +// while creating processor for requested proposal. 
+func TestVoteProcessorFactory_CreateProcessException(t *testing.T) { + mockedFactory := mocks.VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{} + + proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + exception := errors.New("create-exception") + + mockedFactory.On("Create", helper.Logger(), []byte{}, proposal, mock.Anything, mock.Anything, mock.Anything).Return(nil, exception).Once() + voteProcessorFactory := &VoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer]{ + baseFactory: func(log consensus.TraceLogger, filter []byte, state *models.State[*helper.TestState], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + return mockedFactory.Create(log, filter, proposal, dsTag, aggregator, votingProvider) + }, + } + + processor, err := voteProcessorFactory.Create(helper.Logger(), []byte{}, proposal, []byte{}, mocks.NewSignatureAggregator(t), mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)) + require.ErrorIs(t, err, exception) + require.Nil(t, processor) + // an unexpected exception should _not_ be interpreted as the state being invalid + require.False(t, models.IsInvalidProposalError[*helper.TestState, *helper.TestVote](err)) + + mockedFactory.AssertExpectations(t) +} diff --git a/consensus/votecollector/statemachine.go b/consensus/votecollector/statemachine.go new file mode 100644 index 0000000..e314422 --- /dev/null +++ b/consensus/votecollector/statemachine.go @@ -0,0 +1,417 @@ +package votecollector + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator" +) + +var ( + ErrDifferentCollectorState = errors.New("different state") +) + +// VerifyingVoteProcessorFactory generates consensus.VerifyingVoteCollector +// instances +type VerifyingVoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] = func( + tracer consensus.TraceLogger, + filter []byte, + proposal *models.SignedProposal[StateT, VoteT], + dsTag []byte, + aggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) + +// VoteCollector implements a state machine for transition between different +// states of vote collector +type VoteCollector[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + sync.Mutex + tracer consensus.TraceLogger + filter []byte + workers consensus.Workers + notifier consensus.VoteAggregationConsumer[StateT, VoteT] + createVerifyingProcessor VerifyingVoteProcessorFactory[StateT, VoteT, PeerIDT] + dsTag []byte + aggregator consensus.SignatureAggregator + voter consensus.VotingProvider[StateT, VoteT, PeerIDT] + + votesCache VotesCache[VoteT] + votesProcessor atomic.Value +} + +var _ consensus.VoteCollector[*nilUnique, *nilUnique] = (*VoteCollector[*nilUnique, *nilUnique, *nilUnique])(nil) + +func ( + m *VoteCollector[StateT, VoteT, PeerIDT], +) atomicLoadProcessor() consensus.VoteProcessor[VoteT] { + return m.votesProcessor.Load().(*atomicValueWrapper[VoteT]).processor +} + +// atomic.Value doesn't allow storing interfaces as atomic values, +// it 
requires that stored type is always the same, so we need a wrapper that +// will mitigate this restriction +// https://github.com/golang/go/issues/22550 +type atomicValueWrapper[VoteT models.Unique] struct { + processor consensus.VoteProcessor[VoteT] +} + +func NewStateMachineFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + filter []byte, + notifier consensus.VoteAggregationConsumer[StateT, VoteT], + verifyingVoteProcessorFactory VerifyingVoteProcessorFactory[ + StateT, + VoteT, + PeerIDT, + ], + dsTag []byte, + aggregator consensus.SignatureAggregator, + voter consensus.VotingProvider[StateT, VoteT, PeerIDT], +) voteaggregator.NewCollectorFactoryMethod[StateT, VoteT] { + return func(rank uint64, workers consensus.Workers) ( + consensus.VoteCollector[StateT, VoteT], + error, + ) { + return NewStateMachine[StateT, VoteT]( + rank, + filter, + tracer, + workers, + notifier, + verifyingVoteProcessorFactory, + dsTag, + aggregator, + voter, + ), nil + } +} + +func NewStateMachine[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + rank uint64, + filter []byte, + tracer consensus.TraceLogger, + workers consensus.Workers, + notifier consensus.VoteAggregationConsumer[StateT, VoteT], + verifyingVoteProcessorFactory VerifyingVoteProcessorFactory[ + StateT, + VoteT, + PeerIDT, + ], + dsTag []byte, + aggregator consensus.SignatureAggregator, + voter consensus.VotingProvider[StateT, VoteT, PeerIDT], +) *VoteCollector[StateT, VoteT, PeerIDT] { + sm := &VoteCollector[StateT, VoteT, PeerIDT]{ + tracer: tracer, + filter: filter, + workers: workers, + notifier: notifier, + createVerifyingProcessor: verifyingVoteProcessorFactory, + votesCache: *NewVotesCache[VoteT](rank), + dsTag: dsTag, + aggregator: aggregator, + voter: voter, + } + + // without a state, we don't process votes (only cache them) + sm.votesProcessor.Store(&atomicValueWrapper[VoteT]{ + processor: NewNoopCollector[VoteT](consensus.VoteCollectorStatusCaching), + }) + return sm +} + +// AddVote adds a vote to current vote collector +// All expected errors are handled via callbacks to notifier. +// Under normal execution only exceptions are propagated to caller. +func (m *VoteCollector[StateT, VoteT, PeerIDT]) AddVote(vote *VoteT) error { + // Cache vote + err := m.votesCache.AddVote(vote) + if err != nil { + if errors.Is(err, RepeatedVoteErr) { + return nil + } + doubleVoteErr, isDoubleVoteErr := models.AsDoubleVoteError[VoteT](err) + if isDoubleVoteErr { + m.notifier.OnDoubleVotingDetected( + doubleVoteErr.FirstVote, + doubleVoteErr.ConflictingVote, + ) + return nil + } + return fmt.Errorf( + "internal error adding vote %x to cache for state %x: %w", + (*vote).Identity(), + (*vote).Source(), + err, + ) + } + + err = m.processVote(vote) + if err != nil { + if errors.Is(err, VoteForIncompatibleStateError) { + // For honest nodes, there should be only a single proposal per rank and + // all votes should be for this proposal. However, byzantine nodes might + // deviate from this happy path: + // * A malicious leader might create multiple (individually valid) + // conflicting proposals for the same rank. Honest replicas will send + // correct votes for whatever proposal they see first. We only accept + // the first valid state and reject any other conflicting states that + // show up later. + // * Alternatively, malicious replicas might send votes with the expected + // rank, but for states that don't exist. 
+ // In either case, receiving votes for the same rank but for different + // state IDs is a symptom of malicious consensus participants. Hence, we + // log it here as a warning: + m.tracer.Error("received vote for incompatible state", err) + + return nil + } + return fmt.Errorf( + "internal error processing vote %x for state %x: %w", + (*vote).Identity(), + (*vote).Source(), + err, + ) + } + return nil +} + +// processVote uses compare-and-repeat pattern to process vote with underlying +// vote processor +func (m *VoteCollector[StateT, VoteT, PeerIDT]) processVote(vote *VoteT) error { + for { + processor := m.atomicLoadProcessor() + currentState := processor.Status() + err := processor.Process(vote) + if err != nil { + if invalidVoteErr, ok := models.AsInvalidVoteError[VoteT](err); ok { + m.notifier.OnInvalidVoteDetected(*invalidVoteErr) + return nil + } + // ATTENTION: due to how our logic is designed this situation is only + // possible where we receive the same vote twice, this is not a case of + // double voting. This scenario is possible if leader submits their vote + // additionally to the vote in proposal. + if models.IsDuplicatedSignerError(err) { + m.tracer.Trace(fmt.Sprintf("duplicated signer %x", (*vote).Identity())) + return nil + } + return err + } + + if currentState != m.Status() { + continue + } + + m.notifier.OnVoteProcessed(vote) + return nil + } +} + +// Status returns the status of underlying vote processor +func (m *VoteCollector[StateT, VoteT, PeerIDT]) Status() consensus.VoteCollectorStatus { + return m.atomicLoadProcessor().Status() +} + +// Rank returns rank associated with this collector +func (m *VoteCollector[StateT, VoteT, PeerIDT]) Rank() uint64 { + return m.votesCache.Rank() +} + +// ProcessState performs validation of state signature and processes state with +// respected collector. In case we have received double proposal, we will stop +// attempting to build a QC for this rank, because we don't want to build on any +// proposal from an equivocating primary. Note: slashing challenges for proposal +// equivocation are triggered by consensus.Forks, so we don't have to do +// anything else here. +// +// The internal state change is implemented as an atomic compare-and-swap, i.e. +// the state transition is only executed if VoteCollector's internal state is +// equal to `expectedValue`. 
The implementation only allows the transitions +// +// CachingVotes -> VerifyingVotes +// CachingVotes -> Invalid +// VerifyingVotes -> Invalid +func (m *VoteCollector[StateT, VoteT, PeerIDT]) ProcessState( + proposal *models.SignedProposal[StateT, VoteT], +) error { + + if proposal.State.Rank != m.Rank() { + return fmt.Errorf( + "this VoteCollector requires a proposal for rank %d but received state %x with rank %d", + m.votesCache.Rank(), + proposal.State.Identifier, + proposal.State.Rank, + ) + } + + for { + proc := m.atomicLoadProcessor() + + switch proc.Status() { + // first valid state for this rank: commence state transition from caching + // to verifying + case consensus.VoteCollectorStatusCaching: + err := m.caching2Verifying(proposal) + if errors.Is(err, ErrDifferentCollectorState) { + continue // concurrent state update by other thread => restart our logic + } + + if err != nil { + return fmt.Errorf( + "internal error updating VoteProcessor's status from %s to %s for state %x: %w", + proc.Status().String(), + consensus.VoteCollectorStatusVerifying.String(), + proposal.State.Identifier, + err, + ) + } + + m.tracer.Trace("vote collector status changed from caching to verifying") + + m.processCachedVotes(proposal.State) + + // We already received a valid state for this rank. Check whether the + // proposer is equivocating and terminate vote processing in this case. + // Note: proposal equivocation is handled by consensus.Forks, so we don't + // have to do anything else here. + case consensus.VoteCollectorStatusVerifying: + verifyingProc, ok := proc.(consensus.VerifyingVoteProcessor[StateT, VoteT]) + if !ok { + return fmt.Errorf( + "while processing state %x, found that VoteProcessor reports status %s but has an incompatible implementation type %T", + proposal.State.Identifier, + proc.Status(), + verifyingProc, + ) + } + if verifyingProc.State().Identifier != proposal.State.Identifier { + m.terminateVoteProcessing() + } + + // Vote processing for this rank has already been terminated. Note: proposal + // equivocation is handled by consensus.Forks, so we don't have anything to + // do here. + case consensus.VoteCollectorStatusInvalid: /* no op */ + + default: + return fmt.Errorf( + "while processing state %x, found that VoteProcessor reported unknown status %s", + proposal.State.Identifier, + proc.Status(), + ) + } + + return nil + } +} + +// RegisterVoteConsumer registers a VoteConsumer. Upon registration, the +// collector feeds all cached votes into the consumer in the order they arrived. +// CAUTION, VoteConsumer implementations must be +// - NON-BLOCKING and consume the votes without noteworthy delay, and +// - CONCURRENCY SAFE +func (m *VoteCollector[StateT, VoteT, PeerIDT]) RegisterVoteConsumer( + consumer consensus.VoteConsumer[VoteT], +) { + m.votesCache.RegisterVoteConsumer(consumer) +} + +// caching2Verifying ensures that the VoteProcessor is currently in state +// `VoteCollectorStatusCaching` and replaces it by a newly-created +// VerifyingVoteProcessor. 
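caching2Verifying (below) swaps the processor under a mutex while reads go through an atomic.Value; since atomic.Value requires a single concrete stored type, the processor interface is wrapped in a small struct. A condensed sketch of that combination with illustrative types:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type processor interface{ Status() string }

type noop struct{ status string }

func (n noop) Status() string { return n.status }

// wrapper gives atomic.Value a single concrete type to store,
// even though the payload is an interface.
type wrapper struct{ p processor }

type machine struct {
	mu   sync.Mutex
	curr atomic.Value // always holds *wrapper
}

func (m *machine) load() processor { return m.curr.Load().(*wrapper).p }

// transition swaps the processor only if the current status matches `from`.
func (m *machine) transition(from string, next processor) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.load().Status() != from {
		return fmt.Errorf("unexpected status %q", m.load().Status())
	}
	m.curr.Store(&wrapper{p: next})
	return nil
}

func main() {
	m := &machine{}
	m.curr.Store(&wrapper{p: noop{"caching"}})
	fmt.Println(m.transition("caching", noop{"verifying"}), m.load().Status())
}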
+// Error returns: +// - ErrDifferentCollectorState if the VoteCollector's state is _not_ +// `CachingVotes` +// - all other errors are unexpected and potential symptoms of internal bugs +// or state corruption (fatal) +func (m *VoteCollector[StateT, VoteT, PeerIDT]) caching2Verifying( + proposal *models.SignedProposal[StateT, VoteT], +) error { + stateID := proposal.State.Identifier + newProc, err := m.createVerifyingProcessor( + m.tracer, + m.filter, + proposal, + m.dsTag, + m.aggregator, + m.voter, + ) + if err != nil { + return fmt.Errorf( + "failed to create VerifyingVoteProcessor for state %x: %w", + stateID, + err, + ) + } + newProcWrapper := &atomicValueWrapper[VoteT]{processor: newProc} + + m.Lock() + defer m.Unlock() + proc := m.atomicLoadProcessor() + if proc.Status() != consensus.VoteCollectorStatusCaching { + return fmt.Errorf( + "processors's current state is %s: %w", + proc.Status().String(), + ErrDifferentCollectorState, + ) + } + m.votesProcessor.Store(newProcWrapper) + return nil +} + +func (m *VoteCollector[StateT, VoteT, PeerIDT]) terminateVoteProcessing() { + if m.Status() == consensus.VoteCollectorStatusInvalid { + return + } + newProcWrapper := &atomicValueWrapper[VoteT]{ + processor: NewNoopCollector[VoteT](consensus.VoteCollectorStatusInvalid), + } + + m.Lock() + defer m.Unlock() + m.votesProcessor.Store(newProcWrapper) +} + +// processCachedVotes feeds all cached votes into the VoteProcessor +func (m *VoteCollector[StateT, VoteT, PeerIDT]) processCachedVotes( + state *models.State[StateT], +) { + cachedVotes := m.votesCache.All() + m.tracer.Trace(fmt.Sprintf("processing %d cached votes", len(cachedVotes))) + for _, vote := range cachedVotes { + if (*vote).Source() != state.Identifier { + continue + } + + stateVote := vote + voteProcessingTask := func() { + err := m.processVote(stateVote) + if err != nil { + m.tracer.Error("internal error processing cached vote", err) + } + } + m.workers.Submit(voteProcessingTask) + } +} diff --git a/consensus/votecollector/statemachine_test.go b/consensus/votecollector/statemachine_test.go new file mode 100644 index 0000000..3b5978e --- /dev/null +++ b/consensus/votecollector/statemachine_test.go @@ -0,0 +1,286 @@ +package votecollector + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/gammazero/workerpool" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestStateMachine(t *testing.T) { + suite.Run(t, new(StateMachineTestSuite)) +} + +var factoryError = errors.New("factory error") + +// StateMachineTestSuite is a test suite for testing VoteCollector. It stores mocked +// VoteProcessors internally for testing behavior and state transitions for VoteCollector. 
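The collector and its tests submit asynchronous vote-processing tasks to a gammazero/workerpool instance, stopping it with StopWait so no work outlives a test. A minimal usage sketch of that library, assuming its standard New/Submit/StopWait API:

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/gammazero/workerpool"
)

func main() {
	wp := workerpool.New(4) // at most 4 tasks run concurrently

	var processed atomic.Int64
	for i := 0; i < 10; i++ {
		wp.Submit(func() { processed.Add(1) }) // queued, executed asynchronously
	}

	// StopWait blocks until every submitted task has finished,
	// which is why the test suites call it in TearDownTest.
	wp.StopWait()
	fmt.Println("processed:", processed.Load()) // processed: 10
}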
+type StateMachineTestSuite struct { + suite.Suite + + rank uint64 + notifier *mocks.VoteAggregationConsumer[*helper.TestState, *helper.TestVote] + workerPool *workerpool.WorkerPool + factoryMethod VerifyingVoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer] + mockedProcessors map[models.Identity]*mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote] + collector *VoteCollector[*helper.TestState, *helper.TestVote, *helper.TestPeer] +} + +func (s *StateMachineTestSuite) TearDownTest() { + // Without this line we are risking running into weird situations where one test has finished but there are active workers + // that are executing some work on the shared pool. Need to ensure that all pending work has been executed before + // starting next test. + s.workerPool.StopWait() +} + +func (s *StateMachineTestSuite) SetupTest() { + s.rank = 1000 + s.mockedProcessors = make(map[models.Identity]*mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote]) + s.notifier = mocks.NewVoteAggregationConsumer[*helper.TestState, *helper.TestVote](s.T()) + + s.factoryMethod = func(log consensus.TraceLogger, filter []byte, state *models.SignedProposal[*helper.TestState, *helper.TestVote], dsTag []byte, aggregator consensus.SignatureAggregator, voter consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + if processor, found := s.mockedProcessors[state.State.Identifier]; found { + return processor, nil + } + return nil, fmt.Errorf("mocked processor %v not found: %w", state.State.Identifier, factoryError) + } + + s.workerPool = workerpool.New(4) + s.collector = NewStateMachine(s.rank, []byte{}, helper.Logger(), s.workerPool, s.notifier, s.factoryMethod, []byte{}, consensus.SignatureAggregator(mocks.NewSignatureAggregator(s.T())), mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](s.T())) +} + +// prepareMockedProcessor prepares a mocked processor and stores it in map, later it will be used +// to mock behavior of verifying vote processor. 
+func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]) *mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote] {
+	processor := &mocks.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote]{}
+	processor.On("State").Return(func() *models.State[*helper.TestState] {
+		return proposal.State
+	}).Maybe()
+	processor.On("Status").Return(consensus.VoteCollectorStatusVerifying)
+	s.mockedProcessors[proposal.State.Identifier] = processor
+	return processor
+}
+
+// TestStatus_StateTransitions tests that Status returns the correct state of the VoteCollector in
+// scenarios where proposal processing can change the collector's state.
+func (s *StateMachineTestSuite) TestStatus_StateTransitions() {
+	state := helper.MakeState(helper.WithStateRank[*helper.TestState](s.rank))
+	proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState(state))))
+	s.prepareMockedProcessor(proposal)
+
+	// by default, the collector is created in the caching status
+	require.Equal(s.T(), consensus.VoteCollectorStatusCaching, s.collector.Status())
+
+	// after processing a state proposal, the collector should move to the verifying status
+	err := s.collector.ProcessState(proposal)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), consensus.VoteCollectorStatusVerifying, s.collector.Status())
+
+	// after submitting a conflicting (double) proposal, the collector should transition to the invalid status
+	err = s.collector.ProcessState(makeSignedProposalWithRank(s.rank))
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), consensus.VoteCollectorStatusInvalid, s.collector.Status())
+}
+
+// Test_FactoryErrorPropagation verifies that errors from the injected
+// factory are passed through (potentially wrapped), but are not replaced.
+func (s *StateMachineTestSuite) Test_FactoryErrorPropagation() { + factoryError := errors.New("factory error") + factory := func(log consensus.TraceLogger, filter []byte, state *models.SignedProposal[*helper.TestState, *helper.TestVote], dsTag []byte, aggregator consensus.SignatureAggregator, voter consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) (consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote], error) { + return nil, factoryError + } + s.collector.createVerifyingProcessor = factory + + // failing to create collector has to result in error and won't change state + proposal := makeSignedProposalWithRank(s.rank) + err := s.collector.ProcessState(proposal) + require.ErrorIs(s.T(), err, factoryError) + require.Equal(s.T(), consensus.VoteCollectorStatusCaching, s.collector.Status()) +} + +// TestAddVote_VerifyingState tests that AddVote correctly process valid and invalid votes as well +// as repeated, invalid and double votes in verifying state +func (s *StateMachineTestSuite) TestAddVote_VerifyingState() { + proposal := makeSignedProposalWithRank(s.rank) + state := proposal.State + processor := s.prepareMockedProcessor(proposal) + err := s.collector.ProcessState(proposal) + require.NoError(s.T(), err) + s.T().Run("add-valid-vote", func(t *testing.T) { + vote := helper.VoteForStateFixture(state) + s.notifier.On("OnVoteProcessed", &vote).Once() + processor.On("Process", &vote).Return(nil).Once() + err := s.collector.AddVote(&vote) + require.NoError(t, err) + processor.AssertCalled(t, "Process", &vote) + }) + s.T().Run("add-double-vote", func(t *testing.T) { + firstVote := helper.VoteForStateFixture(state) + s.notifier.On("OnVoteProcessed", &firstVote).Once() + processor.On("Process", &firstVote).Return(nil).Once() + err := s.collector.AddVote(&firstVote) + require.NoError(t, err) + + secondVote := helper.VoteFixture(func(vote **helper.TestVote) { + (*vote).Rank = firstVote.Rank + (*vote).ID = firstVote.ID + }) // voted stateID is randomly sampled, i.e. 
it will be different from firstVote + s.notifier.On("OnDoubleVotingDetected", &firstVote, &secondVote).Return(nil).Once() + + err = s.collector.AddVote(&secondVote) + // we shouldn't get an error + require.NoError(t, err) + + // but should get notified about double voting + s.notifier.AssertCalled(t, "OnDoubleVotingDetected", &firstVote, &secondVote) + processor.AssertCalled(t, "Process", &firstVote) + }) + s.T().Run("add-invalid-vote", func(t *testing.T) { + vote := helper.VoteForStateFixture(state, func(vote **helper.TestVote) { + (*vote).Rank = s.rank + }) + processor.On("Process", &vote).Return(models.NewInvalidVoteErrorf[*helper.TestVote](&vote, "")).Once() + s.notifier.On("OnInvalidVoteDetected", mock.Anything).Run(func(args mock.Arguments) { + invalidVoteErr := args.Get(0).(models.InvalidVoteError[*helper.TestVote]) + require.Equal(s.T(), &vote, invalidVoteErr.Vote) + }).Return(nil).Once() + err := s.collector.AddVote(&vote) + // in case process returns models.InvalidVoteError we should silently ignore this error + require.NoError(t, err) + + // but should get notified about invalid vote + s.notifier.AssertCalled(t, "OnInvalidVoteDetected", mock.Anything) + processor.AssertCalled(t, "Process", &vote) + }) + s.T().Run("add-repeated-vote", func(t *testing.T) { + vote := helper.VoteForStateFixture(state) + s.notifier.On("OnVoteProcessed", &vote).Once() + processor.On("Process", &vote).Return(nil).Once() + err := s.collector.AddVote(&vote) + require.NoError(t, err) + + // calling with same vote should exit early without error and don't do any extra processing + err = s.collector.AddVote(&vote) + require.NoError(t, err) + + processor.AssertCalled(t, "Process", &vote) + }) + s.T().Run("add-incompatible-rank-vote", func(t *testing.T) { + vote := helper.VoteForStateFixture(state, func(vote **helper.TestVote) { + (*vote).Rank = s.rank + 1 + }) + err := s.collector.AddVote(&vote) + require.ErrorIs(t, err, VoteForIncompatibleRankError) + }) + s.T().Run("add-incompatible-state-vote", func(t *testing.T) { + vote := helper.VoteForStateFixture(state, func(vote **helper.TestVote) { + (*vote).Rank = s.rank + }) + processor.On("Process", &vote).Return(VoteForIncompatibleStateError).Once() + err := s.collector.AddVote(&vote) + // in case process returns VoteForIncompatibleStateError we should silently ignore this error + require.NoError(t, err) + processor.AssertCalled(t, "Process", &vote) + }) + s.T().Run("unexpected-VoteProcessor-errors-are-passed-up", func(t *testing.T) { + unexpectedError := errors.New("some unexpected error") + vote := helper.VoteForStateFixture(state, func(vote **helper.TestVote) { + (*vote).Rank = s.rank + }) + processor.On("Process", &vote).Return(unexpectedError).Once() + err := s.collector.AddVote(&vote) + require.ErrorIs(t, err, unexpectedError) + }) +} + +// TestProcessState_ProcessingOfCachedVotes tests that after processing state proposal are cached votes +// are sent to vote processor +func (s *StateMachineTestSuite) TestProcessState_ProcessingOfCachedVotes() { + votes := 10 + proposal := makeSignedProposalWithRank(s.rank) + state := proposal.State + processor := s.prepareMockedProcessor(proposal) + for i := 0; i < votes; i++ { + vote := helper.VoteForStateFixture(state) + // once when caching vote, and once when processing cached vote + s.notifier.On("OnVoteProcessed", &vote).Twice() + // eventually it has to be processed by processor + processor.On("Process", &vote).Return(nil).Once() + require.NoError(s.T(), s.collector.AddVote(&vote)) + } + + err := 
s.collector.ProcessState(proposal) + require.NoError(s.T(), err) + + time.Sleep(100 * time.Millisecond) + + processor.AssertExpectations(s.T()) +} + +// Test_VoteProcessorErrorPropagation verifies that unexpected errors from the `VoteProcessor` +// are propagated up the call stack (potentially wrapped), but are not replaced. +func (s *StateMachineTestSuite) Test_VoteProcessorErrorPropagation() { + proposal := makeSignedProposalWithRank(s.rank) + state := proposal.State + processor := s.prepareMockedProcessor(proposal) + + err := s.collector.ProcessState(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]( + helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState[*helper.TestState](state))))) + require.NoError(s.T(), err) + + unexpectedError := errors.New("some unexpected error") + vote := helper.VoteForStateFixture(state, func(vote **helper.TestVote) { + (*vote).Rank = s.rank + }) + processor.On("Process", &vote).Return(unexpectedError).Once() + err = s.collector.AddVote(&vote) + require.ErrorIs(s.T(), err, unexpectedError) +} + +// RegisterVoteConsumer verifies that after registering vote consumer we are receiving all new and past votes +// in strict ordering of arrival. +func (s *StateMachineTestSuite) RegisterVoteConsumer() { + votes := 10 + proposal := makeSignedProposalWithRank(s.rank) + state := proposal.State + processor := s.prepareMockedProcessor(proposal) + expectedVotes := make([]*helper.TestVote, 0) + for i := 0; i < votes; i++ { + vote := helper.VoteForStateFixture(state) + // eventually it has to be process by processor + processor.On("Process", &vote).Return(nil).Once() + require.NoError(s.T(), s.collector.AddVote(&vote)) + expectedVotes = append(expectedVotes, vote) + } + + actualVotes := make([]*helper.TestVote, 0) + consumer := func(vote **helper.TestVote) { + actualVotes = append(actualVotes, *vote) + } + + s.collector.RegisterVoteConsumer(consumer) + + for i := 0; i < votes; i++ { + vote := helper.VoteForStateFixture(state) + // eventually it has to be process by processor + processor.On("Process", &vote).Return(nil).Once() + require.NoError(s.T(), s.collector.AddVote(&vote)) + expectedVotes = append(expectedVotes, vote) + } + + require.Equal(s.T(), expectedVotes, actualVotes) +} + +func makeSignedProposalWithRank(rank uint64) *models.SignedProposal[*helper.TestState, *helper.TestVote] { + return helper.MakeSignedProposal[*helper.TestState, *helper.TestVote](helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState(helper.MakeState(helper.WithStateRank[*helper.TestState](rank)))))) +} diff --git a/consensus/votecollector/testutil.go b/consensus/votecollector/testutil.go new file mode 100644 index 0000000..d49cd6b --- /dev/null +++ b/consensus/votecollector/testutil.go @@ -0,0 +1,51 @@ +package votecollector + +import ( + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + mockconsensus "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type VoteProcessorTestSuiteBase struct { + suite.Suite + + sigWeight uint64 + provingTotalWeight uint64 + onQCCreatedState mock.Mock + + provingAggregator *mockconsensus.WeightedSignatureAggregator + minRequiredWeight uint64 + proposal *models.SignedProposal[*helper.TestState, *helper.TestVote] +} + +func (s *VoteProcessorTestSuiteBase) SetupTest() { + s.provingAggregator = 
&mockconsensus.WeightedSignatureAggregator{} + s.proposal = helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]() + + // let's assume we have 19 nodes each with weight 100 + s.sigWeight = 100 + s.minRequiredWeight = 1300 // we require at least 13 sigs to collect min weight + s.provingTotalWeight = 0 + + // setup proving signature aggregator + s.provingAggregator.On("TrustedAdd", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + s.provingTotalWeight += s.sigWeight + }).Return(func(signerID models.Identity, sig []byte) uint64 { + return s.provingTotalWeight + }, func(signerID models.Identity, sig []byte) error { + return nil + }).Maybe() + s.provingAggregator.On("TotalWeight").Return(func() uint64 { + return s.provingTotalWeight + }).Maybe() +} + +// onQCCreated is a special function that registers call in mocked state. +// ATTENTION: don't change name of this function since the same name is used in: +// s.onQCCreatedState.On("onQCCreated") statements +func (s *VoteProcessorTestSuiteBase) onQCCreated(qc models.QuorumCertificate) { + s.onQCCreatedState.Called(qc) +} diff --git a/consensus/votecollector/vote_cache.go b/consensus/votecollector/vote_cache.go new file mode 100644 index 0000000..5963e1d --- /dev/null +++ b/consensus/votecollector/vote_cache.go @@ -0,0 +1,149 @@ +package votecollector + +import ( + "errors" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var ( + // RepeatedVoteErr is emitted, when we receive a vote for the same state + // from the same voter multiple times. This error does _not_ indicate + // equivocation. + RepeatedVoteErr = errors.New("duplicated vote") +) + +// voteContainer container stores the vote and in index representing +// the order in which the votes were received +type voteContainer[VoteT models.Unique] struct { + Vote *VoteT + index int +} + +// VotesCache maintains a _concurrency safe_ cache of votes for one particular +// rank. The cache memorizes the order in which the votes were received. Votes +// are de-duplicated based on the following rules: +// - Vor each voter (i.e. SignerID), we store the _first_ vote v0. +// - For any subsequent vote v, we check whether v.Identifier == v0.Identifier. +// If this is the case, we consider the vote a duplicate and drop it. +// If v and v0 have different Identifiers, the voter is equivocating and +// we return a models.DoubleVoteError +type VotesCache[VoteT models.Unique] struct { + lock sync.RWMutex + rank uint64 + votes map[models.Identity]voteContainer[VoteT] // signerID -> first vote + voteConsumers []consensus.VoteConsumer[VoteT] +} + +// NewVotesCache instantiates a VotesCache for the given rank +func NewVotesCache[VoteT models.Unique](rank uint64) *VotesCache[VoteT] { + return &VotesCache[VoteT]{ + rank: rank, + votes: make(map[models.Identity]voteContainer[VoteT]), + } +} + +func (vc *VotesCache[VoteT]) Rank() uint64 { return vc.rank } + +// AddVote stores a vote in the cache. The following errors are expected during +// normal operations: +// - nil: if the vote was successfully added +// - models.DoubleVoteError is returned if the voter is equivocating +// (i.e. voting in the same rank for different states). +// - RepeatedVoteErr is returned when adding a vote for the same state from +// the same voter multiple times. +// - IncompatibleRankErr is returned if the vote is for a different rank. +// +// When AddVote returns an error, the vote is _not_ stored. 
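For reference, a minimal sketch of how a caller might branch on the AddVote error contract documented above. The helper function, its name, and the reuse of the helper.TestVote fixture type are illustrative assumptions, not part of this patch:

package votecollector

import (
	"errors"
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// cacheVote is a hypothetical helper: it stores a vote in the per-rank cache
// and maps the documented error returns onto caller-side decisions.
func cacheVote(cache *VotesCache[*helper.TestVote], vote **helper.TestVote) error {
	err := cache.AddVote(vote)
	switch {
	case err == nil:
		return nil // first vote from this signer: stored and forwarded to consumers
	case errors.Is(err, RepeatedVoteErr):
		return nil // exact duplicate: benign, drop silently
	case errors.Is(err, VoteForIncompatibleRankError):
		return nil // vote was routed to the wrong per-rank cache: ignore here
	case models.IsDoubleVoteError[*helper.TestVote](err):
		return err // equivocation: surface it so the collector can notify its consumer
	default:
		return fmt.Errorf("unexpected error caching vote: %w", err)
	}
}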
+func (vc *VotesCache[VoteT]) AddVote(vote *VoteT) error { + if (*vote).GetRank() != vc.rank { + return VoteForIncompatibleRankError + } + vc.lock.Lock() + defer vc.lock.Unlock() + + // De-duplicated votes based on the following rules: + // * Vor each voter (i.e. SignerID), we store the _first_ vote v0. + // * For any subsequent vote v, we check whether + // v.Identifier == v0.Identifier. + // If this is the case, we consider the vote a duplicate and drop it. + // If v and v0 have different Identifiers, the voter is equivocating and + // we return a models.DoubleVoteError + firstVote, exists := vc.votes[(*vote).Identity()] + if exists { + if (*firstVote.Vote).Source() != (*vote).Source() { + return models.NewDoubleVoteErrorf( + firstVote.Vote, + vote, + "detected vote equivocation at rank: %d", + vc.rank, + ) + } + return RepeatedVoteErr + } + + // previously unknown vote: (1) store and (2) forward to consumers + vc.votes[(*vote).Identity()] = voteContainer[VoteT]{vote, len(vc.votes)} + for _, consumer := range vc.voteConsumers { + consumer(vote) + } + return nil +} + +// GetVote returns the stored vote for the given `signerID`. Returns: +// - (vote, true) if a vote from signerID is known +// - (false, nil) no vote from signerID is known +func (vc *VotesCache[VoteT]) GetVote(signerID models.Identity) (*VoteT, bool) { + vc.lock.RLock() + container, exists := vc.votes[signerID] // if signerID is unknown, its `Vote` pointer is nil + vc.lock.RUnlock() + return container.Vote, exists +} + +// Size returns the number of cached votes +func (vc *VotesCache[VoteT]) Size() int { + vc.lock.RLock() + s := len(vc.votes) + vc.lock.RUnlock() + return s +} + +// RegisterVoteConsumer registers a VoteConsumer. Upon registration, the cache +// feeds all cached votes into the consumer in the order they arrived. +// CAUTION: a consumer _must_ be non-stateing and consume the votes without +// noteworthy delay. Otherwise, consensus speed is impacted. +// +// Expected usage patter: During happy-path operations, the state arrives in a +// timely manner. Hence, we expect that only a few votes are cached when a +// consumer is registered. For the purpose of forensics, we might register a +// consumer later, when already lots of votes are cached. However, this should +// be a rare occurrence (we except moderate performance overhead in this case). +func (vc *VotesCache[VoteT]) RegisterVoteConsumer( + consumer consensus.VoteConsumer[VoteT], +) { + vc.lock.Lock() + defer vc.lock.Unlock() + + vc.voteConsumers = append(vc.voteConsumers, consumer) + for _, vote := range vc.all() { // feed the consumer with the cached votes + consumer(vote) // non-stateing per API contract + } +} + +// All returns all currently cached votes. Concurrency safe. +func (vc *VotesCache[VoteT]) All() []*VoteT { + vc.lock.Lock() + defer vc.lock.Unlock() + return vc.all() +} + +// all returns all currently cached votes. 
NOT concurrency safe +func (vc *VotesCache[VoteT]) all() []*VoteT { + orderedVotes := make([]*VoteT, len(vc.votes)) + for _, v := range vc.votes { + orderedVotes[v.index] = v.Vote + } + return orderedVotes +} diff --git a/consensus/votecollector/vote_cache_test.go b/consensus/votecollector/vote_cache_test.go new file mode 100644 index 0000000..b2ebbc0 --- /dev/null +++ b/consensus/votecollector/vote_cache_test.go @@ -0,0 +1,189 @@ +package votecollector + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TestVotesCache_Rank tests that Rank returns same value that was set by constructor +func TestVotesCache_Rank(t *testing.T) { + rank := uint64(100) + cache := NewVotesCache[*helper.TestVote](rank) + require.Equal(t, rank, cache.Rank()) +} + +// TestVotesCache_AddVoteRepeatedVote tests that AddVote skips duplicated votes +func TestVotesCache_AddVoteRepeatedVote(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewVotesCache[*helper.TestVote](rank) + vote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + }) + + require.NoError(t, cache.AddVote(&vote)) + err := cache.AddVote(&vote) + require.ErrorIs(t, err, RepeatedVoteErr) +} + +// TestVotesCache_AddVoteIncompatibleRank tests that adding vote with incompatible rank results in error +func TestVotesCache_AddVoteIncompatibleRank(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewVotesCache[*helper.TestVote](rank) + vote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + 1 + }) + err := cache.AddVote(&vote) + require.ErrorIs(t, err, VoteForIncompatibleRankError) +} + +// TestVotesCache_GetVote tests that GetVote method +func TestVotesCache_GetVote(t *testing.T) { + rank := uint64(100) + knownVote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + }) + doubleVote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + (*v).ID = knownVote.ID + }) + + cache := NewVotesCache[*helper.TestVote](rank) + + // unknown vote + vote, found := cache.GetVote(helper.MakeIdentity()) + assert.Nil(t, vote) + assert.False(t, found) + + // known vote + err := cache.AddVote(&knownVote) + assert.NoError(t, err) + vote, found = cache.GetVote(knownVote.ID) + assert.Equal(t, &knownVote, vote) + assert.True(t, found) + + // for a signer ID with a known vote, the cache should memorize the _first_ encountered vote + err = cache.AddVote(&doubleVote) + assert.True(t, models.IsDoubleVoteError[*helper.TestVote](err)) + vote, found = cache.GetVote(doubleVote.ID) + assert.Equal(t, &knownVote, vote) + assert.True(t, found) +} + +// TestVotesCache_All tests that All returns previously added votes in same order +func TestVotesCache_All(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewVotesCache[*helper.TestVote](rank) + expectedVotes := make([]**helper.TestVote, 0, 5) + for i := range expectedVotes { + vote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + }) + expectedVotes[i] = &vote + require.NoError(t, cache.AddVote(&vote)) + } + require.Equal(t, expectedVotes, cache.All()) +} + +// TestVotesCache_RegisterVoteConsumer tests that registered vote consumer receives all previously added votes as well as +// new ones in expected order. 
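Registered consumers are expected to return quickly (see the CAUTION on RegisterVoteConsumer above). A hypothetical sketch of a consumer that mirrors votes into a buffered channel so slower forensic inspection can happen elsewhere; the buffer size, function name, fixture vote type, and the inferred votecollector import path are assumptions:

package example

import (
	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
	"source.quilibrium.com/quilibrium/monorepo/consensus/votecollector"
)

// mirrorVotes registers a consumer that copies every cached and future vote
// into a buffered channel, so a separate goroutine can inspect them at its
// own pace without slowing the cache down.
func mirrorVotes(cache *votecollector.VotesCache[*helper.TestVote]) <-chan *helper.TestVote {
	out := make(chan *helper.TestVote, 1024)
	cache.RegisterVoteConsumer(func(vote **helper.TestVote) {
		select {
		case out <- *vote:
		default: // never hold up the cache; drop if the mirror falls behind
		}
	})
	return out
}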
+func TestVotesCache_RegisterVoteConsumer(t *testing.T) { + t.Parallel() + + rank := uint64(100) + cache := NewVotesCache[*helper.TestVote](rank) + votesBatchSize := 5 + expectedVotes := make([]*helper.TestVote, 0, votesBatchSize) + // produce first batch before registering vote consumer + for i := range expectedVotes { + vote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + }) + expectedVotes[i] = vote + require.NoError(t, cache.AddVote(&vote)) + } + + consumedVotes := make([]*helper.TestVote, 0) + voteConsumer := func(vote **helper.TestVote) { + consumedVotes = append(consumedVotes, *vote) + } + + // registering vote consumer has to fill consumedVotes using callback + cache.RegisterVoteConsumer(voteConsumer) + // all cached votes should be fed into the consumer right away + require.Equal(t, expectedVotes, consumedVotes) + + // produce second batch after registering vote consumer + for i := 0; i < votesBatchSize; i++ { + vote := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + }) + expectedVotes = append(expectedVotes, vote) + require.NoError(t, cache.AddVote(&vote)) + } + + // at this point consumedVotes has to have all votes created before and after registering vote + // consumer, and they must be in same order + require.Equal(t, expectedVotes, consumedVotes) +} + +// BenchmarkAdd measured the time it takes to add `numberVotes` concurrently to the VotesCache. +// On MacBook with Intel i7-7820HQ CPU @ 2.90GHz: +// adding 1 million votes in total, with 20 threads concurrently, took 0.48s +func BenchmarkAdd(b *testing.B) { + numberVotes := 1_000_000 + threads := 20 + + // Setup: create worker routines and votes to feed + rank := uint64(10) + cache := NewVotesCache[*helper.TestVote](rank) + + var start sync.WaitGroup + start.Add(threads) + var done sync.WaitGroup + done.Add(threads) + + stateID := helper.MakeIdentity() + n := numberVotes / threads + + for ; threads > 0; threads-- { + go func(i int) { + // create votes and signal ready + votes := make([]*helper.TestVote, 0, n) + for len(votes) < n { + v := helper.VoteFixture(func(v **helper.TestVote) { + (*v).Rank = rank + (*v).StateID = stateID + }) + votes = append(votes, v) + } + start.Done() + + // Wait for last worker routine to signal ready. Then, + // feed all votes into cache + start.Wait() + for _, v := range votes { + err := cache.AddVote(&v) + assert.NoError(b, err) + } + done.Done() + }(threads) + } + start.Wait() + t1 := time.Now() + done.Wait() + duration := time.Since(t1) + fmt.Printf("=> adding %d votes to Cache took %f seconds\n", cache.Size(), duration.Seconds()) +} diff --git a/consensus/votecollector/vote_processor.go b/consensus/votecollector/vote_processor.go new file mode 100644 index 0000000..d542ebe --- /dev/null +++ b/consensus/votecollector/vote_processor.go @@ -0,0 +1,241 @@ +package votecollector + +import ( + "context" + "errors" + "fmt" + + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/signature" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" +) + +/* ***************** Base-Factory for VoteProcessor ****************** */ + +// provingVoteProcessorFactoryBase implements a factory for creating +// VoteProcessor holds needed dependencies to initialize VoteProcessor. +// CAUTION: +// this base factory only creates the VerifyingVoteProcessor for the given +// state. 
It does _not_ check the proposer's vote for its own state, i.e. it +// does _not_ implement `consensus.VoteProcessorFactory`. This base factory +// should be wrapped by `votecollector.VoteProcessorFactory` which adds the +// logic to verify the proposer's vote (decorator pattern). +type provingVoteProcessorFactoryBase[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + committee consensus.DynamicCommittee + onQCCreated consensus.OnQuorumCertificateCreated +} + +// Create creates VoteProcessor for processing votes for the given state. +// Caller must treat all errors as exceptions +func (f *provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]) Create( + tracer consensus.TraceLogger, + filter []byte, + state *models.State[StateT], + dsTag []byte, + aggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + allParticipants, err := f.committee.IdentitiesByState(state.Identifier) + if err != nil { + return nil, fmt.Errorf("error retrieving consensus participants: %w", err) + } + + // message that has to be verified against aggregated signature + msg := verification.MakeVoteMessage(filter, state.Rank, state.Identifier) + + // prepare the proving public keys of participants + provingKeys := make([][]byte, 0, len(allParticipants)) + for _, participant := range allParticipants { + provingKeys = append(provingKeys, participant.PublicKey()) + } + + provingSigAggtor, err := signature.NewWeightedSignatureAggregator( + allParticipants, + provingKeys, + msg, + dsTag, + aggregator, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create aggregator for proving signatures: %w", + err, + ) + } + + minRequiredWeight, err := f.committee.QuorumThresholdForRank(state.Rank) + if err != nil { + return nil, fmt.Errorf( + "could not get weight threshold for rank %d: %w", + state.Rank, + err, + ) + } + + return &VoteProcessor[StateT, VoteT, PeerIDT]{ + tracer: tracer, + state: state, + provingSigAggtor: provingSigAggtor, + votingProvider: votingProvider, + onQCCreated: f.onQCCreated, + minRequiredWeight: minRequiredWeight, + done: *atomic.NewBool(false), + allParticipants: allParticipants, + }, nil +} + +/* ****************** VoteProcessor Implementation ******************* */ + +// VoteProcessor implements the consensus.VerifyingVoteProcessor interface. +// It processes hotstuff votes from a collector cluster, where participants vote +// in favour of a state by proving their proving key consensus. +// Concurrency safe. +type VoteProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + tracer consensus.TraceLogger + state *models.State[StateT] + provingSigAggtor consensus.WeightedSignatureAggregator + onQCCreated consensus.OnQuorumCertificateCreated + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT] + minRequiredWeight uint64 + done atomic.Bool + allParticipants []models.WeightedIdentity +} + +// State returns state that is part of proposal that we are processing votes for. +func (p *VoteProcessor[StateT, VoteT, PeerIDT]) State() *models.State[StateT] { + return p.state +} + +// Status returns status of this vote processor, it's always verifying. +func (p *VoteProcessor[ + StateT, + VoteT, + PeerIDT, +]) Status() consensus.VoteCollectorStatus { + return consensus.VoteCollectorStatusVerifying +} + +// Process performs processing of single vote in concurrent safe way. 
This +// function is implemented to be called by multiple goroutines at the same time. +// Supports processing of both proving and threshold signatures. Design of this +// function is event driven, as soon as we collect enough weight to create a QC +// we will immediately do this and submit it via callback for further +// processing. +// Expected error returns during normal operations: +// * VoteForIncompatibleStateError - submitted vote for incompatible state +// * VoteForIncompatibleRankError - submitted vote for incompatible rank +// * models.InvalidVoteError - submitted vote with invalid signature +// All other errors should be treated as exceptions. +func (p *VoteProcessor[StateT, VoteT, PeerIDT]) Process(vote *VoteT) error { + err := EnsureVoteForState[StateT, VoteT](vote, p.state) + if err != nil { + return fmt.Errorf("received incompatible vote: %w", err) + } + + // Vote Processing state machine + if p.done.Load() { + return nil + } + err = p.provingSigAggtor.Verify((*vote).Identity(), (*vote).GetSignature()) + if err != nil { + if models.IsInvalidSignerError(err) { + return models.NewInvalidVoteErrorf( + vote, + "vote %x for rank %d is not signed by an authorized consensus participant: %w", + (*vote).Identity(), + (*vote).GetRank(), + err, + ) + } + if errors.Is(err, models.ErrInvalidSignature) { + return models.NewInvalidVoteErrorf( + vote, + "vote %x for rank %d has an invalid proving signature: %w", + (*vote).Identity(), + (*vote).GetRank(), + err, + ) + } + return fmt.Errorf("internal error checking signature validity: %w", err) + } + + if p.done.Load() { + return nil + } + totalWeight, err := p.provingSigAggtor.TrustedAdd( + (*vote).Identity(), + (*vote).GetSignature(), + ) + if err != nil { + // we don't expect any errors here during normal operation, as we previously + // checked for duplicated votes from the same signer and verified the + // signer+signature + return fmt.Errorf( + "unexpected exception adding signature from vote %x to proving aggregator: %w", + (*vote).Identity(), + err, + ) + } + + p.tracer.Trace(fmt.Sprintf( + "processed vote, total weight=(%d), required=(%d)", + totalWeight, + p.minRequiredWeight, + )) + + // checking of conditions for building QC are satisfied + if totalWeight < p.minRequiredWeight { + return nil + } + + // At this point, we have enough signatures to build a QC. Another routine + // might just be at this point. To avoid duplicate work, only one routine can + // pass: + if !p.done.CompareAndSwap(false, true) { + return nil + } + qc, err := p.buildQC() + if err != nil { + return fmt.Errorf("internal error constructing QC from votes: %w", err) + } + + p.tracer.Trace("new QC has been created") + p.onQCCreated(qc) + + return nil +} + +// buildQC performs aggregation of signatures when we have collected enough +// weight for building QC. This function is run only once by single worker. +// Any error should be treated as exception. 
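Process above relies on done.CompareAndSwap so that, when several goroutines cross the weight threshold at the same time, only one of them builds the QC. A standalone sketch of that pattern using the same go.uber.org/atomic package; the function and counter are illustrative only:

package example

import (
	"sync"

	"go.uber.org/atomic"
)

// buildOnce launches five goroutines that all believe they crossed the
// threshold; only the CompareAndSwap winner performs the one-time work.
func buildOnce() int32 {
	done := atomic.NewBool(false)
	built := atomic.NewInt32(0)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if !done.CompareAndSwap(false, true) {
				return // another goroutine already won; skip duplicate work
			}
			built.Inc() // stands in for buildQC + onQCCreated
		}()
	}
	wg.Wait()
	return built.Load() // always 1
}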
+func (p *VoteProcessor[StateT, VoteT, PeerIDT]) buildQC() ( + models.QuorumCertificate, + error, +) { + _, aggregatedSig, err := p.provingSigAggtor.Aggregate() + if err != nil { + return nil, fmt.Errorf("could not aggregate proving signature: %w", err) + } + + qc, err := p.votingProvider.FinalizeQuorumCertificate( + context.Background(), + p.state, + aggregatedSig, + ) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil +} diff --git a/consensus/votecollector/vote_processor_test.go b/consensus/votecollector/vote_processor_test.go new file mode 100644 index 0000000..9c4e924 --- /dev/null +++ b/consensus/votecollector/vote_processor_test.go @@ -0,0 +1,269 @@ +package votecollector + +import ( + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/helper" + "source.quilibrium.com/quilibrium/monorepo/consensus/mocks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func TestVoteProcessor(t *testing.T) { + suite.Run(t, new(VoteProcessorTestSuite)) +} + +// VoteProcessorTestSuite is a test suite that holds mocked state for isolated testing of VoteProcessor. +type VoteProcessorTestSuite struct { + VoteProcessorTestSuiteBase + + processor *VoteProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer] + allParticipants []models.WeightedIdentity +} + +func (s *VoteProcessorTestSuite) SetupTest() { + s.VoteProcessorTestSuiteBase.SetupTest() + s.allParticipants = helper.WithWeightedIdentityList(14) + votingProvider := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](s.T()) + s.processor = &VoteProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer]{ + tracer: helper.Logger(), + state: s.proposal.State, + provingSigAggtor: s.provingAggregator, + onQCCreated: s.onQCCreated, + minRequiredWeight: s.minRequiredWeight, + done: *atomic.NewBool(false), + allParticipants: s.allParticipants, + votingProvider: votingProvider, + } +} + +// TestInitialState tests that State() and Status() return correct values after calling constructor +func (s *VoteProcessorTestSuite) TestInitialState() { + require.Equal(s.T(), s.proposal.State, s.processor.State()) + require.Equal(s.T(), consensus.VoteCollectorStatusVerifying, s.processor.Status()) +} + +// TestProcess_VoteNotForProposal tests that vote should pass to validation only if it has correct +// rank and state ID matching proposal that is locked in VoteProcessor +func (s *VoteProcessorTestSuite) TestProcess_VoteNotForProposal() { + v := helper.VoteForStateFixture(s.proposal.State) + v.StateID = "" + err := s.processor.Process(&v) + require.ErrorAs(s.T(), err, &VoteForIncompatibleStateError) + require.False(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) + + v = helper.VoteForStateFixture(s.proposal.State) + v.Rank = 0 + err = s.processor.Process(&v) + require.ErrorAs(s.T(), err, &VoteForIncompatibleRankError) + require.False(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) + + s.provingAggregator.AssertNotCalled(s.T(), "Verify") +} + +// TestProcess_InvalidSignature tests that VoteProcessor doesn't collect signatures for votes with invalid consensus. +// Checks are made for cases where both proving and threshold signatures were submitted. 
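As the tests below exercise, a caller of Process is expected to report models.InvalidVoteError and carry on, while every other error is treated as an exception. A hedged caller-side sketch; the function name and notify callback are assumptions:

package example

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// classifyProcessError is a hypothetical helper: invalid votes are handed to
// the supplied callback and swallowed, every other error is escalated.
func classifyProcessError(err error, notifyInvalid func(error)) error {
	if err == nil {
		return nil
	}
	if models.IsInvalidVoteError[*helper.TestVote](err) {
		notifyInvalid(err) // e.g. forwarded to an OnInvalidVoteDetected notification
		return nil
	}
	return fmt.Errorf("unexpected error while processing vote: %w", err)
}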
+func (s *VoteProcessorTestSuite) TestProcess_InvalidSignature() { + exception := errors.New("unexpected-exception") + + // sentinel error from `InvalidSignerError` should be wrapped as `InvalidVoteError` + voteA := helper.VoteForStateFixture(s.proposal.State) + s.provingAggregator.On("Verify", voteA.ID, mock.Anything).Return(models.NewInvalidSignerErrorf("")).Once() + err := s.processor.Process(&voteA) + require.Error(s.T(), err) + require.True(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) + require.True(s.T(), models.IsInvalidSignerError(err)) + + // sentinel error from `ErrInvalidSignature` should be wrapped as `InvalidVoteError` + voteB := helper.VoteForStateFixture(s.proposal.State) + s.provingAggregator.On("Verify", voteB.ID, mock.Anything).Return(models.ErrInvalidSignature).Once() + err = s.processor.Process(&voteB) + require.Error(s.T(), err) + require.True(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) + require.ErrorAs(s.T(), err, &models.ErrInvalidSignature) + + // unexpected errors from `Verify` should be propagated, but should _not_ be wrapped as `InvalidVoteError` + voteC := helper.VoteForStateFixture(s.proposal.State) + s.provingAggregator.On("Verify", voteC.ID, mock.Anything).Return(exception) + err = s.processor.Process(&voteC) + require.ErrorIs(s.T(), err, exception) // unexpected errors from verifying the vote signature should be propagated + require.False(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) // but not interpreted as an invalid vote + + s.provingAggregator.AssertNotCalled(s.T(), "TrustedAdd") +} + +// TestProcess_TrustedAdd_Exception tests that unexpected exceptions returned by +// WeightedSignatureAggregator.TrustedAdd(..) are _not_ interpreted as invalid votes +func (s *VoteProcessorTestSuite) TestProcess_TrustedAdd_Exception() { + exception := errors.New("unexpected-exception") + provingVote := helper.VoteForStateFixture(s.proposal.State) + s.provingAggregator = mocks.NewWeightedSignatureAggregator(s.T()) + s.provingAggregator.On("Verify", provingVote.ID, mock.Anything).Return(nil).Once() + s.provingAggregator.On("TrustedAdd", provingVote.ID, mock.Anything).Return(uint64(0), exception).Once() + s.processor.provingSigAggtor = s.provingAggregator + err := s.processor.Process(&provingVote) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), models.IsInvalidVoteError[*helper.TestVote](err)) + s.provingAggregator.AssertExpectations(s.T()) +} + +// TestProcess_BuildQCError tests error path during process of building QC. +// Building QC is a one time operation, we need to make sure that failing in one of the steps leads to exception. +func (s *VoteProcessorTestSuite) TestProcess_BuildQCError() { + // In this test we will mock all dependencies for happy path, and replace some branches with unhappy path + // to simulate errors along the branches. 
+ vote := helper.VoteForStateFixture(s.proposal.State) + + // in this test case we aren't able to aggregate proving signature + exception := errors.New("proving-aggregate-exception") + provingSigAggregator := mocks.NewWeightedSignatureAggregator(s.T()) + provingSigAggregator.On("Verify", mock.Anything, mock.Anything).Return(nil).Once() + provingSigAggregator.On("TrustedAdd", mock.Anything, mock.Anything).Return(s.minRequiredWeight, nil).Once() + provingSigAggregator.On("Aggregate").Return(nil, nil, exception).Once() + + s.processor.provingSigAggtor = provingSigAggregator + err := s.processor.Process(&vote) + require.ErrorIs(s.T(), err, exception) + provingSigAggregator.AssertExpectations(s.T()) +} + +// TestProcess_NotEnoughWeight tests a scenario where we first don't have enough weight, +// then we iteratively increase it to the point where we have enough proving weight. No QC should be created. +func (s *VoteProcessorTestSuite) TestProcess_NotEnoughWeight() { + for i := s.sigWeight; i < s.minRequiredWeight; i += s.sigWeight { + vote := helper.VoteForStateFixture(s.proposal.State) + s.provingAggregator.On("Verify", vote.ID, []byte(vote.Signature)).Return(nil).Once() + err := s.processor.Process(&vote) + require.NoError(s.T(), err) + } + require.False(s.T(), s.processor.done.Load()) + s.onQCCreatedState.AssertNotCalled(s.T(), "onQCCreated") + s.provingAggregator.AssertExpectations(s.T()) +} + +// TestProcess_CreatingQC tests a scenario when we have collected enough proving weight +// and proceed to build QC. Created QC has to have all signatures and identities aggregated by +// aggregator. +func (s *VoteProcessorTestSuite) TestProcess_CreatingQC() { + // prepare test setup: 13 votes with proving sigs + provingSigners := s.allParticipants[:14] + signerIndices := []byte{0b11111111, 0b00011111} + + // setup aggregator + s.provingAggregator = mocks.NewWeightedSignatureAggregator(s.T()) + expectedSig := &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + PublicKey: make([]byte, 585), + Bitmask: signerIndices, + } + s.provingAggregator.On("Aggregate").Return(provingSigners, expectedSig, nil).Once() + s.processor.provingSigAggtor = s.provingAggregator + s.processor.votingProvider.(*mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]).On( + "FinalizeQuorumCertificate", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(&helper.TestQuorumCertificate{ + Filter: nil, + Rank: s.proposal.State.Rank, + Selector: s.proposal.State.Identifier, + AggregatedSignature: expectedSig, + }, nil) + // expected QC + s.onQCCreatedState.On("onQCCreated", mock.Anything).Run(func(args mock.Arguments) { + qc := args.Get(0).(models.QuorumCertificate) + // ensure that QC contains correct field + expectedQC := &helper.TestQuorumCertificate{ + Rank: s.proposal.State.Rank, + Selector: s.proposal.State.Identifier, + AggregatedSignature: expectedSig, + } + require.Equal(s.T(), expectedQC, qc) + }).Return(nil).Once() + + // add votes + for _, signer := range provingSigners { + vote := helper.VoteForStateFixture(s.proposal.State) + vote.ID = signer.Identity() + expectedSig := []byte(vote.Signature) + s.provingAggregator.On("Verify", vote.ID, expectedSig).Return(nil).Once() + s.provingAggregator.On("TrustedAdd", vote.ID, expectedSig).Run(func(args mock.Arguments) { + s.provingTotalWeight += s.sigWeight + }).Return(s.provingTotalWeight, nil).Once() + err := s.processor.Process(&vote) + require.NoError(s.T(), err) + } + + require.True(s.T(), s.processor.done.Load()) + 
s.onQCCreatedState.AssertExpectations(s.T()) + s.provingAggregator.AssertExpectations(s.T()) + + // processing extra votes shouldn't result in creating new QCs + vote := helper.VoteForStateFixture(s.proposal.State) + err := s.processor.Process(&vote) + require.NoError(s.T(), err) + + s.onQCCreatedState.AssertExpectations(s.T()) +} + +// TestProcess_ConcurrentCreatingQC tests a scenario where multiple goroutines process vote at same time, +// we expect only one QC created in this scenario. +func (s *VoteProcessorTestSuite) TestProcess_ConcurrentCreatingQC() { + provingSigners := s.allParticipants[:10] + // mock aggregators, so we have enough weight and shares for creating QC + s.provingAggregator = mocks.NewWeightedSignatureAggregator(s.T()) + s.provingAggregator.On("Verify", mock.Anything, mock.Anything).Return(nil) + s.provingAggregator.On("TrustedAdd", mock.Anything, mock.Anything).Return(s.minRequiredWeight, nil) + expectedSig := &helper.TestAggregatedSignature{ + Signature: make([]byte, 74), + PublicKey: make([]byte, 585), + Bitmask: []byte{0b11111111, 0b00000011}, + } + s.provingAggregator.On("Aggregate").Return(provingSigners, expectedSig, nil) + s.processor.provingSigAggtor = s.provingAggregator + + // at this point sending any vote should result in creating QC. + s.onQCCreatedState.On("onQCCreated", mock.Anything).Return(nil).Once() + + s.processor.votingProvider.(*mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]).On( + "FinalizeQuorumCertificate", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(&helper.TestQuorumCertificate{ + Filter: nil, + Rank: s.proposal.State.Rank, + Selector: s.proposal.State.Identifier, + FrameNumber: s.proposal.State.Rank, + Timestamp: uint64(s.proposal.State.Timestamp), + AggregatedSignature: expectedSig, + }, nil) + var startupWg, shutdownWg sync.WaitGroup + + vote := helper.VoteForStateFixture(s.proposal.State) + startupWg.Add(1) + // prepare goroutines, so they are ready to submit a vote at roughly same time + for i := 0; i < 5; i++ { + shutdownWg.Add(1) + go func() { + defer shutdownWg.Done() + startupWg.Wait() + err := s.processor.Process(&vote) + require.NoError(s.T(), err) + }() + } + + startupWg.Done() + + // wait for all routines to finish + shutdownWg.Wait() + + s.onQCCreatedState.AssertNumberOfCalls(s.T(), "onQCCreated", 1) +} diff --git a/go-libp2p-blossomsub/backoff.go b/go-libp2p-blossomsub/backoff.go index 1b32a24..87e8cb7 100644 --- a/go-libp2p-blossomsub/backoff.go +++ b/go-libp2p-blossomsub/backoff.go @@ -43,7 +43,6 @@ func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Dur info: make(map[peer.ID]*backoffHistory), } - rand.Seed(time.Now().UnixNano()) // used for jitter go b.cleanupLoop(ctx) return b diff --git a/go-libp2p-blossomsub/bitmask_test.go b/go-libp2p-blossomsub/bitmask_test.go index c95c3e9..49adab7 100644 --- a/go-libp2p-blossomsub/bitmask_test.go +++ b/go-libp2p-blossomsub/bitmask_test.go @@ -710,7 +710,7 @@ func notifSubThenUnSub(ctx context.Context, t *testing.T, bitmasks []*Bitmask) { } // Wait for the unsubscribe messages to reach the primary peer - for len(primaryBitmask.ListPeers()) < 0 { + for len(primaryBitmask.ListPeers()) != 0 { time.Sleep(time.Millisecond * 100) } diff --git a/go-libp2p-blossomsub/blossomsub_test.go b/go-libp2p-blossomsub/blossomsub_test.go index 35c3c3a..e9e2865 100644 --- a/go-libp2p-blossomsub/blossomsub_test.go +++ b/go-libp2p-blossomsub/blossomsub_test.go @@ -3469,12 +3469,10 @@ func TestBloomRouting(t *testing.T) { } go func() 
{ - for _ = range sub { - select { - case err := <-errch: - if err != nil { - errs = append(errs, err) - } + for range sub { + err := <-errch + if err != nil { + errs = append(errs, err) } } g.Done() @@ -3573,13 +3571,12 @@ func TestBloomPropagationOverSubTreeTopology(t *testing.T) { var msg *struct{} = nil go func() { - for i := 0; i < len(subs); i++ { - select { - case m := <-msgch: - msg = &m - cancel() - } + for range subs { + m := <-msgch + msg = &m + cancel() } + }() g.Wait() if msg == nil { @@ -3796,12 +3793,10 @@ func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]* var msg *struct{} = nil go func() { - for i := 0; i < len(subs); i++ { - select { - case m := <-msgch: - msg = &m - cancel() - } + for range subs { + m := <-msgch + msg = &m + cancel() } }() g.Wait() diff --git a/go-libp2p-blossomsub/subscription_filter_test.go b/go-libp2p-blossomsub/subscription_filter_test.go index 6b139b7..5829af3 100644 --- a/go-libp2p-blossomsub/subscription_filter_test.go +++ b/go-libp2p-blossomsub/subscription_filter_test.go @@ -32,15 +32,15 @@ func TestBasicSubscriptionFilter(t *testing.T) { bitmask3 := []byte{0x00, 0x00, 0x02, 0x00} yes := true subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Bitmask: bitmask1, Subscribe: yes, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask2, Subscribe: yes, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask3, Subscribe: yes, }, @@ -88,24 +88,24 @@ func TestSubscriptionFilterDeduplication(t *testing.T) { yes := true no := false subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Bitmask: bitmask1, Subscribe: yes, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask1, Subscribe: yes, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask2, Subscribe: yes, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask2, Subscribe: no, }, - &pb.RPC_SubOpts{ + { Bitmask: bitmask3, Subscribe: yes, }, diff --git a/go.mod b/go.mod deleted file mode 100644 index 53b7abd..0000000 --- a/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module source.quilibrium.com/quilibrium/monorepo - -go 1.18 - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index d821b09..0000000 --- a/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/lifecycle/.mockery.yaml b/lifecycle/.mockery.yaml new file mode 100644 index 0000000..8c9a8ac --- /dev/null +++ b/lifecycle/.mockery.yaml @@ -0,0 +1,18 @@ +dir: "{{.InterfaceDir}}/mock" +outpkg: "mock" 
+filename: "{{.InterfaceName | snakecase}}.go" +mockname: "{{.InterfaceName}}" + +all: True +with-expecter: False +include-auto-generated: False +disable-func-mocks: True +fail-on-missing: True +disable-version-string: True +resolve-type-alias: False + +packages: + source.quilibrium.com/quilibrium/monorepo/lifecycle: + config: + dir: "mocks" + outpkg: "mocks" diff --git a/lifecycle/common.go b/lifecycle/common.go new file mode 100644 index 0000000..010f925 --- /dev/null +++ b/lifecycle/common.go @@ -0,0 +1,215 @@ +package lifecycle + +import ( + "context" + "math" + "reflect" +) + +// AllReady calls Ready on all input components and returns a channel that is +// closed when all input components are ready. +func AllReady(components ...Component) <-chan struct{} { + readyChans := make([]<-chan struct{}, len(components)) + + for i, c := range components { + readyChans[i] = c.Ready() + } + + return AllClosed(readyChans...) +} + +// AllDone calls Done on all input components and returns a channel that is +// closed when all input components are done. +func AllDone(components ...Component) <-chan struct{} { + doneChans := make([]<-chan struct{}, len(components)) + + for i, c := range components { + doneChans[i] = c.Done() + } + + return AllClosed(doneChans...) +} + +// AllClosed returns a channel that is closed when all input channels are +// closed. +func AllClosed(channels ...<-chan struct{}) <-chan struct{} { + done := make(chan struct{}) + if len(channels) == 0 { + close(done) + return done + } + + go func() { + for _, ch := range channels { + <-ch + } + close(done) + }() + + return done +} + +// WaitClosed waits for either a signal/close on the channel or for the context +// to be cancelled. Returns nil if the channel was signalled/closed before +// returning, otherwise, it returns the context error. +// +// This handles the corner case where the context is cancelled at the same time +// that the channel is closed, and the Done case was selected. This is intended +// for situations where ignoring a signal can cause safety issues. +func WaitClosed(ctx context.Context, ch <-chan struct{}) error { + select { + case <-ctx.Done(): + select { + case <-ch: + return nil + default: + } + return ctx.Err() + case <-ch: + return nil + } +} + +// CheckClosed checks if the provided channel has a signal or was closed. +// Returns true if the channel was signaled/closed, otherwise, returns false. +// +// This is intended to reduce boilerplate code when multiple channel checks are +// required because missed signals could cause safety issues. 
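Taken together, the helpers above support a common startup pattern: wait until a set of components is ready, but give up if the caller's context ends first. A minimal sketch assuming the caller already holds some lifecycle.Component values; the function name and timeout are illustrative:

package example

import (
	"context"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)

// waitForStartup blocks until every component has closed its Ready channel,
// or returns the context error if the deadline expires first; WaitClosed
// prefers the ready signal when it races with cancellation.
func waitForStartup(ctx context.Context, components ...lifecycle.Component) error {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return lifecycle.WaitClosed(ctx, lifecycle.AllReady(components...))
}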
+func CheckClosed(done <-chan struct{}) bool { + select { + case <-done: + return true + default: + return false + } +} + +// MergeChannels merges a list of channels into a single channel +func MergeChannels(channels interface{}) interface{} { + sliceType := reflect.TypeOf(channels) + if sliceType.Kind() != reflect.Slice && sliceType.Kind() != reflect.Array { + panic("argument must be an array or slice") + } + chanType := sliceType.Elem() + if chanType.ChanDir() == reflect.SendDir { + panic("channels cannot be send-only") + } + c := reflect.ValueOf(channels) + var cases []reflect.SelectCase + for i := 0; i < c.Len(); i++ { + cases = append(cases, reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: c.Index(i), + }) + } + elemType := chanType.Elem() + out := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, elemType), 0) + go func() { + for len(cases) > 0 { + i, v, ok := reflect.Select(cases) + if !ok { + lastIndex := len(cases) - 1 + cases[i], cases[lastIndex] = cases[lastIndex], cases[i] + cases = cases[:lastIndex] + continue + } + out.Send(v) + } + out.Close() + }() + return out.Convert(reflect.ChanOf(reflect.RecvDir, elemType)).Interface() +} + +// WaitError waits for either an error on the error channel or the done channel +// to close. Returns an error if one is received on the error channel, otherwise +// it returns nil. +// +// This handles a race condition where the done channel could have been closed +// as a result of a fatal error being thrown, so that when the scheduler yields +// control back to this goroutine, both channels are available to read from. If +// the done case happens to be chosen at random to proceed instead of the error +// case, then we would return without error which could result in unsafe +// continuation. +func WaitError(errChan <-chan error, done <-chan struct{}) error { + select { + case err := <-errChan: + return err + case <-done: + select { + case err := <-errChan: + return err + default: + } + return nil + } +} + +// componentMerger is a utility structure which implements lifecycle.Component +// and is used to merge []T into one T. +type componentMerger struct { + components []Component +} + +func (m componentMerger) Start(signalerContext SignalerContext) error { + for _, component := range m.components { + startable, ok := component.(Component) + if ok { + err := startable.Start(signalerContext) + if err != nil { + return err + } + } + } + return nil +} + +func (m componentMerger) Ready() <-chan struct{} { + return AllReady(m.components...) +} + +func (m componentMerger) Done() <-chan struct{} { + return AllDone(m.components...) +} + +var _ Component = (*componentMerger)(nil) + +// MergeComponents merges []Component into one Component. +func MergeComponents(components ...Component) Component { + return componentMerger{components: components} +} + +// DetypeSlice converts a typed slice containing any kind of elements into an +// untyped []any type, in effect removing the element type information from the +// slice. It is useful for passing data into structpb.NewValue, which accepts +// []any but not []T for any specific type T. +func DetypeSlice[T any](typedSlice []T) []any { + untypedSlice := make([]any, len(typedSlice)) + for i, t := range typedSlice { + untypedSlice[i] = t + } + return untypedSlice +} + +// SampleN computes a percentage of the given number 'n', and returns the result +// as an unsigned integer. If the calculated sample is greater than the provided +// 'max' value, it returns the ceil of 'max'. 
If 'n' is less than or equal to 0, +// it returns 0. +// +// Parameters: +// - n: The input number, used as the base to compute the percentage. +// - max: The maximum value that the computed sample should not exceed. +// - percentage: The percentage (in range 0.0 to 1.0) to be applied to 'n'. +// +// Returns: +// - The computed sample as an unsigned integer, with consideration to the +// given constraints. +func SampleN(n int, max, percentage float64) uint { + if n <= 0 { + return 0 + } + sample := float64(n) * percentage + if sample > max { + sample = max + } + return uint(math.Ceil(sample)) +} diff --git a/lifecycle/common_test.go b/lifecycle/common_test.go new file mode 100644 index 0000000..d97a174 --- /dev/null +++ b/lifecycle/common_test.go @@ -0,0 +1,338 @@ +package lifecycle_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/mocks" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +// TestAllReady tests that AllReady closes its returned Ready channel only once +// all input Component instances close their Ready channel. +func TestAllReady(t *testing.T) { + cases := []int{0, 1, 100} + for _, n := range cases { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + testAllReady(n, t) + }) + } +} + +// TestAllDone tests that AllDone closes its returned Done channel only once +// all input Component instances close their Done channel. +func TestAllDone(t *testing.T) { + cases := []int{0, 1, 100} + for _, n := range cases { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + testAllDone(n, t) + }) + } +} + +func testAllDone(n int, t *testing.T) { + components := make([]lifecycle.Component, n) + for i := 0; i < n; i++ { + c := mocks.NewComponent(t) + unittest.Componentify(&c.Mock) + components[i] = c + } + + unittest.AssertClosesBefore(t, lifecycle.AllReady(components...), time.Second) + + for _, component := range components { + mock := component.(*mocks.Component) + mock.AssertCalled(t, "Ready") + mock.AssertNotCalled(t, "Done") + } +} + +func testAllReady(n int, t *testing.T) { + components := make([]lifecycle.Component, n) + for i := 0; i < n; i++ { + c := mocks.NewComponent(t) + unittest.Componentify(&c.Mock) + components[i] = c + } + + unittest.AssertClosesBefore(t, lifecycle.AllDone(components...), time.Second) + + for _, component := range components { + mock := component.(*mocks.Component) + mock.AssertCalled(t, "Done") + mock.AssertNotCalled(t, "Ready") + } +} + +func TestMergeChannels(t *testing.T) { + t.Run("empty slice", func(t *testing.T) { + t.Parallel() + channels := make([]<-chan int, 0) + merged := lifecycle.MergeChannels(channels).(<-chan int) + _, ok := <-merged + assert.False(t, ok) + }) + t.Run("empty array", func(t *testing.T) { + t.Parallel() + channels := []<-chan int{} + merged := lifecycle.MergeChannels(channels).(<-chan int) + _, ok := <-merged + assert.False(t, ok) + }) + t.Run("nil slice", func(t *testing.T) { + t.Parallel() + var channels []<-chan int + merged := lifecycle.MergeChannels(channels).(<-chan int) + _, ok := <-merged + assert.False(t, ok) + }) + t.Run("nil", func(t *testing.T) { + t.Parallel() + assert.Panics(t, func() { + lifecycle.MergeChannels(nil) + }) + }) + t.Run("map", func(t *testing.T) { + t.Parallel() + channels := make(map[string]<-chan int) + assert.Panics(t, func() { + lifecycle.MergeChannels(channels) + }) + }) + t.Run("string", func(t 
*testing.T) { + t.Parallel() + channels := "abcde" + assert.Panics(t, func() { + lifecycle.MergeChannels(channels) + }) + }) + t.Run("array of non-channel", func(t *testing.T) { + t.Parallel() + channels := []int{1, 2, 3} + assert.Panics(t, func() { + lifecycle.MergeChannels(channels) + }) + }) + t.Run("send channel", func(t *testing.T) { + t.Parallel() + channels := []chan<- int{make(chan int), make(chan int)} + assert.Panics(t, func() { + lifecycle.MergeChannels(channels) + }) + }) + t.Run("cast returned channel to send channel", func(t *testing.T) { + t.Parallel() + channels := []<-chan int{make(<-chan int), make(<-chan int)} + _, ok := lifecycle.MergeChannels(channels).(chan int) + assert.False(t, ok) + }) + t.Run("happy path", func(t *testing.T) { + t.Parallel() + channels := []chan int{make(chan int), make(chan int), make(chan int)} + merged := lifecycle.MergeChannels(channels).(<-chan int) + for i, ch := range channels { + i := i + ch := ch + go func() { + ch <- i + close(ch) + }() + } + var elements []int + for i := range merged { + elements = append(elements, i) + } + assert.ElementsMatch(t, elements, []int{0, 1, 2}) + }) +} + +func TestWaitClosed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t.Run("channel closed returns nil", func(t *testing.T) { + finished := make(chan struct{}) + ch := make(chan struct{}) + go func() { + err := lifecycle.WaitClosed(ctx, ch) + assert.NoError(t, err) + close(finished) + }() + close(ch) + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + }) + + t.Run("context cancelled returns error", func(t *testing.T) { + testCtx, testCancel := context.WithCancel(ctx) + finished := make(chan struct{}) + ch := make(chan struct{}) + go func() { + err := lifecycle.WaitClosed(testCtx, ch) + assert.ErrorIs(t, err, context.Canceled) + close(finished) + }() + testCancel() + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + }) + + t.Run("both conditions triggered returns nil", func(t *testing.T) { + // both conditions are met when WaitClosed is called. Since one is randomly selected, + // there is a 99.9% probability that each condition will be picked first at least once + // during this test. 
+ for i := 0; i < 10; i++ { + testCtx, testCancel := context.WithCancel(ctx) + finished := make(chan struct{}) + ch := make(chan struct{}) + close(ch) + testCancel() + + go func() { + err := lifecycle.WaitClosed(testCtx, ch) + assert.NoError(t, err) + close(finished) + }() + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + } + }) +} + +func TestCheckClosed(t *testing.T) { + done := make(chan struct{}) + assert.False(t, lifecycle.CheckClosed(done)) + close(done) + assert.True(t, lifecycle.CheckClosed(done)) +} + +func TestWaitError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testErr := errors.New("test error channel") + t.Run("error received returns error", func(t *testing.T) { + finished := make(chan struct{}) + ch := make(chan error) + + go func() { + err := lifecycle.WaitError(ch, ctx.Done()) + assert.ErrorIs(t, err, testErr) + close(finished) + }() + ch <- testErr + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + }) + + t.Run("context cancelled returns error", func(t *testing.T) { + testCtx, testCancel := context.WithCancel(ctx) + finished := make(chan struct{}) + ch := make(chan error) + go func() { + err := lifecycle.WaitError(ch, testCtx.Done()) + assert.NoError(t, err) + close(finished) + }() + testCancel() + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + }) + + t.Run("both conditions triggered returns error", func(t *testing.T) { + // both conditions are met when WaitError is called. Since one is randomly selected, + // there is a 99.9% probability that each condition will be picked first at least once + // during this test. + for i := 0; i < 10; i++ { + finished := make(chan struct{}) + ch := make(chan error, 1) // buffered so we can add before starting + done := make(chan struct{}) + + ch <- testErr + close(done) + + go func() { + err := lifecycle.WaitError(ch, done) + assert.ErrorIs(t, err, testErr) + close(finished) + }() + + select { + case <-finished: + case <-time.After(100 * time.Millisecond): + t.Error("timed out") + } + } + }) +} + +// TestDetypeSlice tests that DetypeSlice returns a slice which is identical +// besides the element type information. +func TestDetypeSlice(t *testing.T) { + slice := []int{1, 2, 5, 3, 53, 1234} + detyped := lifecycle.DetypeSlice(slice) + assert.Equal(t, len(slice), len(detyped)) + for i := range slice { + assert.Equal(t, slice[i], detyped[i].(int)) + } +} + +// TestSampleN contains a series of test cases to validate the behavior of the lifecycle.SampleN function. +// The test cases cover different scenarios: +// 1. "returns expected sample": Checks if the function returns the expected sample value when +// given a valid input. +// 2. "returns max value when sample greater than max": Verifies that the function returns the +// maximum allowed value when the calculated sample exceeds the maximum limit. +// 3. "returns 0 when n is less than or equal to 0": Asserts that the function returns 0 when +// the input 'n' is less than or equal to 0, which represents an invalid input. 
+func TestSampleN(t *testing.T) { + t.Run("returns expected sample", func(t *testing.T) { + n := 8 + max := 5.0 + percentage := .5 + sample := lifecycle.SampleN(n, max, percentage) + assert.Equal(t, uint(4), sample) + }) + t.Run("returns max value when sample greater than max", func(t *testing.T) { + n := 20 + max := 5.0 + percentage := .5 + sample := lifecycle.SampleN(n, max, percentage) + assert.Equal(t, uint(max), sample) + }) + t.Run("returns 0 when n is less than or equal to 0", func(t *testing.T) { + n := 0 + max := 5.0 + percentage := .5 + sample := lifecycle.SampleN(n, max, percentage) + assert.Equal(t, uint(0), sample, "sample returned should be 0 when n == 0") + n = -1 + sample = lifecycle.SampleN(n, max, percentage) + assert.Equal(t, uint(0), sample, "sample returned should be 0 when n < 0") + }) +} diff --git a/lifecycle/component.go b/lifecycle/component.go new file mode 100644 index 0000000..e92d77f --- /dev/null +++ b/lifecycle/component.go @@ -0,0 +1,373 @@ +package lifecycle + +import ( + "context" + "fmt" + "sync" + + "go.uber.org/atomic" +) + +// Component represents a lifecycle component which can be started, with +// channels that signal readiness and termination. +type Component interface { + // Ready provides a channel that is closed once the component has completed + // all initialization steps required to be considered operational. + Ready() <-chan struct{} + + // Done provides a channel that is closed once the component has shut down and + // is considered no longer running. + Done() <-chan struct{} + + // Start starts the component. Any fatal errors encountered while the + // component is running should be signaled with the given SignalerContext. + // This method should only be called once, and subsequent calls should fail + // with ErrMultipleStartup. + Start(SignalerContext) error +} + +type ComponentFactory func() (Component, error) + +// OnError represents a handler for a fatal, component-halting error. The +// handler must return ErrorHandlingBehavior, specific to how the component +// lifecycle should be managed: +// - ErrorShouldRestart - The component can be safely restarted by the component +// manager +// - ErrorShouldStop – The component cannot be safely restarted and must stop. +// If the component has no dependencies, it simply stops with no consequence +// to other managed components. If the component has dependencies, all +// descendant dependencies will also be stopped. +// - ErrorShouldStopParents - The component cannot be safely restarted and must +// stop. If the component has no dependencies, it simply stops with no +// consequence to other managed components. If the component has dependencies, +// all dependencies, descendant and ancestors will also be stopped. +// - ErrorShouldShutdown – The component cannot be safely restarted and must +// stop. The error is severe enough that it warrants a stop signal to all +// managed components. +// - ErrorShouldSpinHalt – The component cannot be safely restarted and must +// stop. The error is severe enough that a stop signal to all managed +// components is insufficient, but instead all components must be stopped and +// the lifecycle manager should enter a spin halt state where only a SIGTERM +// (not SIGINT/ctrl-c) can stop it. 
This return condition must not be used +// frivolously, as it directly interferes with external service management, +// so is reserved for a condition in which allowing the service to terminate +// (and likely be restarted by the user and/or service runner) needs a clear +// warning that restarting the service is dangerous. +type OnError = func(error) ErrorHandlingBehavior + +type ErrorHandlingBehavior int + +const ( + ErrorShouldRestart ErrorHandlingBehavior = iota + ErrorShouldStop + ErrorShouldStopParents + ErrorShouldShutdown + ErrorShouldSpinHalt +) + +// RunComponent repeatedly starts components returned from the given +// ComponentFactory, shutting them down when they encounter fatal errors +// and passing those errors to the given error handler. If the given context is +// cancelled, it will wait for the current component instance to shutdown before +// returning. +// The returned error is either: +// - The context error if the context was canceled. +// - The last error handled if the error handler returns ErrorShouldStop, +// ErrorShouldStopParents, or ErrorShouldShutdown. +// - An error returned from componentFactory while generating an instance of +// component. +// +// This method will hang until a SIGTERM is issued if the handler returns +// ErrorShouldSpinHalt. +func RunComponent( + ctx context.Context, + componentFactory ComponentFactory, + handler OnError, +) error { + // reference to per-run signals for the component + var component Component + var cancel context.CancelFunc + var done <-chan struct{} + var fatalErr <-chan error + + start := func() error { + var err error + + component, err = componentFactory() + if err != nil { + // failure to generate the component, should be handled out-of-band + // because a restart won't help + return err + } + + // context used to run the component + var runCtx context.Context + runCtx, cancel = context.WithCancel(ctx) + + // signaler context used for fatals + var signalCtx SignalerContext + signalCtx, fatalErr = WithSignaler(runCtx) + + // we start the component in a separate goroutine, since a fatal error + // could be thrown with `signalCtx` which terminates the calling goroutine + go component.Start(signalCtx) + + done = component.Done() + + return nil + } + + stop := func() { + // shutdown the component and wait until it's done + cancel() + <-done + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := start(); err != nil { + return err // failure to start + } + + if err := WaitError(fatalErr, done); err != nil { + // a fatal error was encountered + stop() + + // send error to the handler + switch result := handler(err); result { + case ErrorShouldRestart: + // try a fresh instance + continue + + case ErrorShouldStop: + // stop this component (and possibly others by supervisor), + // but do NOT tear down the whole process + return err + + case ErrorShouldStopParents: + // same return semantics as Stop; the supervisor orchestrates + // ancestor/descendant cancellation. + return err + + case ErrorShouldShutdown: + // caller will typically treat this as process-wide shutdown + return err + + case ErrorShouldSpinHalt: + // not handled here; supervisor will do local wait on SIGTERM. 
+ return err + + default: + panic(fmt.Sprintf("invalid error handling result: %v", result)) + } + } else if ctx.Err() != nil { + // the parent context was cancelled + stop() + return ctx.Err() + } + + // clean completion + return nil + } +} + +// ReadyFunc is called within a ComponentWorker function to indicate that the +// worker is ready. ComponentManager's Ready channel is closed when all workers +// are ready. +type ReadyFunc func() + +// ComponentWorker represents a worker routine of a component. It takes a +// SignalerContext which can be used to throw any fatal errors it encounters, +// as well as a ReadyFunc which must be called to signal that it is ready. The +// ComponentManager waits until all workers have signaled that they are ready +// before closing its own Ready channel. +type ComponentWorker func(ctx SignalerContext, ready ReadyFunc) + +// NoopWorker is a worker routine which is immediately ready, does nothing, and +// exits when the context is done. +func NoopWorker(ctx SignalerContext, ready ReadyFunc) { + ready() + <-ctx.Done() +} + +// ComponentManagerBuilder provides a mechanism for building a ComponentManager +type ComponentManagerBuilder interface { + // AddWorker adds a worker routine for the ComponentManager + AddWorker(ComponentWorker) ComponentManagerBuilder + + // Build builds and returns a new ComponentManager instance + Build() *ComponentManager +} + +type componentManagerBuilderImpl struct { + workers []ComponentWorker +} + +// NewComponentManagerBuilder returns a new ComponentManagerBuilder +func NewComponentManagerBuilder() ComponentManagerBuilder { + return &componentManagerBuilderImpl{} +} + +// AddWorker adds a ComponentWorker closure to the ComponentManagerBuilder. All +// worker functions will be run in parallel when the ComponentManager is +// started. Note: AddWorker is not concurrency-safe, and should only be called +// on an individual builder within a single goroutine. +func (c *componentManagerBuilderImpl) AddWorker( + worker ComponentWorker, +) ComponentManagerBuilder { + c.workers = append(c.workers, worker) + return c +} + +// Build returns a new ComponentManager instance with the configured workers. +// Build may be called multiple times to create multiple individual +// ComponentManagers. This will result in the worker routines being called +// multiple times. If this is unsafe, do not call it more than once! +func (c *componentManagerBuilderImpl) Build() *ComponentManager { + return &ComponentManager{ + started: atomic.NewBool(false), + ready: make(chan struct{}), + done: make(chan struct{}), + workersDone: make(chan struct{}), + shutdownSignal: make(chan struct{}), + workers: c.workers, + } +} + +var _ Component = (*ComponentManager)(nil) + +// ComponentManager is used to manage the worker routines of a Component, and +// implements all of the methods required by the Component interface, +// abstracting them away from individual implementations. +// +// Since component manager implements the Component interface, its Ready() and +// Done() methods are idempotent, and can be called immediately after +// instantiation. The Ready() channel is closed when all worker functions have +// called their ReadyFunc, and its Done() channel is closed after all worker +// functions have returned. +// +// Shutdown is signalled by cancelling the SignalerContext passed to Start(). +// This context is also used by workers to communicate fatal errors. All fatal +// errors are propagated to the caller of Start() via the context's Throw +// method. 
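+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	cm := NewComponentManagerBuilder().
+//		AddWorker(NoopWorker).
+//		Build()
+//	sctx, cancel, errCh := WithSignallerAndCancel(context.Background())
+//	_ = cm.Start(sctx)
+//	<-cm.Ready()  // closed once every worker has called its ReadyFunc
+//	cancel()      // begin shutdown
+//	<-cm.Done()   // closed after all workers have returned
+//	_ = errCh     // receives the first fatal error thrown by any worker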
+type ComponentManager struct {
+ started *atomic.Bool
+ ready chan struct{}
+ done chan struct{}
+ workersDone chan struct{}
+ shutdownSignal chan struct{}
+
+ workers []ComponentWorker
+}
+
+// Start initiates the ComponentManager by launching all worker routines. Start
+// must only be called once; subsequent calls return ErrComponentRunning.
+func (c *ComponentManager) Start(parent SignalerContext) error {
+ // Make sure we only start once. Atomically check if started is false, then
+ // set it to true. If it was not false, return ErrComponentRunning.
+ if !c.started.CompareAndSwap(false, true) {
+ return ErrComponentRunning
+ }
+
+ ctx, cancel := context.WithCancel(parent)
+ signalerCtx, errChan := WithSignaler(ctx)
+
+ go c.waitForShutdownSignal(ctx.Done())
+
+ // launch goroutine to propagate fatal error
+ go func() {
+ // Closing the done channel here guarantees that any fatal errors
+ // encountered will be propagated to the parent first. Otherwise, there's a
+ // race condition between when this goroutine and the parent's goroutine
+ // are scheduled. If the parent is scheduled first, any errors thrown within
+ // workers would not have propagated, and it would only receive the done
+ // signal.
+ defer func() {
+ cancel() // shut down all workers
+ // wait for the shutdown signal before signalling the component is done;
+ // this guarantees that ShutdownSignal is closed before Done
+ <-c.shutdownSignal
+ <-c.workersDone
+ close(c.done)
+ }()
+
+ // wait until the workersDone channel is closed or a fatal error is
+ // encountered
+ if err := WaitError(errChan, c.workersDone); err != nil {
+ // propagate the error directly to the parent because a failure in a
+ // worker routine is considered fatal
+ parent.Throw(err)
+ }
+ }()
+
+ var workersReady sync.WaitGroup
+ var workersDone sync.WaitGroup
+ workersReady.Add(len(c.workers))
+ workersDone.Add(len(c.workers))
+
+ // launch workers
+ for _, worker := range c.workers {
+ worker := worker
+ go func() {
+ defer workersDone.Done()
+ var readyOnce sync.Once
+ worker(signalerCtx, func() {
+ readyOnce.Do(func() {
+ workersReady.Done()
+ })
+ })
+ }()
+ }
+
+ // launch goroutine to close ready channel
+ go c.waitForReady(&workersReady)
+
+ // launch goroutine to close workersDone channel
+ go c.waitForDone(&workersDone)
+
+ return nil
+}
+
+func (c *ComponentManager) waitForShutdownSignal(
+ shutdownSignal <-chan struct{},
+) {
+ <-shutdownSignal
+ close(c.shutdownSignal)
+}
+
+func (c *ComponentManager) waitForReady(workersReady *sync.WaitGroup) {
+ workersReady.Wait()
+ close(c.ready)
+}
+
+func (c *ComponentManager) waitForDone(workersDone *sync.WaitGroup) {
+ workersDone.Wait()
+ close(c.workersDone)
+}
+
+// Ready returns a channel which is closed once all the worker routines have
+// been launched and are ready. If any worker routines exit before they indicate
+// that they are ready, the channel returned from Ready will never close.
+func (c *ComponentManager) Ready() <-chan struct{} {
+ return c.ready
+}
+
+// Done returns a channel which is closed once the ComponentManager has shut
+// down. This happens after all worker routines have shut down (either
+// gracefully or by throwing an error). ShutdownSignal is guaranteed to be
+// closed before Done.
+func (c *ComponentManager) Done() <-chan struct{} {
+ return c.done
+}
+
+// ShutdownSignal returns a channel that is closed when shutdown has commenced.
+// This can happen either if the ComponentManager's context is canceled, or a
+// worker routine encounters a fatal error. The channel is created by Build, so
+// it is safe to call this before Start; the channel simply will not be closed
+// until shutdown begins.
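+//
+// A sketch of reacting to shutdown (cm and cleanup are placeholder names):
+//
+//	go func() {
+//		<-cm.ShutdownSignal()
+//		cleanup() // shutdown has commenced
+//	}()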
+func (c *ComponentManager) ShutdownSignal() <-chan struct{} { + return c.shutdownSignal +} diff --git a/lifecycle/component_test.go b/lifecycle/component_test.go new file mode 100644 index 0000000..e333781 --- /dev/null +++ b/lifecycle/component_test.go @@ -0,0 +1,655 @@ +package lifecycle_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest" +) + +const CHANNEL_CLOSE_LATENCY_ALLOWANCE = 25 * time.Millisecond + +type WorkerState int + +const ( + UnknownWorkerState = iota + WorkerStartingUp // worker is starting up + WorkerStartupShuttingDown // worker was canceled during startup and is shutting down + WorkerStartupCanceled // worker has exited after being canceled during startup + WorkerStartupEncounteredFatal // worker encountered a fatal error during startup + WorkerRunning // worker has started up and is running normally + WorkerShuttingDown // worker was canceled and is shutting down + WorkerCanceled // worker has exited after being canceled + WorkerEncounteredFatal // worker encountered a fatal error + WorkerDone // worker has shut down after running normally +) + +func (s WorkerState) String() string { + switch s { + case WorkerStartingUp: + return "WORKER_STARTING_UP" + case WorkerStartupShuttingDown: + return "WORKER_STARTUP_SHUTTING_DOWN" + case WorkerStartupCanceled: + return "WORKER_STARTUP_CANCELED" + case WorkerStartupEncounteredFatal: + return "WORKER_STARTUP_ENCOUNTERED_FATAL" + case WorkerRunning: + return "WORKER_RUNNING" + case WorkerShuttingDown: + return "WORKER_SHUTTING_DOWN" + case WorkerCanceled: + return "WORKER_CANCELED" + case WorkerEncounteredFatal: + return "WORKER_ENCOUNTERED_FATAL" + case WorkerDone: + return "WORKER_DONE" + default: + return "UNKNOWN" + } +} + +type WorkerStateList []WorkerState + +func (wsl WorkerStateList) Contains(ws WorkerState) bool { + for _, s := range wsl { + if s == ws { + return true + } + } + return false +} + +type WorkerStateTransition int + +const ( + UnknownWorkerStateTransition WorkerStateTransition = iota + WorkerCheckCtxAndShutdown // check context and shutdown if canceled + WorkerCheckCtxAndExit // check context and exit immediately if canceled + WorkerFinishStartup // finish starting up + WorkerDoWork // do work + WorkerExit // exit + WorkerThrowError // throw error +) + +func (wst WorkerStateTransition) String() string { + switch wst { + case WorkerCheckCtxAndShutdown: + return "WORKER_CHECK_CTX_AND_SHUTDOWN" + case WorkerCheckCtxAndExit: + return "WORKER_CHECK_CTX_AND_EXIT" + case WorkerFinishStartup: + return "WORKER_FINISH_STARTUP" + case WorkerDoWork: + return "WORKER_DO_WORK" + case WorkerExit: + return "WORKER_EXIT" + case WorkerThrowError: + return "WORKER_THROW_ERROR" + default: + return "UNKNOWN" + } +} + +// WorkerStateTransitions is a map from worker state to valid state transitions +var WorkerStateTransitions = map[WorkerState][]WorkerStateTransition{ + WorkerStartingUp: {WorkerCheckCtxAndExit, WorkerCheckCtxAndShutdown, WorkerDoWork, WorkerFinishStartup}, + WorkerStartupShuttingDown: {WorkerDoWork, WorkerExit, WorkerThrowError}, + WorkerRunning: {WorkerCheckCtxAndExit, WorkerCheckCtxAndShutdown, WorkerDoWork, WorkerExit, WorkerThrowError}, + WorkerShuttingDown: {WorkerDoWork, WorkerExit, WorkerThrowError}, +} + +// CheckWorkerStateTransition 
checks the validity of a worker state transition
+func CheckWorkerStateTransition(t *rapid.T, id int, start, end WorkerState, transition WorkerStateTransition, canceled bool) {
+ if !(func() bool {
+ switch start {
+ case WorkerStartingUp:
+ switch transition {
+ case WorkerCheckCtxAndExit:
+ return (canceled && end == WorkerStartupCanceled) || (!canceled && end == WorkerStartingUp)
+ case WorkerCheckCtxAndShutdown:
+ return (canceled && end == WorkerStartupShuttingDown) || (!canceled && end == WorkerStartingUp)
+ case WorkerDoWork:
+ return end == WorkerStartingUp
+ case WorkerFinishStartup:
+ return end == WorkerRunning
+ }
+ case WorkerStartupShuttingDown:
+ switch transition {
+ case WorkerDoWork:
+ return end == WorkerStartupShuttingDown
+ case WorkerExit:
+ return end == WorkerStartupCanceled
+ case WorkerThrowError:
+ return end == WorkerStartupEncounteredFatal
+ }
+ case WorkerRunning:
+ switch transition {
+ case WorkerCheckCtxAndExit:
+ return (canceled && end == WorkerCanceled) || (!canceled && end == WorkerRunning)
+ case WorkerCheckCtxAndShutdown:
+ return (canceled && end == WorkerShuttingDown) || (!canceled && end == WorkerRunning)
+ case WorkerDoWork:
+ return end == WorkerRunning
+ case WorkerExit:
+ return end == WorkerDone
+ case WorkerThrowError:
+ return end == WorkerEncounteredFatal
+ }
+ case WorkerShuttingDown:
+ switch transition {
+ case WorkerDoWork:
+ return end == WorkerShuttingDown
+ case WorkerExit:
+ return end == WorkerCanceled
+ case WorkerThrowError:
+ return end == WorkerEncounteredFatal
+ }
+ }
+
+ return false
+ }()) {
+ require.Fail(t, "invalid worker state transition", "[worker %v] start=%v, canceled=%v, transition=%v, end=%v", id, start, canceled, transition, end)
+ }
+}
+
+type WSTConsumer func(WorkerStateTransition) WorkerState
+type WSTProvider func(WorkerState) WorkerStateTransition
+
+// MakeWorkerTransitionFuncs creates a WorkerStateTransition Consumer / Provider pair.
+// The Provider is called by the worker to report the state it has reached and receive
+// the next state transition instruction.
+// The Consumer is called by the test code to send the next state transition instruction
+// and get the resulting end state.
+func MakeWorkerTransitionFuncs() (WSTConsumer, WSTProvider) { + var started bool + stateChan := make(chan WorkerState, 1) + transitionChan := make(chan WorkerStateTransition) + + consumer := func(wst WorkerStateTransition) WorkerState { + transitionChan <- wst + return <-stateChan + } + + provider := func(ws WorkerState) WorkerStateTransition { + if started { + stateChan <- ws + } else { + started = true + } + + if _, ok := WorkerStateTransitions[ws]; !ok { + return UnknownWorkerStateTransition + } + + return <-transitionChan + } + + return consumer, provider +} + +func ComponentWorker(t *rapid.T, id int, next WSTProvider) lifecycle.ComponentWorker { + unexpectedStateTransition := func(s WorkerState, wst WorkerStateTransition) { + panic(fmt.Sprintf("[worker %v] unexpected state transition: received %v for state %v", id, wst, s)) + } + + log := func(msg string) { + t.Logf("[worker %v] %v\n", id, msg) + } + + return func(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) { + var state WorkerState + goto startingUp + + startingUp: + log("starting up") + state = WorkerStartingUp + switch transition := next(state); transition { + case WorkerCheckCtxAndExit: + if lifecycle.CheckClosed(ctx.Done()) { + goto startupCanceled + } + goto startingUp + case WorkerCheckCtxAndShutdown: + if lifecycle.CheckClosed(ctx.Done()) { + goto startupShuttingDown + } + goto startingUp + case WorkerDoWork: + goto startingUp + case WorkerFinishStartup: + ready() + goto running + default: + unexpectedStateTransition(state, transition) + } + + startupShuttingDown: + log("startup shutting down") + state = WorkerStartupShuttingDown + switch transition := next(state); transition { + case WorkerDoWork: + goto startupShuttingDown + case WorkerExit: + goto startupCanceled + case WorkerThrowError: + goto startupEncounteredFatal + default: + unexpectedStateTransition(state, transition) + } + + startupCanceled: + log("startup canceled") + state = WorkerStartupCanceled + next(state) + return + + startupEncounteredFatal: + log("startup encountered fatal") + state = WorkerStartupEncounteredFatal + defer next(state) + ctx.Throw(&WorkerError{id}) + + running: + log("running") + state = WorkerRunning + switch transition := next(state); transition { + case WorkerCheckCtxAndExit: + if lifecycle.CheckClosed(ctx.Done()) { + goto canceled + } + goto running + case WorkerCheckCtxAndShutdown: + if lifecycle.CheckClosed(ctx.Done()) { + goto shuttingDown + } + goto running + case WorkerDoWork: + goto running + case WorkerExit: + goto done + case WorkerThrowError: + goto encounteredFatal + default: + unexpectedStateTransition(state, transition) + } + + shuttingDown: + log("shutting down") + state = WorkerShuttingDown + switch transition := next(state); transition { + case WorkerDoWork: + goto shuttingDown + case WorkerExit: + goto canceled + case WorkerThrowError: + goto encounteredFatal + default: + unexpectedStateTransition(state, transition) + } + + canceled: + log("canceled") + state = WorkerCanceled + next(state) + return + + encounteredFatal: + log("encountered fatal") + state = WorkerEncounteredFatal + defer next(state) + ctx.Throw(&WorkerError{id}) + + done: + log("done") + state = WorkerDone + next(state) + } +} + +type WorkerError struct { + id int +} + +func (e *WorkerError) Is(target error) bool { + if t, ok := target.(*WorkerError); ok { + return t.id == e.id + } + return false +} + +func (e *WorkerError) Error() string { + return fmt.Sprintf("[worker %v] irrecoverable error", e.id) +} + +// StartStateTransition returns a 
pair of functions AddTransition and ExecuteTransitions. +// AddTransition is called to add a state transition step, and then ExecuteTransitions shuffles +// all of the added steps and executes them in a random order. +func StartStateTransition() (func(t func()), func(*rapid.T)) { + var transitions []func() + + addTransition := func(t func()) { + transitions = append(transitions, t) + } + + executeTransitions := func(t *rapid.T) { + for i := 0; i < len(transitions); i++ { + j := rapid.IntRange(0, len(transitions)-i-1).Draw(t, "") + transitions[i], transitions[j+i] = transitions[j+i], transitions[i] + transitions[i]() + } + // TODO: is this simpler? + // executionOrder := rapid.SliceOfNDistinct( + // rapid.IntRange(0, len(transitions)-1), len(transitions), len(transitions), nil, + // ).Draw(t, "transition_execution_order").([]int) + // for _, i := range executionOrder { + // transitions[i]() + // } + } + + return addTransition, executeTransitions +} + +type StateTransition struct { + cancel bool + workerIDs []int + workerTransitions []WorkerStateTransition +} + +func (st *StateTransition) String() string { + return fmt.Sprintf( + "stateTransition{ cancel=%v, workerIDs=%v, workerTransitions=%v }", + st.cancel, st.workerIDs, st.workerTransitions, + ) +} + +type ComponentManagerMachine struct { + cm *lifecycle.ComponentManager + + cancel context.CancelFunc + workerTransitionConsumers []WSTConsumer + + canceled bool + workerErrors error + workerStates []WorkerState + + resetChannelReadTimeout func() + assertClosed func(t *rapid.T, ch <-chan struct{}, msgAndArgs ...interface{}) + assertNotClosed func(t *rapid.T, ch <-chan struct{}, msgAndArgs ...interface{}) + assertErrorThrownMatches func(t *rapid.T, err error, msgAndArgs ...interface{}) + assertErrorNotThrown func(t *rapid.T) + + cancelGenerator *rapid.Generator[bool] + drawStateTransition func(t *rapid.T) *StateTransition +} + +func (c *ComponentManagerMachine) init(t *rapid.T) { + numWorkers := rapid.IntRange(0, 5).Draw(t, "num_workers") + pCancel := rapid.Float64Range(0, 100).Draw(t, "p_cancel") + + c.cancelGenerator = rapid.Map(rapid.Float64Range(0, 100), func(n float64) bool { + return pCancel == 100 || n < pCancel + }) + + c.drawStateTransition = func(t *rapid.T) *StateTransition { + st := &StateTransition{} + + if !c.canceled { + st.cancel = c.cancelGenerator.Draw(t, "cancel") + } + + for workerId, state := range c.workerStates { + if allowedTransitions, ok := WorkerStateTransitions[state]; ok { + label := fmt.Sprintf("worker_transition_%v", workerId) + st.workerIDs = append(st.workerIDs, workerId) + st.workerTransitions = append(st.workerTransitions, rapid.SampledFrom(allowedTransitions).Draw(t, label)) + } + } + + return rapid.Just(st).Draw(t, "state_transition") + } + + ctx, cancel := context.WithCancel(context.Background()) + c.cancel = cancel + + signalerCtx, errChan := lifecycle.WithSignaler(ctx) + + var channelReadTimeout <-chan struct{} + var signalerErr error + + c.resetChannelReadTimeout = func() { + ctx, cancel := context.WithTimeout(context.Background(), CHANNEL_CLOSE_LATENCY_ALLOWANCE) + _ = cancel + channelReadTimeout = ctx.Done() + } + + c.assertClosed = func(t *rapid.T, ch <-chan struct{}, msgAndArgs ...interface{}) { + select { + case <-ch: + default: + select { + case <-channelReadTimeout: + assert.Fail(t, "channel is not closed", msgAndArgs...) 
+ case <-ch: + } + } + } + + c.assertNotClosed = func(t *rapid.T, ch <-chan struct{}, msgAndArgs ...interface{}) { + select { + case <-ch: + assert.Fail(t, "channel is closed", msgAndArgs...) + default: + select { + case <-ch: + assert.Fail(t, "channel is closed", msgAndArgs...) + case <-channelReadTimeout: + } + } + } + + c.assertErrorThrownMatches = func(t *rapid.T, err error, msgAndArgs ...interface{}) { + if signalerErr == nil { + select { + case signalerErr = <-errChan: + default: + select { + case <-channelReadTimeout: + assert.Fail(t, "error was not thrown") + return + case signalerErr = <-errChan: + } + } + } + + assert.ErrorIs(t, err, signalerErr, msgAndArgs...) + } + + c.assertErrorNotThrown = func(t *rapid.T) { + if signalerErr == nil { + select { + case signalerErr = <-errChan: + default: + select { + case signalerErr = <-errChan: + case <-channelReadTimeout: + return + } + } + } + + assert.Fail(t, "error was thrown: %v", signalerErr) + } + + c.workerTransitionConsumers = make([]WSTConsumer, numWorkers) + c.workerStates = make([]WorkerState, numWorkers) + + cmb := lifecycle.NewComponentManagerBuilder() + + for i := 0; i < numWorkers; i++ { + wtc, wtp := MakeWorkerTransitionFuncs() + c.workerTransitionConsumers[i] = wtc + cmb.AddWorker(ComponentWorker(t, i, wtp)) + } + + c.cm = cmb.Build() + c.cm.Start(signalerCtx) + + for i := 0; i < numWorkers; i++ { + c.workerStates[i] = WorkerStartingUp + } +} + +func (c *ComponentManagerMachine) ExecuteStateTransition(t *rapid.T) { + st := c.drawStateTransition(t) + + t.Logf("drew state transition: %v\n", st) + + var errors *multierror.Error + + addTransition, executeTransitionsInRandomOrder := StartStateTransition() + + if st.cancel { + addTransition(func() { + t.Log("executing cancel transition\n") + c.cancel() + c.canceled = true + c.resetChannelReadTimeout() + c.assertClosed(t, c.cm.ShutdownSignal()) + }) + } + + for i, workerId := range st.workerIDs { + i := i + workerId := workerId + addTransition(func() { + wst := st.workerTransitions[i] + t.Logf("executing worker %v transition: %v\n", workerId, wst) + endState := c.workerTransitionConsumers[workerId](wst) + CheckWorkerStateTransition(t, workerId, c.workerStates[workerId], endState, wst, c.canceled) + c.workerStates[workerId] = endState + + if (WorkerStateList{WorkerStartupEncounteredFatal, WorkerEncounteredFatal}).Contains(endState) { + err := &WorkerError{workerId} + require.NotErrorIs(t, c.workerErrors, err) + require.NotErrorIs(t, errors, err) + errors = multierror.Append(errors, err) + c.canceled = true + c.resetChannelReadTimeout() + c.assertClosed(t, c.cm.ShutdownSignal()) + } + }) + } + + executeTransitionsInRandomOrder(t) + + if c.workerErrors == nil { + c.workerErrors = errors.ErrorOrNil() + } + + t.Logf("end state: { canceled=%v, workerErrors=%v, workerStates=%v }\n", c.canceled, c.workerErrors, c.workerStates) +} + +func (c *ComponentManagerMachine) Check(t *rapid.T) { + c.resetChannelReadTimeout() + + if c.canceled { + c.assertClosed(t, c.cm.ShutdownSignal(), "context is canceled but component manager shutdown signal is not closed") + } + + allWorkersReady := true + allWorkersDone := true + + for workerID, state := range c.workerStates { + if (WorkerStateList{ + WorkerStartingUp, + WorkerStartupShuttingDown, + WorkerStartupCanceled, + WorkerStartupEncounteredFatal, + }).Contains(state) { + allWorkersReady = false + c.assertNotClosed(t, c.cm.Ready(), "worker %v has not finished startup but component manager ready channel is closed", workerID) + } + + if !(WorkerStateList{ 
+ WorkerStartupCanceled, + WorkerStartupEncounteredFatal, + WorkerCanceled, + WorkerEncounteredFatal, + WorkerDone, + }).Contains(state) { + allWorkersDone = false + c.assertNotClosed(t, c.cm.Done(), "worker %v has not exited but component manager done channel is closed", workerID) + } + + if (WorkerStateList{ + WorkerStartupShuttingDown, + WorkerStartupCanceled, + WorkerStartupEncounteredFatal, + WorkerShuttingDown, + WorkerCanceled, + WorkerEncounteredFatal, + }).Contains(state) { + c.assertClosed(t, c.cm.ShutdownSignal(), "worker %v has been canceled or encountered a fatal error but component manager shutdown signal is not closed", workerID) + } + } + + if allWorkersReady { + c.assertClosed(t, c.cm.Ready(), "all workers are ready but component manager ready channel is not closed") + } + + if allWorkersDone { + c.assertClosed(t, c.cm.Done(), "all workers are done but component manager done channel is not closed") + } + + if c.workerErrors != nil { + c.assertErrorThrownMatches(t, c.workerErrors, "error received by signaler does not match any of the ones thrown") + c.assertClosed(t, c.cm.ShutdownSignal(), "fatal error thrown but context is not canceled") + } else { + c.assertErrorNotThrown(t) + } +} + +func TestComponentManager(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + sm := new(ComponentManagerMachine) + sm.init(t) + t.Repeat(rapid.StateMachineActions(sm)) + }) +} + +func TestComponentManagerShutdown(t *testing.T) { + mgr := lifecycle.NewComponentManagerBuilder(). + AddWorker(func(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) { + ready() + <-ctx.Done() + }).Build() + + parent, cancel := context.WithCancel(context.Background()) + ctx := unittest.NewMockSignalerContext(t, parent) + + mgr.Start(ctx) + unittest.AssertClosesBefore(t, mgr.Ready(), 10*time.Millisecond) + cancel() + + // ShutdownSignal indicates we have started shutdown, Done indicates we have completed + // shutdown. If we have completed shutdown, we must have started shutdown. + unittest.AssertClosesBefore(t, mgr.Done(), 10*time.Millisecond) + closed := lifecycle.CheckClosed(mgr.ShutdownSignal()) + assert.True(t, closed) +} + +// run the test many times to reproduce consistently +func TestComponentManagerShutdown_100(t *testing.T) { + for i := 0; i < 100; i++ { + TestComponentManagerShutdown(t) + } +} diff --git a/lifecycle/errors.go b/lifecycle/errors.go new file mode 100644 index 0000000..aedc67e --- /dev/null +++ b/lifecycle/errors.go @@ -0,0 +1,13 @@ +package lifecycle + +import ( + "errors" + "fmt" +) + +// ErrComponentRunning is returned by a component that has already been started +// and has had Startable.Start called a second time. +var ErrComponentRunning = errors.New("component is already running") + +// ErrComponentShutdown is returned by a component that has already shut down. 
+var ErrComponentShutdown = fmt.Errorf("component has already shut down") diff --git a/lifecycle/go.mod b/lifecycle/go.mod new file mode 100644 index 0000000..bb5098b --- /dev/null +++ b/lifecycle/go.mod @@ -0,0 +1,21 @@ +module source.quilibrium.com/quilibrium/monorepo/lifecycle + +go 1.24.0 + +toolchain go1.24.9 + +require ( + github.com/stretchr/testify v1.11.1 + go.uber.org/atomic v1.11.0 +) + +require github.com/hashicorp/errwrap v1.0.0 // indirect + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + pgregory.net/rapid v1.2.0 +) diff --git a/lifecycle/go.sum b/lifecycle/go.sum new file mode 100644 index 0000000..4713f8b --- /dev/null +++ b/lifecycle/go.sum @@ -0,0 +1,19 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/lifecycle/mocks/component.go b/lifecycle/mocks/component.go new file mode 100644 index 0000000..e3fb70f --- /dev/null +++ b/lifecycle/mocks/component.go @@ -0,0 +1,85 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + lifecycle "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// Component is an autogenerated mock type for the Component type +type Component struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *Component) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Ready provides a mock function with no fields +func (_m *Component) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Component) Start(_a0 lifecycle.SignalerContext) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lifecycle.SignalerContext) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewComponent creates a new instance of Component. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComponent(t interface { + mock.TestingT + Cleanup(func()) +}) *Component { + mock := &Component{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lifecycle/mocks/component_manager_builder.go b/lifecycle/mocks/component_manager_builder.go new file mode 100644 index 0000000..70246c7 --- /dev/null +++ b/lifecycle/mocks/component_manager_builder.go @@ -0,0 +1,67 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + lifecycle "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// ComponentManagerBuilder is an autogenerated mock type for the ComponentManagerBuilder type +type ComponentManagerBuilder struct { + mock.Mock +} + +// AddWorker provides a mock function with given fields: _a0 +func (_m *ComponentManagerBuilder) AddWorker(_a0 lifecycle.ComponentWorker) lifecycle.ComponentManagerBuilder { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for AddWorker") + } + + var r0 lifecycle.ComponentManagerBuilder + if rf, ok := ret.Get(0).(func(lifecycle.ComponentWorker) lifecycle.ComponentManagerBuilder); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(lifecycle.ComponentManagerBuilder) + } + } + + return r0 +} + +// Build provides a mock function with no fields +func (_m *ComponentManagerBuilder) Build() *lifecycle.ComponentManager { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Build") + } + + var r0 *lifecycle.ComponentManager + if rf, ok := ret.Get(0).(func() *lifecycle.ComponentManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*lifecycle.ComponentManager) + } + } + + return r0 +} + +// NewComponentManagerBuilder creates a new instance of ComponentManagerBuilder. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComponentManagerBuilder(t interface { + mock.TestingT + Cleanup(func()) +}) *ComponentManagerBuilder { + mock := &ComponentManagerBuilder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lifecycle/mocks/signaler_context.go b/lifecycle/mocks/signaler_context.go new file mode 100644 index 0000000..cfb2d24 --- /dev/null +++ b/lifecycle/mocks/signaler_context.go @@ -0,0 +1,124 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// SignalerContext is an autogenerated mock type for the SignalerContext type +type SignalerContext struct { + mock.Mock +} + +// Deadline provides a mock function with no fields +func (_m *SignalerContext) Deadline() (time.Time, bool) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Deadline") + } + + var r0 time.Time + var r1 bool + if rf, ok := ret.Get(0).(func() (time.Time, bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Done provides a mock function with no fields +func (_m *SignalerContext) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Err provides a mock function with no fields +func (_m *SignalerContext) Err() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Err") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Throw provides a mock function with given fields: err +func (_m *SignalerContext) Throw(err error) { + _m.Called(err) +} + +// Value provides a mock function with given fields: key +func (_m *SignalerContext) Value(key any) any { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Value") + } + + var r0 any + if rf, ok := ret.Get(0).(func(any) any); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(any) + } + } + + return r0 +} + +// sealed provides a mock function with no fields +func (_m *SignalerContext) sealed() { + _m.Called() +} + +// NewSignalerContext creates a new instance of SignalerContext. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSignalerContext(t interface { + mock.TestingT + Cleanup(func()) +}) *SignalerContext { + mock := &SignalerContext{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lifecycle/signaler.go b/lifecycle/signaler.go new file mode 100644 index 0000000..7da7dd1 --- /dev/null +++ b/lifecycle/signaler.go @@ -0,0 +1,112 @@ +package lifecycle + +import ( + "context" + "fmt" + "log" + "os" + "runtime" + + "go.uber.org/atomic" +) + +// Signaler sends the error out. 
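+//
+// A typical pattern (illustrative):
+//
+//	sig, errCh := NewSignaler()
+//	go func() { sig.Throw(errors.New("boom")) }() // first Throw wins and exits the goroutine
+//	err := <-errCh                                // the thrown error is delivered here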
+type Signaler struct {
+ errChan chan error
+ errThrown *atomic.Bool
+}
+
+func NewSignaler() (*Signaler, <-chan error) {
+ errChan := make(chan error, 1)
+ return &Signaler{
+ errChan: errChan,
+ errThrown: atomic.NewBool(false),
+ }, errChan
+}
+
+// Throw is a narrow drop-in replacement for panic, log.Fatal, log.Panic, etc.
+// anywhere there's something connected to the error channel. It only sends
+// the first error it is called with to the error channel, and logs subsequent
+// errors as unhandled.
+func (s *Signaler) Throw(err error) {
+ defer runtime.Goexit()
+ if s.errThrown.CompareAndSwap(false, true) {
+ s.errChan <- err
+ close(s.errChan)
+ } else {
+ // TODO: we simply log the unhandled fatal to stderr for now, but we should
+ // probably allow the user to customize the logger / logging format used
+ log.New(os.Stderr, "", log.LstdFlags).Println(
+ fmt.Errorf("unhandled fatal: %w", err),
+ )
+ }
+}
+
+// SignalerContext is a constrained interface to provide a drop-in replacement
+// for context.Context, including in interfaces that compose it.
+type SignalerContext interface {
+ context.Context
+ Throw(err error) // delegates to the signaler
+}
+
+// SignalerContextKey represents the key type for retrieving a SignalerContext
+// from the values of a `context.Context`.
+type SignalerContextKey struct{}
+
+// private, to force context derivation / WithSignaler
+type signalerCtx struct {
+ context.Context
+ *Signaler
+}
+
+// WithSignaler is the One True Way of getting a SignalerContext.
+func WithSignaler(parent context.Context) (SignalerContext, <-chan error) {
+ sig, errChan := NewSignaler()
+ return &signalerCtx{parent, sig}, errChan
+}
+
+// WithSignalerContext wraps a `SignalerContext` using `context.WithValue` so it
+// can later be used with `Throw`.
+func WithSignalerContext(
+ parent context.Context,
+ ctx SignalerContext,
+) context.Context {
+ return context.WithValue(parent, SignalerContextKey{}, ctx)
+}
+
+// Throw enables throwing a fatal error using any context.Context.
+//
+// If we have a SignalerContext, we can call ctx.Throw directly. But many
+// library methods expect a plain context.Context, and we want to pass the same
+// context without boilerplate. A SignalerContext that has been embedded via
+// WithSignalerContext, for example:
+//
+// ctx := WithSignalerContext(parent, signalerCtx)
+//
+// is "downcast" to a context.Context, yet Throw can still recover it from the
+// context's values and delegate to it.
+//
+// Throw can be a drop-in replacement anywhere we have a context.Context likely
+// to support signals. IT WILL LOG A FATAL ERROR AND EXIT THE PROCESS IF THE
+// CONTEXT DOES NOT SUPPORT SIGNALS.
+func Throw(ctx context.Context, err error) {
+ signalerAbleContext, ok := ctx.Value(SignalerContextKey{}).(SignalerContext)
+ if ok {
+ signalerAbleContext.Throw(err)
+ } else {
+ // Be loud about the fact that this does not (but should) handle fatals:
+ log.Fatalf(
+ "fatal error: signaler not found for context, please implement! Unhandled fatal error: %v",
+ err,
+ )
+ }
+}
+
+// WithSignallerAndCancel returns a fatal (signaler) context, the cancel
+// function for the context, and the error channel for the context.
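+//
+// Illustrative use:
+//
+//	sctx, cancel, errCh := WithSignallerAndCancel(context.Background())
+//	defer cancel()
+//	go func() { sctx.Throw(errors.New("fatal")) }() // workers signal fatal errors this way
+//	err := <-errCh                                  // first fatal error thrown on sctx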
+func WithSignallerAndCancel(ctx context.Context) ( + SignalerContext, + context.CancelFunc, + <-chan error, +) { + parent, cancel := context.WithCancel(ctx) + fatalCtx, errCh := WithSignaler(parent) + return fatalCtx, cancel, errCh +} diff --git a/lifecycle/supervisor.go b/lifecycle/supervisor.go new file mode 100644 index 0000000..f4e920d --- /dev/null +++ b/lifecycle/supervisor.go @@ -0,0 +1,268 @@ +package lifecycle + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "go.uber.org/atomic" +) + +// Node describes one component in the graph. +type Node struct { + Name string + Deps []string // names this node depends on (parents) + Factory ComponentFactory + OnError OnError // the handler for this node +} + +// Supervisor runs a DAG of nodes with policy-aware error propagation. +type Supervisor struct { + nodes map[string]*Node + parents map[string][]string + kids map[string][]string + + // runtime + cancels map[string]context.CancelFunc + wg sync.WaitGroup + + // decision requests from node wrappers + requests chan decisionReq + // suppress events that are just the fallout of our own cancels + suppress sync.Map // name -> struct{} +} + +type decisionReq struct { + from string + err error + want ErrorHandlingBehavior // node's own OnError verdict + reply chan ErrorHandlingBehavior +} + +func NewSupervisor(nodes []*Node) (*Supervisor, error) { + s := &Supervisor{ + nodes: map[string]*Node{}, + parents: map[string][]string{}, + kids: map[string][]string{}, + cancels: map[string]context.CancelFunc{}, + requests: make(chan decisionReq, 64), + } + for _, n := range nodes { + if _, dup := s.nodes[n.Name]; dup { + return nil, fmt.Errorf("dup node %q", n.Name) + } + s.nodes[n.Name] = n + } + // build edges + for name, n := range s.nodes { + for _, p := range n.Deps { + if _, ok := s.nodes[p]; !ok { + return nil, fmt.Errorf("%s depends on unknown %s", name, p) + } + s.parents[name] = append(s.parents[name], p) + s.kids[p] = append(s.kids[p], name) + } + } + // cycle check via Kahn + if _, err := topoOrder(s.nodes, s.parents); err != nil { + return nil, err + } + return s, nil +} + +func (s *Supervisor) Start(ctx context.Context) error { + ctx, stopSignals := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM) + defer stopSignals() + + order, _ := topoOrder(s.nodes, s.parents) + + // start in topo order so deps come up first + for _, name := range order { + n := s.nodes[name] + cctx, cancel := context.WithCancel(ctx) + s.cancels[name] = cancel + s.wg.Add(1) + go func(name string, n *Node, cctx context.Context) { + defer s.wg.Done() + // Wrap node's OnError to route decisions through supervisor. 
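+ // The node's own verdict ("want") is forwarded to the coordinator loop
+ // below, which may override it: nodes that fail only because the
+ // supervisor already cancelled them as part of a cascade are told to
+ // stop quietly rather than acted on again.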
+ handler := func(err error) ErrorHandlingBehavior { + want := ErrorShouldRestart + if n.OnError != nil { + want = n.OnError(err) + } + // ignore events we ourselves triggered via cancel + if errors.Is(err, context.Canceled) { + return ErrorShouldStop + } + reply := make(chan ErrorHandlingBehavior, 1) + s.requests <- decisionReq{ + from: name, + err: err, + want: want, + reply: reply, + } + return <-reply + } + _ = RunComponent(cctx, n.Factory, handler) + }(name, n, cctx) + } + + // coordinator loop + var shutdownAll atomic.Bool + for { + select { + case <-ctx.Done(): + s.stopAll() + s.wg.Wait() + return ctx.Err() + + case req := <-s.requests: + // Dedup if this node was targeted by a prior cascade + if _, silenced := s.suppress.Load(req.from); silenced { + req.reply <- ErrorShouldStop + continue + } + + switch req.want { + case ErrorShouldRestart: + // no graph action; let RunComponent restart it + req.reply <- ErrorShouldRestart + + case ErrorShouldStop: + s.stopSubtree(req.from) // stop node + descendants + req.reply <- ErrorShouldStop + + case ErrorShouldStopParents: + s.stopAncestorsAndDesc(req.from) + req.reply <- ErrorShouldStop + + case ErrorShouldShutdown: + // Let the child return promptly, then synchronously wait and exit. + req.reply <- ErrorShouldStop + // Return the precipitating error so callers can log/act on it. + return req.err + + case ErrorShouldSpinHalt: + shutdownAll.Store(true) + s.stopAll() + req.reply <- ErrorShouldStop // child returns promptly + // Block the supervisor until SIGTERM (local wait). Ignore SIGINT. + term := make(chan os.Signal, 1) + signal.Notify(term, syscall.SIGTERM) + <-term + // After SIGTERM, join everything and return the original error. + s.wg.Wait() + return req.err + } + } + } +} + +func topoOrder( + nodes map[string]*Node, + parents map[string][]string, +) ([]string, error) { + indeg := map[string]int{} + for name := range nodes { + indeg[name] = 0 + } + for name := range nodes { + for _ = range parents[name] { + indeg[name]++ + } + } + q := make([]string, 0) + for n, d := range indeg { + if d == 0 { + q = append(q, n) + } + } + var order []string + for len(q) > 0 { + n := q[0] + q = q[1:] + order = append(order, n) + for _, kid := range kidsOf(n, nodes, parents) { + indeg[kid]-- + if indeg[kid] == 0 { + q = append(q, kid) + } + } + } + if len(order) != len(nodes) { + return nil, fmt.Errorf("dependency cycle") + } + return order, nil +} + +func kidsOf(n string, nodes map[string]*Node, parents map[string][]string) []string { + // build once in NewSupervisor; simplified here: + var out []string + for name := range nodes { + for _, p := range parents[name] { + if p == n { + out = append(out, name) + } + } + } + return out +} + +func (s *Supervisor) collectDesc(start string, acc map[string]struct{}) { + for _, k := range s.kids[start] { + if _, seen := acc[k]; seen { + continue + } + acc[k] = struct{}{} + s.collectDesc(k, acc) + } +} +func (s *Supervisor) collectAnc(start string, acc map[string]struct{}) { + for _, p := range s.parents[start] { + if _, seen := acc[p]; seen { + continue + } + acc[p] = struct{}{} + s.collectAnc(p, acc) + } +} + +func (s *Supervisor) stopAll() { + for name := range s.cancels { + s.suppress.Store(name, struct{}{}) + } + for _, cancel := range s.cancels { + cancel() + } +} + +func (s *Supervisor) stopSubtree(root string) { + victims := map[string]struct{}{root: {}} + s.collectDesc(root, victims) + for v := range victims { + s.suppress.Store(v, struct{}{}) + } + for v := range victims { + if c := s.cancels[v]; c != 
nil { + c() + } + } +} + +func (s *Supervisor) stopAncestorsAndDesc(root string) { + victims := map[string]struct{}{root: {}} + s.collectDesc(root, victims) + s.collectAnc(root, victims) + for v := range victims { + s.suppress.Store(v, struct{}{}) + } + for v := range victims { + if c := s.cancels[v]; c != nil { + c() + } + } +} diff --git a/lifecycle/supervisor_test.go b/lifecycle/supervisor_test.go new file mode 100644 index 0000000..3c6ba8b --- /dev/null +++ b/lifecycle/supervisor_test.go @@ -0,0 +1,449 @@ +package lifecycle_test + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +// Ensures the first Throw wins and the caller goroutine exits via Goexit. +// Goexit runs defers, but code after Throw must not execute. +func TestSignaler_FirstThrowWins_AndGoexit(t *testing.T) { + s, errCh := lifecycle.NewSignaler() + + after := make(chan struct{}, 1) // written if code after Throw executes (it shouldn't) + deferred := make(chan struct{}, 1) // closed by defer; should run even with Goexit + go func() { + defer close(deferred) // Goexit SHOULD run defers + s.Throw(errors.New("boom-1")) + after <- struct{}{} // must never execute + }() + + select { + case err := <-errCh: + if err == nil || err.Error() != "boom-1" { + t.Fatalf("expected boom-1, got %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("timed out waiting for first error") + } + + // Defer should have run. + select { + case <-deferred: + // ok + case <-time.After(200 * time.Millisecond): + t.Fatalf("deferred function did not run before goroutine exit") + } + + // Code after Throw must NOT have executed. + select { + case <-after: + t.Fatalf("code after Throw executed; Goexit should prevent it") + case <-time.After(200 * time.Millisecond): + // ok + } + + // Second Throw should be ignored (no panic), just logged to stderr. + // We can call it from a fresh goroutine; nothing observable should change. + go s.Throw(errors.New("boom-2")) + time.Sleep(50 * time.Millisecond) // small settle; nothing to assert further +} + +// Ensures Throw(ctx, err) works when the ctx carries a SignalerContext. 
+func TestThrow_WithContextBridge(t *testing.T) { + base := context.Background() + sctx, errCh := lifecycle.WithSignaler(base) + + ctx := lifecycle.WithSignalerContext(base, sctx) + + go func() { + lifecycle.Throw(ctx, errors.New("ctx-boom")) + }() + + select { + case err := <-errCh: + if err == nil || err.Error() != "ctx-boom" { + t.Fatalf("expected ctx-boom, got %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("timed out waiting for ctx error") + } +} + +type fakeComp struct { + ready chan struct{} + done chan struct{} + started atomic.Int32 + // Triggers: + triggerFatal chan error // if non-nil error arrives, call ctx.Throw(err) +} + +func newFakeComp() *fakeComp { + return &fakeComp{ + ready: make(chan struct{}), + done: make(chan struct{}), + triggerFatal: make(chan error, 1), + } +} + +func (f *fakeComp) Ready() <-chan struct{} { return f.ready } +func (f *fakeComp) Done() <-chan struct{} { return f.done } + +func (f *fakeComp) Start(ctx lifecycle.SignalerContext) error { + if f.started.Add(1) != 1 { + return lifecycle.ErrComponentRunning + } + // simulate startup finishing quickly + close(f.ready) + + go func() { + defer close(f.done) + select { + case err := <-f.triggerFatal: + if err != nil { + ctx.Throw(err) + } + // nil means "clean exit" + return + case <-ctx.Done(): + // graceful stop + return + } + }() + + return nil +} + +func (f *fakeComp) factory() lifecycle.ComponentFactory { + return func() (lifecycle.Component, error) { + return newFakeComp(), nil + } +} + +// helpers for timing in tests +func waitClosed(ch <-chan struct{}, d time.Duration) bool { + select { + case <-ch: + return true + case <-time.After(d): + return false + } +} + +func TestComponentManager_ReadyAndDoneOrdering_NoFatal(t *testing.T) { + builder := lifecycle.NewComponentManagerBuilder(). + AddWorker(lifecycle.NoopWorker). + AddWorker(lifecycle.NoopWorker) + + mgr := builder.Build() + + // Parent signaler context + sctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + defer cancel() + + if err := mgr.Start(sctx); err != nil { + t.Fatalf("start: %v", err) + } + + if ok := waitClosed(mgr.Ready(), time.Second); !ok { + t.Fatalf("ready never closed") + } + + // No errors expected + select { + case err := <-errCh: + t.Fatalf("unexpected fatal: %v", err) + case <-time.After(50 * time.Millisecond): + } + + // Cancel triggers shutdown; ShutdownSignal should close before Done. + cancel() + + if ok := waitClosed(mgr.ShutdownSignal(), time.Second); !ok { + t.Fatalf("shutdown signal not closed before done") + } + if ok := waitClosed(mgr.Done(), time.Second); !ok { + t.Fatalf("done never closed") + } +} + +func TestComponentManager_PropagatesWorkerFatal_ThenDone(t *testing.T) { + fatalErr := errors.New("worker-boom") + + worker := func(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) { + ready() + ctx.Throw(fatalErr) // immediate fatal + } + + mgr := lifecycle.NewComponentManagerBuilder().AddWorker(worker).Build() + + sctx, _, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + + if err := mgr.Start(sctx); err != nil { + t.Fatalf("start: %v", err) + } + + // Expect fatal to reach parent err channel. + select { + case err := <-errCh: + if err == nil || !errors.Is(err, fatalErr) { + t.Fatalf("expected %v, got %v", fatalErr, err) + } + case <-time.After(time.Second): + t.Fatalf("timeout waiting for fatal") + } + + // After fatal, manager must eventually be done. 
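+ // (The manager's error-propagation goroutine cancels the workers after
+ // throwing, and closes Done once they have all returned.)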
+ if ok := waitClosed(mgr.Done(), time.Second); !ok { + t.Fatalf("done never closed") + } +} + +// Ensures Ready closes exactly once after all workers call Ready(). +func TestComponentManager_ReadyClosesAfterAllWorkers(t *testing.T) { + worker := func(delay time.Duration) lifecycle.ComponentWorker { + return func(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) { + time.Sleep(delay) + ready() + <-ctx.Done() + } + } + + mgr := lifecycle.NewComponentManagerBuilder(). + AddWorker(worker(150 * time.Millisecond)). + AddWorker(worker(20 * time.Millisecond)). + Build() + + sctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + defer cancel() + + if err := mgr.Start(sctx); err != nil { + t.Fatalf("start: %v", err) + } + + start := time.Now() + if ok := waitClosed(mgr.Ready(), time.Second); !ok { + t.Fatalf("ready never closed") + } + elapsed := time.Since(start) + if elapsed < 150*time.Millisecond { + t.Fatalf("ready closed before slowest worker (%v < 150ms)", elapsed) + } + cancel() + _ = waitClosed(mgr.Done(), time.Second) +} + +// Verifies that RunComponent restarts on ErrorShouldRestart +// and stops on ErrorShouldShutdown, surfacing the last error. +func TestRunComponent_RestartThenShutdown(t *testing.T) { + var starts atomic.Int32 + + // One-shot fake: first instance throws, second instance throws again triggering shutdown. + componentFactory := func() (lifecycle.Component, error) { + f := newFakeComp() + idx := starts.Add(1) + + go func() { + // Wait for Start to close ready + _ = waitClosed(f.Ready(), time.Second) + switch idx { + case 1: + f.triggerFatal <- errors.New("first-fatal") + case 2: + f.triggerFatal <- errors.New("second-fatal") + default: + // any further restarts cleanly exit + f.triggerFatal <- nil + } + }() + return f, nil + } + + first := true + handler := func(err error) lifecycle.ErrorHandlingBehavior { + if first { + first = false + return lifecycle.ErrorShouldRestart + } + return lifecycle.ErrorShouldShutdown + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + err := lifecycle.RunComponent(ctx, componentFactory, handler) + if err == nil || err.Error() != "second-fatal" { + t.Fatalf("expected second-fatal, got %v", err) + } + if got := starts.Load(); got < 2 { + t.Fatalf("expected at least 2 starts, got %d", got) + } +} + +// Verifies RunComponent returns ctx error on parent cancel and waits for Done. +func TestRunComponent_ContextCancel(t *testing.T) { + f := newFakeComp() + + componentFactory := func() (lifecycle.Component, error) { return f, nil } + + handler := func(err error) lifecycle.ErrorHandlingBehavior { + t.Fatalf("no fatal expected, got %v", err) + return lifecycle.ErrorShouldShutdown + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + _ = waitClosed(f.Ready(), time.Second) + time.Sleep(100 * time.Millisecond) + cancel() + }() + + err := lifecycle.RunComponent(ctx, componentFactory, handler) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } + if ok := waitClosed(f.Done(), time.Second); !ok { + t.Fatalf("component not done after cancel") + } +} + +// Utilities to build Nodes whose OnError returns specific behaviors. 
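+//
+// For reference, a Node wires together a name, its dependencies, a factory and
+// an error policy; a minimal sketch (values illustrative):
+//
+//	n := &lifecycle.Node{
+//		Name:    "child",
+//		Deps:    []string{"parent"},
+//		Factory: func() (lifecycle.Component, error) { return newFakeComp(), nil },
+//		OnError: func(error) lifecycle.ErrorHandlingBehavior { return lifecycle.ErrorShouldRestart },
+//	}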
+func nodeWithFake(name string, deps []string, fatalCh chan<- func()) *lifecycle.Node { + var fired atomic.Bool + fc := func() (lifecycle.Component, error) { + f := newFakeComp() + // expose a way for the test to trigger this node's fatal + if fatalCh != nil && fired.CompareAndSwap(false, true) { + fatalCh <- func() { f.triggerFatal <- errors.New(name + "-fatal") } + } + return f, nil + } + // Default OnError: Stop just this subtree unless overridden in tests. + return &lifecycle.Node{Name: name, Deps: deps, Factory: fc, OnError: func(error) lifecycle.ErrorHandlingBehavior { + return lifecycle.ErrorShouldStop + }} +} + +func TestSupervisor_Stop_StopsDescendantsOnly(t *testing.T) { + // graph: A -> B -> C ; A -> D + trigger := make(chan func(), 1) + + a := nodeWithFake("A", nil, nil) + b := nodeWithFake("B", []string{"A"}, nil) + c := nodeWithFake("C", []string{"B"}, nil) + d := nodeWithFake("D", []string{"A"}, nil) + boom := nodeWithFake("X", nil, trigger) // will be re-wired to B below + b.Factory = boom.Factory // trigger drives B + + s, err := lifecycle.NewSupervisor([]*lifecycle.Node{a, b, c, d}) + if err != nil { + t.Fatalf("build: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = s.Start(ctx) + }() + + // Wait for all to be Ready. + time.Sleep(150 * time.Millisecond) + + // Fire B's fatal (policy: ErrorShouldStop) + fire := <-trigger + fire() + + // B and its descendants (C) should stop; A and D continue. + // We cannot directly peek internals; observe via time — fake comps close Done quickly. + time.Sleep(200 * time.Millisecond) + + // There isn't direct access to components; so instead assert supervisor keeps running, + // then cancel and ensure clean exit (sanity). This smoke test verifies the cascade + // did not shutdown the whole graph. + cancel() +} + +func TestSupervisor_StopParents_StopsAncestorsAndDesc(t *testing.T) { + trigger := make(chan func(), 1) + + a := nodeWithFake("A", nil, nil) + b := nodeWithFake("B", []string{"A"}, nil) + c := nodeWithFake("C", []string{"B"}, nil) + + // Make C fire with StopParents + c.Factory = func() (lifecycle.Component, error) { + f := newFakeComp() + go func() { + _ = waitClosed(f.Ready(), time.Second) + trigger <- func() { f.triggerFatal <- errors.New("C-fatal") } + }() + return f, nil + } + c.OnError = func(error) lifecycle.ErrorHandlingBehavior { return lifecycle.ErrorShouldStopParents } + + s, err := lifecycle.NewSupervisor([]*lifecycle.Node{a, b, c}) + if err != nil { + t.Fatalf("build: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { _ = s.Start(ctx) }() + + time.Sleep(150 * time.Millisecond) + + fire := <-trigger + fire() + + // Expect whole chain (A,B,C) to be canceled. Give it a moment, then end. + time.Sleep(200 * time.Millisecond) + cancel() +} + +func TestSupervisor_ShutdownAll(t *testing.T) { + trigger := make(chan func(), 1) + + a := nodeWithFake("A", nil, nil) + b := nodeWithFake("B", []string{"A"}, nil) + + // A fatal on A requests full shutdown. 
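+ // With ErrorShouldShutdown the supervisor replies "stop" to the failing
+ // node and returns the precipitating error from Start, which is what this
+ // test observes via the done channel below.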
+	a.Factory = func() (lifecycle.Component, error) {
+		f := newFakeComp()
+		go func() {
+			_ = waitClosed(f.Ready(), time.Second)
+			trigger <- func() { f.triggerFatal <- errors.New("A-fatal") }
+		}()
+		return f, nil
+	}
+	a.OnError = func(error) lifecycle.ErrorHandlingBehavior { return lifecycle.ErrorShouldShutdown }
+
+	s, err := lifecycle.NewSupervisor([]*lifecycle.Node{a, b})
+	if err != nil {
+		t.Fatalf("build: %v", err)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	done := make(chan struct{})
+	go func() {
+		_ = s.Start(ctx) // Start should return after the shutdown cascade completes
+		close(done)
+	}()
+
+	time.Sleep(150 * time.Millisecond)
+	(<-trigger)()
+
+	select {
+	case <-done:
+		// ok
+	case <-time.After(2 * time.Second):
+		t.Fatalf("supervisor did not exit on Shutdown")
+	}
+}
diff --git a/lifecycle/unittest/utils.go b/lifecycle/unittest/utils.go
new file mode 100644
index 0000000..e0d18e7
--- /dev/null
+++ b/lifecycle/unittest/utils.go
@@ -0,0 +1,339 @@
+package unittest
+
+import (
+	"context"
+	"math"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
+)
+
+// MockSignalerContext is a SignalerContext that can be used in tests to assert
+// that an error is thrown. It embeds a mock.Mock, so it can be used to assert
+// that Throw is called with a specific error. Use
+// NewMockSignalerContextExpectError to create a new MockSignalerContext that
+// expects a specific error, otherwise NewMockSignalerContext.
+type MockSignalerContext struct {
+	context.Context
+	*mock.Mock
+}
+
+var _ lifecycle.SignalerContext = &MockSignalerContext{}
+
+func (m MockSignalerContext) Throw(err error) {
+	m.Called(err)
+}
+
+// NewMockSignalerContext creates a new MockSignalerContext that expects no
+// errors to be thrown.
+func NewMockSignalerContext(
+	t *testing.T,
+	ctx context.Context,
+) *MockSignalerContext {
+	m := &MockSignalerContext{
+		Context: ctx,
+		Mock:    &mock.Mock{},
+	}
+	m.Mock.Test(t)
+	t.Cleanup(func() { m.AssertExpectations(t) })
+	return m
+}
+
+// NewMockSignalerContextWithCancel creates a new MockSignalerContext with a
+// cancel function.
+func NewMockSignalerContextWithCancel(
+	t *testing.T,
+	parent context.Context,
+) (*MockSignalerContext, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(parent)
+	return NewMockSignalerContext(t, ctx), cancel
+}
+
+// NewMockSignalerContextExpectError creates a new MockSignalerContext which
+// expects a specific error to be thrown.
+func NewMockSignalerContextExpectError(
+	t *testing.T,
+	ctx context.Context,
+	err error,
+) *MockSignalerContext {
+	require.NotNil(t, err)
+	m := NewMockSignalerContext(t, ctx)
+
+	// since we expect an error, we should expect a call to Throw
+	m.On("Throw", err).Once().Return()
+
+	return m
+}
+
+// AssertReturnsBefore asserts that the given function returns before the
+// duration expires.
+func AssertReturnsBefore(
+	t *testing.T,
+	f func(),
+	duration time.Duration,
+	msgAndArgs ...interface{},
+) bool {
+	done := make(chan struct{})
+
+	go func() {
+		f()
+		close(done)
+	}()
+
+	select {
+	case <-time.After(duration):
+		t.Log("function did not return in time")
+		assert.Fail(t, "function did not return in time", msgAndArgs...)
+	case <-done:
+		return true
+	}
+	return false
+}
+
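A minimal sketch of how the mock signaler context is meant to be used from another package, assuming the helpers above are importable as lifecycle/unittest; the worker, error, and test names are illustrative, not part of the patch:

package unittest_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
	"source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest"
)

var errBoom = errors.New("boom")

// failingWorker is a stand-in for a component worker that reports a fatal
// error through the SignalerContext instead of returning it.
func failingWorker(ctx lifecycle.SignalerContext) {
	ctx.Throw(errBoom)
}

func TestFailingWorkerThrows(t *testing.T) {
	// The mock fails the test unless Throw is called exactly once with errBoom.
	sctx := unittest.NewMockSignalerContextExpectError(t, context.Background(), errBoom)
	unittest.AssertReturnsBefore(t, func() { failingWorker(sctx) }, time.Second)
}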
+// ClosedChannel returns a closed channel.
+func ClosedChannel() <-chan struct{} {
+	ch := make(chan struct{})
+	close(ch)
+	return ch
+}
+
+// AssertClosesBefore asserts that the given channel closes before the
+// duration expires.
+func AssertClosesBefore(
+	t assert.TestingT,
+	done <-chan struct{},
+	duration time.Duration,
+	msgAndArgs ...interface{},
+) {
+	select {
+	case <-time.After(duration):
+		assert.Fail(t, "channel did not close in time", msgAndArgs...)
+	case <-done:
+		return
+	}
+}
+
+// AssertFloatEqual asserts that two floats are equal within a small tolerance.
+func AssertFloatEqual(t *testing.T, expected, actual float64, message string) {
+	tolerance := .00001
+	if !(math.Abs(expected-actual) < tolerance) {
+		assert.Equal(t, expected, actual, message)
+	}
+}
+
+// AssertNotClosesBefore asserts that the given channel does not close before
+// the duration expires.
+func AssertNotClosesBefore(
+	t assert.TestingT,
+	done <-chan struct{},
+	duration time.Duration,
+	msgAndArgs ...interface{},
+) {
+	select {
+	case <-time.After(duration):
+		return
+	case <-done:
+		assert.Fail(t, "channel closed before timeout", msgAndArgs...)
+	}
+}
+
+// RequireReturnsBefore requires that the given function returns before the
+// duration expires.
+func RequireReturnsBefore(
+	t testing.TB,
+	f func(),
+	duration time.Duration,
+	message string,
+) {
+	done := make(chan struct{})
+
+	go func() {
+		f()
+		close(done)
+	}()
+
+	RequireCloseBefore(
+		t,
+		done,
+		duration,
+		message+": function did not return on time",
+	)
+}
+
+// RequireComponentsDoneBefore invokes the done method of each of the input
+// components concurrently, and fails the test if any component's shutdown
+// takes longer than the specified duration.
+func RequireComponentsDoneBefore(
+	t testing.TB,
+	duration time.Duration,
+	components ...lifecycle.Component,
+) {
+	done := lifecycle.AllDone(components...)
+	RequireCloseBefore(
+		t,
+		done,
+		duration,
+		"failed to shut down all components on time",
+	)
+}
+
+// RequireComponentsReadyBefore invokes the ready method of each of the input
+// components concurrently, and fails the test if any component's startup takes
+// longer than the specified duration.
+func RequireComponentsReadyBefore(
+	t testing.TB,
+	duration time.Duration,
+	components ...lifecycle.Component,
+) {
+	ready := lifecycle.AllReady(components...)
+	RequireCloseBefore(
+		t,
+		ready,
+		duration,
+		"failed to start all components on time",
+	)
+}
+
+// RequireCloseBefore requires that the given channel closes before the
+// duration expires.
+func RequireCloseBefore(
+	t testing.TB,
+	c <-chan struct{},
+	duration time.Duration,
+	message string,
+) {
+	select {
+	case <-time.After(duration):
+		require.Fail(t, "could not close done channel on time: "+message)
+	case <-c:
+		return
+	}
+}
+
+// RequireClosed is a test helper function that fails the test if channel `ch`
+// is not closed.
+func RequireClosed(t *testing.T, ch <-chan struct{}, message string) {
+	select {
+	case <-ch:
+	default:
+		require.Fail(t, "channel is not closed: "+message)
+	}
+}
+
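Taken together, the Ready/Done helpers above encode the intended teardown order: confirm startup, cancel, then bound the shutdown. A short sketch of that call pattern, written as if it lived in this package; the helper name and durations are illustrative, not part of the patch:

func requireCleanShutdown(
	t testing.TB,
	cancel context.CancelFunc,
	components ...lifecycle.Component,
) {
	// Every component must have reported Ready before it is torn down.
	RequireComponentsReadyBefore(t, time.Second, components...)
	cancel()
	// All Done channels must close within the shutdown budget.
	RequireComponentsDoneBefore(t, 2*time.Second, components...)
}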
+// RequireConcurrentCallsReturnBefore is a test helper that runs function `f`
+// count-many times concurrently, and requires all invocations to return within
+// duration.
+func RequireConcurrentCallsReturnBefore(
+	t *testing.T,
+	f func(),
+	count int,
+	duration time.Duration,
+	message string,
+) {
+	wg := &sync.WaitGroup{}
+	for i := 0; i < count; i++ {
+		wg.Add(1)
+		go func() {
+			f()
+			wg.Done()
+		}()
+	}
+
+	RequireReturnsBefore(t, wg.Wait, duration, message)
+}
+
+// RequireNeverReturnBefore is a test helper that tries invoking function `f`
+// and fails the test if either:
+// - function `f` is not invoked within 1 second.
+// - function `f` returns before specified `duration`.
+//
+// It also returns a channel that is closed once function `f` returns, so its
+// openness can be used to check whether `f` has returned over intervals longer
+// than `duration`.
+func RequireNeverReturnBefore(
+	t *testing.T,
+	f func(),
+	duration time.Duration,
+	message string,
+) <-chan struct{} {
+	ch := make(chan struct{})
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+
+	go func() {
+		wg.Done()
+		f()
+		close(ch)
+	}()
+
+	// require that the function is invoked within 1 second
+	RequireReturnsBefore(
+		t,
+		wg.Wait,
+		1*time.Second,
+		"could not invoke the function: "+message,
+	)
+
+	// require that the function does not return within `duration`
+	RequireNeverClosedWithin(t, ch, duration, "unexpected return: "+message)
+
+	return ch
+}
+
+// RequireNeverClosedWithin is a test helper function that fails the test if
+// channel `ch` is closed before the given duration expires.
+func RequireNeverClosedWithin(
+	t *testing.T,
+	ch <-chan struct{},
+	duration time.Duration,
+	message string,
+) {
+	select {
+	case <-time.After(duration):
+	case <-ch:
+		require.Fail(t, "channel closed before timeout: "+message)
+	}
+}
+
+// RequireNotClosed is a test helper function that fails the test if channel
+// `ch` is closed.
+func RequireNotClosed(t *testing.T, ch <-chan struct{}, message string) {
+	select {
+	case <-ch:
+		require.Fail(t, "channel is closed: "+message)
+	default:
+	}
+}
+
+// AssertErrSubstringMatch asserts that two errors match with substring
+// checking on the Error method (`expected` must be a substring of `actual`, to
+// account for the actual error being wrapped). Fails the test if either error
+// is nil.
+//
+// NOTE: This should only be used in cases where `errors.Is` cannot be used,
+// e.g. when errors are transmitted over the network without type information.
+func AssertErrSubstringMatch(t testing.TB, expected, actual error) {
+	require.NotNil(t, expected)
+	require.NotNil(t, actual)
+	assert.True(
+		t,
+		strings.Contains(actual.Error(), expected.Error()) ||
+			strings.Contains(expected.Error(), actual.Error()),
+		"expected error: '%s', got: '%s'", expected.Error(), actual.Error(),
+	)
+}
+
+// Componentify sets up a generated mock to respond to Component lifecycle
+// methods. Any mock type generated by mockery can be used.
+func Componentify(mockable *mock.Mock) { + rwch := make(chan struct{}) + var ch <-chan struct{} = rwch + close(rwch) + + mockable.On("Ready").Return(ch).Maybe() + mockable.On("Done").Return(ch).Maybe() + mockable.On("Start").Return(nil).Maybe() +} diff --git a/node/app/node.go b/node/app/node.go index 733bc2d..9b0461e 100644 --- a/node/app/node.go +++ b/node/app/node.go @@ -1,7 +1,10 @@ package app import ( + "context" + "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/consensus/global" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" "source.quilibrium.com/quilibrium/monorepo/node/execution/manager" @@ -81,22 +84,37 @@ func (d *DHTNode) Stop() { }() } -func (m *MasterNode) Start(quitCh chan struct{}) error { +func (m *MasterNode) Start(ctx context.Context) <-chan error { + errChan := make(chan error) + // Start the global consensus engine - m.quit = quitCh - errChan := m.globalConsensus.Start(quitCh) - select { - case err := <-errChan: - if err != nil { - return err - } + supervisor, err := lifecycle.NewSupervisor( + []*lifecycle.Node{ + &lifecycle.Node{ + Name: "master node", + Factory: func() (lifecycle.Component, error) { + return m.globalConsensus, nil + }, + OnError: func(err error) lifecycle.ErrorHandlingBehavior { + return lifecycle.ErrorShouldShutdown + }, + }, + }, + ) + if err != nil { + go func() { + errChan <- err + }() + return errChan } + go func() { + errChan <- supervisor.Start(ctx) + }() + m.logger.Info("master node started", zap.Uint("core_id", m.coreId)) - // Wait for shutdown signal - <-m.quit - return nil + return errChan } func (m *MasterNode) Stop() { diff --git a/node/app/wire.go b/node/app/wire.go index 738d37a..286642a 100644 --- a/node/app/wire.go +++ b/node/app/wire.go @@ -10,6 +10,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/bulletproofs" "source.quilibrium.com/quilibrium/monorepo/channel" "source.quilibrium.com/quilibrium/monorepo/config" + qconsensus "source.quilibrium.com/quilibrium/monorepo/consensus" "source.quilibrium.com/quilibrium/monorepo/node/compiler" "source.quilibrium.com/quilibrium/monorepo/node/consensus/app" "source.quilibrium.com/quilibrium/monorepo/node/consensus/difficulty" @@ -26,6 +27,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/rpc" "source.quilibrium.com/quilibrium/monorepo/node/store" "source.quilibrium.com/quilibrium/monorepo/node/tests" + "source.quilibrium.com/quilibrium/monorepo/protobufs" tchannel "source.quilibrium.com/quilibrium/monorepo/types/channel" tcompiler "source.quilibrium.com/quilibrium/monorepo/types/compiler" "source.quilibrium.com/quilibrium/monorepo/types/consensus" @@ -90,6 +92,7 @@ var storeSet = wire.NewSet( store.NewPeerstoreDatastore, store.NewPebbleShardsStore, store.NewPebbleWorkerStore, + store.NewPebbleConsensusStore, wire.Bind(new(tstore.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(tstore.TokenStore), new(*store.PebbleTokenStore)), wire.Bind(new(tstore.DataProofStore), new(*store.PebbleDataProofStore)), @@ -99,6 +102,10 @@ var storeSet = wire.NewSet( wire.Bind(new(tries.TreeBackingStore), new(*store.PebbleHypergraphStore)), wire.Bind(new(tstore.ShardsStore), new(*store.PebbleShardsStore)), wire.Bind(new(tstore.WorkerStore), new(*store.PebbleWorkerStore)), + wire.Bind( + new(qconsensus.ConsensusStore[*protobufs.ProposalVote]), + new(*store.PebbleConsensusStore), + ), ) var pubSubSet = wire.NewSet( @@ -354,11 +361,11 @@ func provideDifficultyAnchorFrameNumber(config 
*config.Config) uint64 { } func provideDifficultyAnchorParentTime() int64 { - return 1761217200000 + return 1762862400000 } func provideDifficultyAnchorDifficulty() uint32 { - return 160000 // Initial difficulty + return 80000 // Initial difficulty } func provideGlobalTimeReel( diff --git a/node/app/wire_gen.go b/node/app/wire_gen.go index 6485880..825210a 100644 --- a/node/app/wire_gen.go +++ b/node/app/wire_gen.go @@ -13,6 +13,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/bulletproofs" "source.quilibrium.com/quilibrium/monorepo/channel" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/consensus" "source.quilibrium.com/quilibrium/monorepo/node/compiler" "source.quilibrium.com/quilibrium/monorepo/node/consensus/app" "source.quilibrium.com/quilibrium/monorepo/node/consensus/difficulty" @@ -29,9 +30,10 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/rpc" store2 "source.quilibrium.com/quilibrium/monorepo/node/store" "source.quilibrium.com/quilibrium/monorepo/node/tests" + "source.quilibrium.com/quilibrium/monorepo/protobufs" channel2 "source.quilibrium.com/quilibrium/monorepo/types/channel" compiler2 "source.quilibrium.com/quilibrium/monorepo/types/compiler" - "source.quilibrium.com/quilibrium/monorepo/types/consensus" + consensus2 "source.quilibrium.com/quilibrium/monorepo/types/consensus" "source.quilibrium.com/quilibrium/monorepo/types/crypto" "source.quilibrium.com/quilibrium/monorepo/types/hypergraph" keys2 "source.quilibrium.com/quilibrium/monorepo/types/keys" @@ -105,6 +107,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config } pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger) pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger) + pebbleConsensusStore := store2.NewPebbleConsensusStore(pebbleDB, logger) bedlamCompiler := compiler.NewBedlamCompiler() inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger) dynamicFeeManager := fees.NewDynamicFeeManager(logger, kzgInclusionProver) @@ -116,7 +119,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config asertDifficultyAdjuster := difficulty.NewAsertDifficultyAdjuster(uint64_2, int64_2, uint32_2) optimizedProofOfMeaningfulWorkRewardIssuance := reward.NewOptRewardIssuance() doubleRatchetEncryptedChannel := channel.NewDoubleRatchetEncryptedChannel() - appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, proxyBlossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel) + appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, proxyBlossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, pebbleConsensusStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, 
optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel) dataWorkerIPCServer := provideDataWorkerIPC(rpcMultiaddr, config2, cachedSignerRegistry, proverRegistry, appConsensusEngineFactory, inMemoryPeerInfoManager, frameProver, logger, coreId, parentProcess) globalTimeReel, err := provideGlobalTimeReel(appConsensusEngineFactory) if err != nil { @@ -161,6 +164,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId) pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger) pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger) + pebbleConsensusStore := store2.NewPebbleConsensusStore(pebbleDB, logger) bedlamCompiler := compiler.NewBedlamCompiler() inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger) dynamicFeeManager := fees.NewDynamicFeeManager(logger, kzgInclusionProver) @@ -172,7 +176,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con asertDifficultyAdjuster := difficulty.NewAsertDifficultyAdjuster(uint64_2, int64_2, uint32_2) optimizedProofOfMeaningfulWorkRewardIssuance := reward.NewOptRewardIssuance() doubleRatchetEncryptedChannel := channel.NewDoubleRatchetEncryptedChannel() - appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel) + appConsensusEngineFactory := app.NewAppConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, pebbleClockStore, pebbleInboxStore, pebbleShardsStore, pebbleHypergraphStore, pebbleConsensusStore, frameProver, kzgInclusionProver, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, cachedSignerRegistry, proverRegistry, inMemoryPeerInfoManager, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, bls48581KeyConstructor, doubleRatchetEncryptedChannel) dataWorkerIPCServer := provideDataWorkerIPC(rpcMultiaddr, config2, cachedSignerRegistry, proverRegistry, appConsensusEngineFactory, inMemoryPeerInfoManager, frameProver, logger, coreId, parentProcess) globalTimeReel, err := provideGlobalTimeReel(appConsensusEngineFactory) if err != nil { @@ -227,9 +231,10 @@ func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint) (*Ma pebbleInboxStore := store2.NewPebbleInboxStore(pebbleDB, logger) pebbleShardsStore := store2.NewPebbleShardsStore(pebbleDB, logger) pebbleWorkerStore := store2.NewPebbleWorkerStore(pebbleDB, logger) + pebbleConsensusStore := store2.NewPebbleConsensusStore(pebbleDB, logger) doubleRatchetEncryptedChannel := channel.NewDoubleRatchetEncryptedChannel() bedlamCompiler := compiler.NewBedlamCompiler() - consensusEngineFactory := global.NewConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, frameProver, kzgInclusionProver, cachedSignerRegistry, proverRegistry, dynamicFeeManager, 
blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, pebbleClockStore, pebbleInboxStore, pebbleHypergraphStore, pebbleShardsStore, pebbleWorkerStore, doubleRatchetEncryptedChannel, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, bls48581KeyConstructor, inMemoryPeerInfoManager) + consensusEngineFactory := global.NewConsensusEngineFactory(logger, config2, blossomSub, hypergraph, fileKeyManager, pebbleKeyStore, frameProver, kzgInclusionProver, cachedSignerRegistry, proverRegistry, dynamicFeeManager, blsAppFrameValidator, blsGlobalFrameValidator, asertDifficultyAdjuster, optimizedProofOfMeaningfulWorkRewardIssuance, pebbleClockStore, pebbleInboxStore, pebbleHypergraphStore, pebbleShardsStore, pebbleWorkerStore, pebbleConsensusStore, doubleRatchetEncryptedChannel, decaf448BulletproofProver, mpCitHVerifiableEncryptor, decaf448KeyConstructor, bedlamCompiler, bls48581KeyConstructor, inMemoryPeerInfoManager) globalConsensusComponents, err := provideGlobalConsensusComponents(consensusEngineFactory, config2) if err != nil { return nil, err @@ -272,7 +277,11 @@ var verencSet = wire.NewSet( ), ) -var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store2.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store2.PebbleDB)), store2.NewPebbleClockStore, store2.NewPebbleTokenStore, store2.NewPebbleDataProofStore, store2.NewPebbleHypergraphStore, store2.NewPebbleInboxStore, store2.NewPebbleKeyStore, store2.NewPeerstoreDatastore, store2.NewPebbleShardsStore, store2.NewPebbleWorkerStore, wire.Bind(new(store.ClockStore), new(*store2.PebbleClockStore)), wire.Bind(new(store.TokenStore), new(*store2.PebbleTokenStore)), wire.Bind(new(store.DataProofStore), new(*store2.PebbleDataProofStore)), wire.Bind(new(store.HypergraphStore), new(*store2.PebbleHypergraphStore)), wire.Bind(new(store.InboxStore), new(*store2.PebbleInboxStore)), wire.Bind(new(store.KeyStore), new(*store2.PebbleKeyStore)), wire.Bind(new(tries.TreeBackingStore), new(*store2.PebbleHypergraphStore)), wire.Bind(new(store.ShardsStore), new(*store2.PebbleShardsStore)), wire.Bind(new(store.WorkerStore), new(*store2.PebbleWorkerStore))) +var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store2.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store2.PebbleDB)), store2.NewPebbleClockStore, store2.NewPebbleTokenStore, store2.NewPebbleDataProofStore, store2.NewPebbleHypergraphStore, store2.NewPebbleInboxStore, store2.NewPebbleKeyStore, store2.NewPeerstoreDatastore, store2.NewPebbleShardsStore, store2.NewPebbleWorkerStore, store2.NewPebbleConsensusStore, wire.Bind(new(store.ClockStore), new(*store2.PebbleClockStore)), wire.Bind(new(store.TokenStore), new(*store2.PebbleTokenStore)), wire.Bind(new(store.DataProofStore), new(*store2.PebbleDataProofStore)), wire.Bind(new(store.HypergraphStore), new(*store2.PebbleHypergraphStore)), wire.Bind(new(store.InboxStore), new(*store2.PebbleInboxStore)), wire.Bind(new(store.KeyStore), new(*store2.PebbleKeyStore)), wire.Bind(new(tries.TreeBackingStore), new(*store2.PebbleHypergraphStore)), wire.Bind(new(store.ShardsStore), new(*store2.PebbleShardsStore)), wire.Bind(new(store.WorkerStore), new(*store2.PebbleWorkerStore)), wire.Bind( + new(consensus.ConsensusStore[*protobufs.ProposalVote]), + new(*store2.PebbleConsensusStore), +), +) var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), wire.FieldsOf(new(*config.Config), "Engine"), p2p.NewInMemoryPeerInfoManager, 
p2p.NewBlossomSub, channel.NewDoubleRatchetEncryptedChannel, wire.Bind(new(p2p2.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p2.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)), wire.Bind( new(channel2.EncryptedChannel), @@ -302,21 +311,21 @@ var hypergraphSet = wire.NewSet( ) var validatorSet = wire.NewSet(registration.NewCachedSignerRegistry, wire.Bind( - new(consensus.SignerRegistry), + new(consensus2.SignerRegistry), new(*registration.CachedSignerRegistry), ), provers.NewProverRegistry, fees.NewDynamicFeeManager, validator.NewBLSGlobalFrameValidator, wire.Bind( - new(consensus.GlobalFrameValidator), + new(consensus2.GlobalFrameValidator), new(*validator.BLSGlobalFrameValidator), ), validator.NewBLSAppFrameValidator, wire.Bind( - new(consensus.AppFrameValidator), + new(consensus2.AppFrameValidator), new(*validator.BLSAppFrameValidator), ), provideDifficultyAnchorFrameNumber, provideDifficultyAnchorParentTime, provideDifficultyAnchorDifficulty, difficulty.NewAsertDifficultyAdjuster, wire.Bind( - new(consensus.DifficultyAdjuster), + new(consensus2.DifficultyAdjuster), new(*difficulty.AsertDifficultyAdjuster), ), reward.NewOptRewardIssuance, wire.Bind( - new(consensus.RewardIssuance), + new(consensus2.RewardIssuance), new(*reward.OptimizedProofOfMeaningfulWorkRewardIssuance), ), ) @@ -348,8 +357,8 @@ func NewDataWorkerNode( func provideDataWorkerIPC( rpcMultiaddr string, config2 *config.Config, - signerRegistry consensus.SignerRegistry, - proverRegistry consensus.ProverRegistry, + signerRegistry consensus2.SignerRegistry, + proverRegistry consensus2.ProverRegistry, appConsensusEngineFactory *app.AppConsensusEngineFactory, peerInfoManager p2p2.PeerInfoManager, frameProver crypto.FrameProver, @@ -414,11 +423,11 @@ func provideDifficultyAnchorFrameNumber(config2 *config.Config) uint64 { } func provideDifficultyAnchorParentTime() int64 { - return 1761217200000 + return 1762862400000 } func provideDifficultyAnchorDifficulty() uint32 { - return 160000 + return 80000 } func provideGlobalTimeReel( diff --git a/node/consensus/aggregator/consensus_signature_aggregator_wrapper.go b/node/consensus/aggregator/consensus_signature_aggregator_wrapper.go new file mode 100644 index 0000000..04fd632 --- /dev/null +++ b/node/consensus/aggregator/consensus_signature_aggregator_wrapper.go @@ -0,0 +1,115 @@ +package aggregator + +import ( + "github.com/pkg/errors" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + typesconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus" + "source.quilibrium.com/quilibrium/monorepo/types/crypto" +) + +type ConsensusSignatureAggregatorWrapper struct { + blsConstructor crypto.BlsConstructor + provers typesconsensus.ProverRegistry + filter []byte +} + +type ConsensusAggregatedSignature struct { + output crypto.BlsAggregateOutput + bitmask []byte +} + +// GetBitmask implements models.AggregatedSignature. +func (c *ConsensusAggregatedSignature) GetBitmask() []byte { + return c.bitmask +} + +// GetPubKey implements models.AggregatedSignature. +func (c *ConsensusAggregatedSignature) GetPubKey() []byte { + return c.output.GetAggregatePublicKey() +} + +// GetSignature implements models.AggregatedSignature. +func (c *ConsensusAggregatedSignature) GetSignature() []byte { + return c.output.GetAggregateSignature() +} + +// Aggregate implements consensus.SignatureAggregator. 
+func (c *ConsensusSignatureAggregatorWrapper) Aggregate( + publicKeys [][]byte, + signatures [][]byte, +) (models.AggregatedSignature, error) { + noextSigs := [][]byte{} + if len(c.filter) != 0 { + for _, s := range signatures { + noextSigs = append(noextSigs, s[:74]) + } + } else { + noextSigs = signatures + } + + output, err := c.blsConstructor.Aggregate( + publicKeys, + noextSigs, + ) + if err != nil { + return nil, errors.Wrap(err, "aggregate") + } + + provers, err := c.provers.GetActiveProvers(c.filter) + if err != nil { + return nil, errors.Wrap(err, "aggregate") + } + + pubs := map[string]struct{}{} + for _, p := range publicKeys { + pubs[string(p)] = struct{}{} + } + + bitmask := make([]byte, (len(provers)+7)/8) + for i, p := range provers { + if _, ok := pubs[string(p.PublicKey)]; ok { + bitmask[i/8] |= (1 << (i % 8)) + } + } + + return &ConsensusAggregatedSignature{output, bitmask}, nil +} + +// VerifySignatureMultiMessage implements consensus.SignatureAggregator. +func (c *ConsensusSignatureAggregatorWrapper) VerifySignatureMultiMessage( + publicKeys [][]byte, + signature []byte, + messages [][]byte, + context []byte, +) bool { + panic("unsupported") +} + +// VerifySignatureRaw implements consensus.SignatureAggregator. +func (c *ConsensusSignatureAggregatorWrapper) VerifySignatureRaw( + publicKey []byte, + signature []byte, + message []byte, + context []byte, +) bool { + return c.blsConstructor.VerifySignatureRaw( + publicKey, + signature, + message, + context, + ) +} + +func WrapSignatureAggregator( + blsConstructor crypto.BlsConstructor, + proverRegistry typesconsensus.ProverRegistry, + filter []byte, +) consensus.SignatureAggregator { + return &ConsensusSignatureAggregatorWrapper{ + blsConstructor, + proverRegistry, + filter, + } +} diff --git a/node/consensus/app/app_consensus_engine.go b/node/consensus/app/app_consensus_engine.go index 66598f8..344323e 100644 --- a/node/consensus/app/app_consensus_engine.go +++ b/node/consensus/app/app_consensus_engine.go @@ -4,13 +4,17 @@ import ( "bytes" "context" "encoding/hex" + "fmt" "math/big" + "math/rand" "slices" "strings" "sync" "time" "github.com/iden3/go-iden3-crypto/poseidon" + pcrypto "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" mn "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" @@ -19,10 +23,20 @@ import ( "google.golang.org/grpc" "source.quilibrium.com/quilibrium/monorepo/config" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/forks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub" + "source.quilibrium.com/quilibrium/monorepo/consensus/participant" + "source.quilibrium.com/quilibrium/monorepo/consensus/validator" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/aggregator" "source.quilibrium.com/quilibrium/monorepo/node/consensus/global" "source.quilibrium.com/quilibrium/monorepo/node/consensus/reward" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/tracing" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/voting" "source.quilibrium.com/quilibrium/monorepo/node/dispatch" 
"source.quilibrium.com/quilibrium/monorepo/node/execution/manager" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" @@ -46,60 +60,74 @@ import ( // AppConsensusEngine uses the generic state machine for consensus type AppConsensusEngine struct { + *lifecycle.ComponentManager protobufs.AppShardServiceServer - logger *zap.Logger - config *config.Config - coreId uint - appAddress []byte - appFilter []byte - appAddressHex string - pubsub tp2p.PubSub - hypergraph hypergraph.Hypergraph - keyManager tkeys.KeyManager - keyStore store.KeyStore - clockStore store.ClockStore - inboxStore store.InboxStore - shardsStore store.ShardsStore - hypergraphStore store.HypergraphStore - frameProver crypto.FrameProver - inclusionProver crypto.InclusionProver - signerRegistry typesconsensus.SignerRegistry - proverRegistry typesconsensus.ProverRegistry - dynamicFeeManager typesconsensus.DynamicFeeManager - frameValidator typesconsensus.AppFrameValidator - globalFrameValidator typesconsensus.GlobalFrameValidator - difficultyAdjuster typesconsensus.DifficultyAdjuster - rewardIssuance typesconsensus.RewardIssuance - eventDistributor typesconsensus.EventDistributor - mixnet typesconsensus.Mixnet - appTimeReel *consensustime.AppTimeReel - globalTimeReel *consensustime.GlobalTimeReel - encryptedChannel channel.EncryptedChannel - dispatchService *dispatch.DispatchService - blsConstructor crypto.BlsConstructor - minimumProvers func() uint64 - executors map[string]execution.ShardExecutionEngine - executorsMu sync.RWMutex - executionManager *manager.ExecutionEngineManager - peerInfoManager tp2p.PeerInfoManager - currentDifficulty uint32 - currentDifficultyMu sync.RWMutex - pendingMessages []*protobufs.Message - pendingMessagesMu sync.RWMutex - collectedMessages map[string][]*protobufs.Message - collectedMessagesMu sync.RWMutex - lastProvenFrameTime time.Time - lastProvenFrameTimeMu sync.RWMutex - frameStore map[string]*protobufs.AppShardFrame - frameStoreMu sync.RWMutex - ctx context.Context - cancel context.CancelFunc - quit chan struct{} - wg sync.WaitGroup - canRunStandalone bool - blacklistMap map[string]bool - alertPublicKey []byte + logger *zap.Logger + config *config.Config + coreId uint + appAddress []byte + appFilter []byte + appAddressHex string + pubsub tp2p.PubSub + hypergraph hypergraph.Hypergraph + keyManager tkeys.KeyManager + keyStore store.KeyStore + clockStore store.ClockStore + inboxStore store.InboxStore + shardsStore store.ShardsStore + hypergraphStore store.HypergraphStore + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote] + frameProver crypto.FrameProver + inclusionProver crypto.InclusionProver + signerRegistry typesconsensus.SignerRegistry + proverRegistry typesconsensus.ProverRegistry + dynamicFeeManager typesconsensus.DynamicFeeManager + frameValidator typesconsensus.AppFrameValidator + globalFrameValidator typesconsensus.GlobalFrameValidator + difficultyAdjuster typesconsensus.DifficultyAdjuster + rewardIssuance typesconsensus.RewardIssuance + eventDistributor typesconsensus.EventDistributor + mixnet typesconsensus.Mixnet + appTimeReel *consensustime.AppTimeReel + globalTimeReel *consensustime.GlobalTimeReel + forks consensus.Forks[*protobufs.AppShardFrame] + notifier consensus.Consumer[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ] + encryptedChannel channel.EncryptedChannel + dispatchService *dispatch.DispatchService + blsConstructor crypto.BlsConstructor + minimumProvers func() uint64 + executors 
map[string]execution.ShardExecutionEngine + executorsMu sync.RWMutex + executionManager *manager.ExecutionEngineManager + peerInfoManager tp2p.PeerInfoManager + currentDifficulty uint32 + currentDifficultyMu sync.RWMutex + pendingMessages []*protobufs.Message + pendingMessagesMu sync.RWMutex + collectedMessages []*protobufs.Message + collectedMessagesMu sync.RWMutex + provingMessages []*protobufs.Message + provingMessagesMu sync.RWMutex + lastProvenFrameTime time.Time + lastProvenFrameTimeMu sync.RWMutex + frameStore map[string]*protobufs.AppShardFrame + frameStoreMu sync.RWMutex + proposalCache map[uint64]*protobufs.AppShardProposal + proposalCacheMu sync.RWMutex + pendingCertifiedParents map[uint64]*protobufs.AppShardProposal + pendingCertifiedParentsMu sync.RWMutex + proofCache map[uint64][516]byte + proofCacheMu sync.RWMutex + ctx lifecycle.SignalerContext + cancel context.CancelFunc + quit chan struct{} + canRunStandalone bool + blacklistMap map[string]bool + alertPublicKey []byte // Message queues consensusMessageQueue chan *pb.Message @@ -109,19 +137,25 @@ type AppConsensusEngine struct { globalAlertMessageQueue chan *pb.Message globalPeerInfoMessageQueue chan *pb.Message dispatchMessageQueue chan *pb.Message + appShardProposalQueue chan *protobufs.AppShardProposal // Emergency halt haltCtx context.Context halt context.CancelFunc - // Generic state machine - stateMachine *consensus.StateMachine[ + // Consensus participant instance + consensusParticipant consensus.EventLoop[ *protobufs.AppShardFrame, - *protobufs.FrameVote, - PeerID, - CollectedCommitments, + *protobufs.ProposalVote, ] + // Consensus plugins + signatureAggregator consensus.SignatureAggregator + voteCollectorDistributor *pubsub.VoteCollectorDistributor[*protobufs.ProposalVote] + timeoutCollectorDistributor *pubsub.TimeoutCollectorDistributor[*protobufs.ProposalVote] + voteAggregator consensus.VoteAggregator[*protobufs.AppShardFrame, *protobufs.ProposalVote] + timeoutAggregator consensus.TimeoutAggregator[*protobufs.ProposalVote] + // Provider implementations syncProvider *AppSyncProvider votingProvider *AppVotingProvider @@ -149,7 +183,7 @@ func NewAppConsensusEngine( config *config.Config, coreId uint, appAddress []byte, - pubsub tp2p.PubSub, + ps tp2p.PubSub, hypergraph hypergraph.Hypergraph, keyManager tkeys.KeyManager, keyStore store.KeyStore, @@ -157,6 +191,7 @@ func NewAppConsensusEngine( inboxStore store.InboxStore, shardsStore store.ShardsStore, hypergraphStore store.HypergraphStore, + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote], frameProver crypto.FrameProver, inclusionProver crypto.InclusionProver, bulletproofProver crypto.BulletproofProver, @@ -186,7 +221,7 @@ func NewAppConsensusEngine( appAddress: appAddress, appFilter: appFilter, appAddressHex: hex.EncodeToString(appAddress), - pubsub: pubsub, + pubsub: ps, hypergraph: hypergraph, keyManager: keyManager, keyStore: keyStore, @@ -194,6 +229,7 @@ func NewAppConsensusEngine( inboxStore: inboxStore, shardsStore: shardsStore, hypergraphStore: hypergraphStore, + consensusStore: consensusStore, frameProver: frameProver, inclusionProver: inclusionProver, signerRegistry: signerRegistry, @@ -212,7 +248,12 @@ func NewAppConsensusEngine( peerInfoManager: peerInfoManager, executors: make(map[string]execution.ShardExecutionEngine), frameStore: make(map[string]*protobufs.AppShardFrame), - collectedMessages: make(map[string][]*protobufs.Message), + proposalCache: make(map[uint64]*protobufs.AppShardProposal), + pendingCertifiedParents: 
make(map[uint64]*protobufs.AppShardProposal), + proofCache: make(map[uint64][516]byte), + pendingMessages: []*protobufs.Message{}, + collectedMessages: []*protobufs.Message{}, + provingMessages: []*protobufs.Message{}, consensusMessageQueue: make(chan *pb.Message, 1000), proverMessageQueue: make(chan *pb.Message, 1000), frameMessageQueue: make(chan *pb.Message, 100), @@ -220,11 +261,29 @@ func NewAppConsensusEngine( globalAlertMessageQueue: make(chan *pb.Message, 100), globalPeerInfoMessageQueue: make(chan *pb.Message, 1000), dispatchMessageQueue: make(chan *pb.Message, 1000), + appShardProposalQueue: make(chan *protobufs.AppShardProposal, 1000), currentDifficulty: config.Engine.Difficulty, blacklistMap: make(map[string]bool), alertPublicKey: []byte{}, } + engine.syncProvider = NewAppSyncProvider(engine) + engine.votingProvider = &AppVotingProvider{engine: engine} + engine.leaderProvider = &AppLeaderProvider{engine: engine} + engine.livenessProvider = &AppLivenessProvider{engine: engine} + engine.signatureAggregator = aggregator.WrapSignatureAggregator( + engine.blsConstructor, + engine.proverRegistry, + appAddress, + ) + voteAggregationDistributor := voting.NewAppShardVoteAggregationDistributor() + engine.voteCollectorDistributor = + voteAggregationDistributor.VoteCollectorDistributor + timeoutAggregationDistributor := + voting.NewAppShardTimeoutAggregationDistributor() + engine.timeoutCollectorDistributor = + timeoutAggregationDistributor.TimeoutCollectorDistributor + if config.Engine.AlertKey != "" { alertPublicKey, err := hex.DecodeString(config.Engine.AlertKey) if err != nil { @@ -318,30 +377,9 @@ func NewAppConsensusEngine( inboxStore, logger, keyManager, - pubsub, + ps, ) - // Initialize execution engines - if err := engine.executionManager.InitializeEngines(); err != nil { - return nil, errors.Wrap(err, "failed to initialize execution engines") - } - - // Register all execution engines with the consensus engine - err = engine.executionManager.RegisterAllEngines(engine.RegisterExecutor) - if err != nil { - return nil, errors.Wrap(err, "failed to register execution engines") - } - - engine.syncProvider = &AppSyncProvider{engine: engine} - engine.votingProvider = &AppVotingProvider{ - engine: engine, - proposalVotes: make( - map[consensus.Identity]map[consensus.Identity]**protobufs.FrameVote, - ), - } - engine.leaderProvider = &AppLeaderProvider{engine: engine} - engine.livenessProvider = &AppLivenessProvider{engine: engine} - appTimeReel.SetMaterializeFunc(engine.materialize) appTimeReel.SetRevertFunc(engine.revert) @@ -370,7 +408,7 @@ func NewAppConsensusEngine( engine.hyperSync = hypergraph engine.onionService = onion.NewGRPCTransport( logger, - pubsub.GetPeerID(), + ps.GetPeerID(), peerInfoManager, signerRegistry, ) @@ -398,173 +436,250 @@ func NewAppConsensusEngine( executorsRegistered.WithLabelValues(engine.appAddressHex).Set(0) pendingMessagesCount.WithLabelValues(engine.appAddressHex).Set(0) - return engine, nil -} + componentBuilder := lifecycle.NewComponentManagerBuilder() + // Add execution engines + componentBuilder.AddWorker(engine.executionManager.Start) + componentBuilder.AddWorker(engine.eventDistributor.Start) + componentBuilder.AddWorker(engine.appTimeReel.Start) -func (e *AppConsensusEngine) Start(quit chan struct{}) <-chan error { - errChan := make(chan error, 1) - - e.quit = quit - e.ctx, e.cancel = context.WithCancel(context.Background()) - - // Start execution engines - if err := e.executionManager.StartAll(e.quit); err != nil { - errChan <- errors.Wrap(err, 
"start execution engines") - close(errChan) - return errChan - } - - if err := e.eventDistributor.Start(e.ctx); err != nil { - errChan <- errors.Wrap(err, "start event distributor") - close(errChan) - return errChan - } - - err := e.appTimeReel.Start() + latest, err := engine.consensusStore.GetConsensusState(engine.appAddress) + var state *models.CertifiedState[*protobufs.AppShardFrame] + var pending []*models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ] if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan - } - - frame, _, err := e.clockStore.GetLatestShardClockFrame(e.appAddress) - if err != nil { - e.logger.Warn( - "invalid frame retrieved, will resync", - zap.Error(err), + frame, qc := engine.initializeGenesis() + state = &models.CertifiedState[*protobufs.AppShardFrame]{ + State: &models.State[*protobufs.AppShardFrame]{ + Rank: 0, + Identifier: frame.Identity(), + State: &frame, + }, + CertifyingQuorumCertificate: qc, + } + } else { + qc, err := engine.clockStore.GetQuorumCertificate(nil, latest.FinalizedRank) + if err != nil { + panic(err) + } + frame, _, err := engine.clockStore.GetShardClockFrame( + engine.appAddress, + qc.GetFrameNumber(), + false, ) + if err != nil { + panic(err) + } + parentFrame, err := engine.clockStore.GetGlobalClockFrame( + qc.GetFrameNumber() - 1, + ) + if err != nil { + panic(err) + } + parentQC, err := engine.clockStore.GetQuorumCertificate( + nil, + parentFrame.GetRank(), + ) + if err != nil { + panic(err) + } + state = &models.CertifiedState[*protobufs.AppShardFrame]{ + State: &models.State[*protobufs.AppShardFrame]{ + Rank: frame.GetRank(), + Identifier: frame.Identity(), + ProposerID: frame.Source(), + ParentQuorumCertificate: parentQC, + Timestamp: frame.GetTimestamp(), + State: &frame, + }, + CertifyingQuorumCertificate: qc, + } + pending = engine.getPendingProposals(frame.Header.FrameNumber) } - e.ensureGlobalClient() - - var initialState **protobufs.AppShardFrame = nil - if frame != nil { - initialState = &frame - } - - e.stateMachine = consensus.NewStateMachine( - e.getPeerID(), - initialState, // Initial state will be set by sync provider - true, // shouldEmitReceiveEventsOnSends - e.minimumProvers, - e.syncProvider, - e.votingProvider, - e.leaderProvider, - e.livenessProvider, - &AppTracer{ - logger: e.logger.Named("state_machine"), + engine.voteAggregator, err = voting.NewAppShardVoteAggregator[PeerID]( + tracing.NewZapTracer(logger), + appAddress, + engine, + voteAggregationDistributor, + engine.signatureAggregator, + engine.votingProvider, + func(qc models.QuorumCertificate) { + engine.consensusParticipant.OnQuorumCertificateConstructedFromVotes(qc) }, + state.Rank()+1, + ) + if err != nil { + return nil, err + } + engine.timeoutAggregator, err = voting.NewAppShardTimeoutAggregator[PeerID]( + tracing.NewZapTracer(logger), + appAddress, + engine, + engine, + engine.signatureAggregator, + timeoutAggregationDistributor, + engine.votingProvider, + state.Rank()+1, ) - e.stateMachine.AddListener(&AppTransitionListener{ - engine: e, - logger: e.logger.Named("transitions"), + notifier := pubsub.NewDistributor[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ]() + notifier.AddConsumer(engine) + engine.notifier = notifier + + forks, err := forks.NewForks(state, engine, notifier) + if err != nil { + return nil, err + } + engine.forks = forks + + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + if err := 
engine.startConsensus(state, pending, ctx, ready); err != nil { + ctx.Throw(err) + return + } + + <-ctx.Done() + <-lifecycle.AllDone(engine.voteAggregator, engine.timeoutAggregator) }) - err = e.subscribeToConsensusMessages() + // Start app shard proposal queue processor + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processAppShardProposalQueue(ctx) + }) + + err = engine.subscribeToConsensusMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToProverMessages() + err = engine.subscribeToProverMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToFrameMessages() + err = engine.subscribeToFrameMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToGlobalFrameMessages() + err = engine.subscribeToGlobalFrameMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToGlobalProverMessages() + err = engine.subscribeToGlobalProverMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToGlobalAlertMessages() + err = engine.subscribeToGlobalAlertMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToPeerInfoMessages() + err = engine.subscribeToPeerInfoMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } - err = e.subscribeToDispatchMessages() + err = engine.subscribeToDispatchMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err } + // Add sync provider + componentBuilder.AddWorker(engine.syncProvider.Start) + // Start message queue processors - e.wg.Add(1) - go e.processConsensusMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processConsensusMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processProverMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processProverMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processFrameMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processFrameMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processGlobalFrameMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processGlobalFrameMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processAlertMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processAlertMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processPeerInfoMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processPeerInfoMessageQueue(ctx) + }) - e.wg.Add(1) - go e.processDispatchMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processDispatchMessageQueue(ctx) + }) // Start 
event distributor event loop - e.wg.Add(1) - go e.eventDistributorLoop() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.eventDistributorLoop(ctx) + }) // Start metrics update goroutine - e.wg.Add(1) - go e.updateMetricsLoop() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.updateMetricsLoop(ctx) + }) - // Start the state machine - if err := e.stateMachine.Start(); err != nil { - errChan <- errors.Wrap(err, "start state machine") - close(errChan) - return errChan - } + engine.ComponentManager = componentBuilder.Build() - e.logger.Info( - "app consensus engine started", - zap.String("app_address", hex.EncodeToString(e.appAddress)), - ) - - if e.grpcServer != nil { - e.RegisterServices(e.grpcServer) - } - - close(errChan) - return errChan + return engine, nil } func (e *AppConsensusEngine) Stop(force bool) <-chan error { @@ -575,73 +690,6 @@ func (e *AppConsensusEngine) Stop(force bool) <-chan error { e.cancel() } - // Stop the state machine - if e.stateMachine != nil { - if err := e.stateMachine.Stop(); err != nil && !force { - e.logger.Warn("error stopping state machine", zap.Error(err)) - select { - case errChan <- errors.Wrap(err, "stop state machine"): - default: - } - } - } - - // Stop event distributor - if e.eventDistributor != nil { - if err := e.eventDistributor.Stop(); err != nil && !force { - e.logger.Warn("error stopping event distributor", zap.Error(err)) - select { - case errChan <- errors.Wrap(err, "stop event distributor"): - default: - } - } - } - - // Stop execution engines - if e.executionManager != nil { - if err := e.executionManager.StopAll(force); err != nil && !force { - e.logger.Warn("error stopping execution engines", zap.Error(err)) - select { - case errChan <- errors.Wrap(err, "stop execution engines"): - default: - } - } - } - - // Wait for goroutines to finish with shorter timeout for tests - done := make(chan struct{}) - go func() { - e.wg.Wait() - close(done) - }() - - // Use shorter timeout in test environments - timeout := 30 * time.Second - if e.config.P2P.Network == 99 { - timeout = 5 * time.Second - } - - select { - case <-done: - // Clean shutdown - e.logger.Info("app consensus engine stopped cleanly") - case <-time.After(timeout): - if !force { - e.logger.Error("timeout waiting for graceful shutdown") - select { - case errChan <- errors.New("timeout waiting for graceful shutdown"): - default: - } - } else { - e.logger.Warn("forced shutdown after timeout") - } - } - - // Close the state machine - if e.stateMachine != nil { - e.stateMachine.Close() - } - // Unsubscribe from pubsub to stop new messages from arriving e.pubsub.Unsubscribe(e.getConsensusMessageBitmask(), false) e.pubsub.UnregisterValidator(e.getConsensusMessageBitmask()) @@ -681,109 +729,20 @@ func (e *AppConsensusEngine) GetDifficulty() uint32 { func (e *AppConsensusEngine) GetState() typesconsensus.EngineState { // Map the generic state machine state to engine state - if e.stateMachine == nil { + if e.consensusParticipant == nil { return typesconsensus.EngineStateStopped } - smState := e.stateMachine.GetState() - switch smState { - case consensus.StateStopped: - return typesconsensus.EngineStateStopped - case consensus.StateStarting: - return typesconsensus.EngineStateStarting - case consensus.StateLoading: - return typesconsensus.EngineStateLoading - case consensus.StateCollecting: - return typesconsensus.EngineStateCollecting - case 
consensus.StateLivenessCheck: - return typesconsensus.EngineStateLivenessCheck - case consensus.StateProving: + + select { + case <-e.consensusParticipant.Ready(): return typesconsensus.EngineStateProving - case consensus.StatePublishing: - return typesconsensus.EngineStatePublishing - case consensus.StateVoting: - return typesconsensus.EngineStateVoting - case consensus.StateFinalizing: - return typesconsensus.EngineStateFinalizing - default: + case <-e.consensusParticipant.Done(): return typesconsensus.EngineStateStopped + default: + return typesconsensus.EngineStateStarting } } -func (e *AppConsensusEngine) RegisterExecutor( - exec execution.ShardExecutionEngine, - frame uint64, -) <-chan error { - errChan := make(chan error, 1) - - e.executorsMu.Lock() - defer e.executorsMu.Unlock() - - name := exec.GetName() - if _, exists := e.executors[name]; exists { - errChan <- errors.New("executor already registered") - close(errChan) - return errChan - } - - e.executors[name] = exec - - // Update metrics - executorRegistrationTotal.WithLabelValues(e.appAddressHex, "register").Inc() - executorsRegistered.WithLabelValues( - e.appAddressHex, - ).Set(float64(len(e.executors))) - - close(errChan) - return errChan -} - -func (e *AppConsensusEngine) UnregisterExecutor( - name string, - frame uint64, - force bool, -) <-chan error { - errChan := make(chan error, 1) - - e.executorsMu.Lock() - defer e.executorsMu.Unlock() - - if _, exists := e.executors[name]; !exists { - errChan <- errors.New("executor not registered") - close(errChan) - return errChan - } - - // Stop the executor - if exec, ok := e.executors[name]; ok { - stopErrChan := exec.Stop(force) - select { - case err := <-stopErrChan: - if err != nil && !force { - errChan <- errors.Wrap(err, "stop executor") - close(errChan) - return errChan - } - case <-time.After(5 * time.Second): - if !force { - errChan <- errors.New("timeout stopping executor") - close(errChan) - return errChan - } - } - } - - delete(e.executors, name) - - // Update metrics - executorRegistrationTotal.WithLabelValues(e.appAddressHex, "unregister").Inc() - executorsRegistered.WithLabelValues( - e.appAddressHex, - ).Set(float64(len(e.executors))) - - close(errChan) - return errChan -} - func (e *AppConsensusEngine) GetProvingKey( engineConfig *config.EngineConfig, ) (crypto.Signer, crypto.KeyType, []byte, []byte) { @@ -1159,28 +1118,22 @@ func (e *AppConsensusEngine) cleanupFrameStore() { ) } -func (e *AppConsensusEngine) updateMetricsLoop() { +func (e *AppConsensusEngine) updateMetricsLoop( + ctx lifecycle.SignalerContext, +) { defer func() { if r := recover(); r != nil { e.logger.Error("fatal error encountered", zap.Any("panic", r)) - if e.cancel != nil { - e.cancel() - } - // Avoid blocking on quit channel during panic recovery - select { - case e.quit <- struct{}{}: - default: - } + ctx.Throw(errors.Errorf("fatal unhandled error encountered: %v", r)) } }() - defer e.wg.Done() ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -1199,7 +1152,10 @@ func (e *AppConsensusEngine) updateMetricsLoop() { } } -func (e *AppConsensusEngine) initializeGenesis() *protobufs.AppShardFrame { +func (e *AppConsensusEngine) initializeGenesis() ( + *protobufs.AppShardFrame, + *protobufs.QuorumCertificate, +) { // Initialize state roots for hypergraph stateRoots := make([][]byte, 4) for i := range stateRoots { @@ -1232,11 +1188,74 @@ func (e *AppConsensusEngine) initializeGenesis() 
*protobufs.AppShardFrame { e.frameStore[string(frameID)] = genesisFrame e.frameStoreMu.Unlock() - if err := e.appTimeReel.Insert(e.ctx, genesisFrame); err != nil { - e.logger.Error("failed to add genesis frame to time reel", zap.Error(err)) - e.frameStoreMu.Lock() - delete(e.frameStore, string(frameID)) - e.frameStoreMu.Unlock() + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + panic(err) + } + if err := e.clockStore.StageShardClockFrame( + []byte(genesisFrame.Identity()), + genesisFrame, + txn, + ); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil, nil + } + if err := e.clockStore.CommitShardClockFrame( + e.appAddress, + genesisHeader.FrameNumber, + []byte(genesisFrame.Identity()), + nil, + txn, + false, + ); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil, nil + } + genesisQC := &protobufs.QuorumCertificate{ + Rank: 0, + Filter: e.appFilter, + FrameNumber: genesisFrame.Header.FrameNumber, + Selector: []byte(genesisFrame.Identity()), + Timestamp: 0, + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: make([]byte, 585), + }, + Signature: make([]byte, 74), + Bitmask: bytes.Repeat([]byte{0xff}, 32), + }, + } + if err := e.clockStore.PutQuorumCertificate(genesisQC, txn); err != nil { + txn.Abort() + e.logger.Error("could not add quorum certificate", zap.Error(err)) + return nil, nil + } + if err := txn.Commit(); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil, nil + } + if err = e.consensusStore.PutLivenessState( + &models.LivenessState{ + Filter: e.appAddress, + CurrentRank: 1, + LatestQuorumCertificate: genesisQC, + }, + ); err != nil { + e.logger.Error("could not add liveness state", zap.Error(err)) + return nil, nil + } + if err = e.consensusStore.PutConsensusState( + &models.ConsensusState[*protobufs.ProposalVote]{ + Filter: e.appAddress, + FinalizedRank: 0, + LatestAcknowledgedRank: 0, + }, + ); err != nil { + e.logger.Error("could not add consensus state", zap.Error(err)) + return nil, nil } e.logger.Info( @@ -1244,7 +1263,7 @@ func (e *AppConsensusEngine) initializeGenesis() *protobufs.AppShardFrame { zap.String("shard_address", hex.EncodeToString(e.appAddress)), ) - return genesisFrame + return genesisFrame, genesisQC } // adjustFeeForTraffic calculates a traffic-adjusted fee multiplier based on @@ -1351,6 +1370,7 @@ func (e *AppConsensusEngine) adjustFeeForTraffic(baseFee uint64) uint64 { } func (e *AppConsensusEngine) internalProveFrame( + rank uint64, messages []*protobufs.Message, previousFrame *protobufs.AppShardFrame, ) (*protobufs.AppShardFrame, error) { @@ -1417,7 +1437,7 @@ func (e *AppConsensusEngine) internalProveFrame( timestamp := time.Now().UnixMilli() difficulty := e.difficultyAdjuster.GetNextDifficulty( - previousFrame.Rank()+1, + previousFrame.GetRank()+1, timestamp, ) @@ -1478,7 +1498,7 @@ func (e *AppConsensusEngine) internalProveFrame( if err != nil { return nil, err } - + newHeader.Rank = rank newHeader.PublicKeySignatureBls48581 = nil newFrame := &protobufs.AppShardFrame{ @@ -1551,3 +1571,1139 @@ func (e *AppConsensusEngine) ensureGlobalClient() error { e.globalClient = protobufs.NewGlobalServiceClient(client) return nil } + +func (e *AppConsensusEngine) startConsensus( + trustedRoot *models.CertifiedState[*protobufs.AppShardFrame], + pending []*models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ], + ctx 
lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) error { + var err error + e.consensusParticipant, err = participant.NewParticipant[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerID, + CollectedCommitments, + ]( + tracing.NewZapTracer(e.logger), // logger + e, // committee + verification.NewSigner[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerID, + ](e.votingProvider), // signer + e.leaderProvider, // prover + e.votingProvider, // voter + e.notifier, // notifier + e.consensusStore, // consensusStore + e.signatureAggregator, // signatureAggregator + e, // consensusVerifier + e.voteCollectorDistributor, // voteCollectorDistributor + e.timeoutCollectorDistributor, // timeoutCollectorDistributor + e.forks, // forks + validator.NewValidator[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ](e, e), // validator + e.voteAggregator, // voteAggregator + e.timeoutAggregator, // timeoutAggregator + e, // finalizer + e.appAddress, // filter + trustedRoot, + pending, + ) + if err != nil { + return err + } + + ready() + e.voteAggregator.Start(ctx) + e.timeoutAggregator.Start(ctx) + <-lifecycle.AllReady(e.voteAggregator, e.timeoutAggregator) + e.consensusParticipant.Start(ctx) + return nil +} + +// MakeFinal implements consensus.Finalizer. +func (e *AppConsensusEngine) MakeFinal(stateID models.Identity) error { + // In a standard BFT-only approach, this would be how frames are finalized on + // the time reel. But we're PoMW, so we don't rely on BFT for anything outside + // of basic coordination. If the protocol were ever to move to something like + // PoS, this would be one of the touch points to revisit. + return nil +} + +// OnCurrentRankDetails implements consensus.Consumer. +func (e *AppConsensusEngine) OnCurrentRankDetails( + currentRank uint64, + finalizedRank uint64, + currentLeader models.Identity, +) { + e.logger.Info( + "entered new rank", + zap.Uint64("current_rank", currentRank), + zap.String("current_leader", hex.EncodeToString([]byte(currentLeader))), + ) +} + +// OnDoubleProposeDetected implements consensus.Consumer. +func (e *AppConsensusEngine) OnDoubleProposeDetected( + proposal1 *models.State[*protobufs.AppShardFrame], + proposal2 *models.State[*protobufs.AppShardFrame], +) { + select { + case <-e.haltCtx.Done(): + return + default: + } + e.eventDistributor.Publish(typesconsensus.ControlEvent{ + Type: typesconsensus.ControlEventAppEquivocation, + Data: &consensustime.AppEvent{ + Type: consensustime.TimeReelEventEquivocationDetected, + Frame: *proposal2.State, + OldHead: *proposal1.State, + Message: fmt.Sprintf( + "equivocation at rank %d", + proposal1.Rank, + ), + }, + }) +} + +// OnEventProcessed implements consensus.Consumer. +func (e *AppConsensusEngine) OnEventProcessed() {} + +// OnFinalizedState implements consensus.Consumer. +func (e *AppConsensusEngine) OnFinalizedState( + state *models.State[*protobufs.AppShardFrame], +) { +} + +// OnInvalidStateDetected implements consensus.Consumer. +func (e *AppConsensusEngine) OnInvalidStateDetected( + err *models.InvalidProposalError[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ], +) { +} // Presently a no-op, up for reconsideration + +// OnLocalTimeout implements consensus.Consumer. +func (e *AppConsensusEngine) OnLocalTimeout(currentRank uint64) {} + +// OnOwnProposal implements consensus.Consumer. 
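+// The proposal is held until targetPublicationTime (or shutdown), after
+// which the proposer's own vote signature overrides the VDF prover's
+// signature on the frame header, the proposal is handed to the local vote
+// aggregator and consensus participant, and the serialized AppShardProposal
+// is broadcast on the consensus bitmask.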
+func (e *AppConsensusEngine) OnOwnProposal(
+  proposal *models.SignedProposal[
+    *protobufs.AppShardFrame,
+    *protobufs.ProposalVote,
+  ],
+  targetPublicationTime time.Time,
+) {
+  go func() {
+    select {
+    case <-time.After(time.Until(targetPublicationTime)):
+    case <-e.ShutdownSignal():
+      return
+    }
+    var priorTC *protobufs.TimeoutCertificate = nil
+    if proposal.PreviousRankTimeoutCertificate != nil {
+      priorTC =
+        proposal.PreviousRankTimeoutCertificate.(*protobufs.TimeoutCertificate)
+    }
+
+    // Manually override the signature as the vdf prover's signature is invalid
+    (*proposal.State.State).Header.PublicKeySignatureBls48581.Signature =
+      (*proposal.Vote).PublicKeySignatureBls48581.Signature
+
+    pbProposal := &protobufs.AppShardProposal{
+      State: *proposal.State.State,
+      ParentQuorumCertificate: proposal.Proposal.State.ParentQuorumCertificate.(*protobufs.QuorumCertificate),
+      PriorRankTimeoutCertificate: priorTC,
+      Vote: *proposal.Vote,
+    }
+    data, err := pbProposal.ToCanonicalBytes()
+    if err != nil {
+      e.logger.Error("could not serialize proposal", zap.Error(err))
+      return
+    }
+
+    e.voteAggregator.AddState(proposal)
+    e.consensusParticipant.SubmitProposal(proposal)
+
+    if err := e.pubsub.PublishToBitmask(
+      e.getConsensusMessageBitmask(),
+      data,
+    ); err != nil {
+      e.logger.Error("could not publish", zap.Error(err))
+    }
+  }()
+}
+
+// OnOwnTimeout implements consensus.Consumer.
+func (e *AppConsensusEngine) OnOwnTimeout(
+  timeout *models.TimeoutState[*protobufs.ProposalVote],
+) {
+  select {
+  case <-e.haltCtx.Done():
+    return
+  default:
+  }
+
+  var priorTC *protobufs.TimeoutCertificate
+  if timeout.PriorRankTimeoutCertificate != nil {
+    priorTC =
+      timeout.PriorRankTimeoutCertificate.(*protobufs.TimeoutCertificate)
+  }
+
+  pbTimeout := &protobufs.TimeoutState{
+    LatestQuorumCertificate: timeout.LatestQuorumCertificate.(*protobufs.QuorumCertificate),
+    PriorRankTimeoutCertificate: priorTC,
+    Vote: *timeout.Vote,
+    TimeoutTick: timeout.TimeoutTick,
+    Timestamp: uint64(time.Now().UnixMilli()),
+  }
+  data, err := pbTimeout.ToCanonicalBytes()
+  if err != nil {
+    e.logger.Error("could not serialize timeout", zap.Error(err))
+    return
+  }
+
+  e.timeoutAggregator.AddTimeout(timeout)
+
+  if err := e.pubsub.PublishToBitmask(
+    e.getConsensusMessageBitmask(),
+    data,
+  ); err != nil {
+    e.logger.Error("could not publish", zap.Error(err))
+  }
+}
+
+// OnOwnVote implements consensus.Consumer.
+func (e *AppConsensusEngine) OnOwnVote(
+  vote **protobufs.ProposalVote,
+  recipientID models.Identity,
+) {
+  select {
+  case <-e.haltCtx.Done():
+    return
+  default:
+  }
+
+  data, err := (*vote).ToCanonicalBytes()
+  if err != nil {
+    e.logger.Error("could not serialize vote", zap.Error(err))
+    return
+  }
+
+  e.voteAggregator.AddVote(vote)
+
+  if err := e.pubsub.PublishToBitmask(
+    e.getConsensusMessageBitmask(),
+    data,
+  ); err != nil {
+    e.logger.Error("could not publish", zap.Error(err))
+  }
+}
+
+// OnPartialTimeoutCertificate implements consensus.Consumer.
+func (e *AppConsensusEngine) OnPartialTimeoutCertificate(
+  currentRank uint64,
+  partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated,
+) {
+}
+
+// OnQuorumCertificateTriggeredRankChange implements consensus.Consumer.
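+// The quorum certificate is persisted, its aggregate signature is attached
+// to the cached frame, the frame is inserted into the app time reel, and the
+// certified AppShardProposal is stored. If this node is not the next rank's
+// leader, the VDF multiproof for the new rank is precomputed and cached.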
+func (e *AppConsensusEngine) OnQuorumCertificateTriggeredRankChange(
+  oldRank uint64,
+  newRank uint64,
+  qc models.QuorumCertificate,
+) {
+  e.logger.Debug("adding certified state", zap.Uint64("rank", newRank-1))
+
+  parentQC, err := e.clockStore.GetLatestQuorumCertificate(e.appAddress)
+  if err != nil {
+    e.logger.Error("no latest quorum certificate", zap.Error(err))
+    return
+  }
+
+  txn, err := e.clockStore.NewTransaction(false)
+  if err != nil {
+    e.logger.Error("could not create transaction", zap.Error(err))
+    return
+  }
+
+  aggregateSig := &protobufs.BLS48581AggregateSignature{
+    Signature: qc.GetAggregatedSignature().GetSignature(),
+    PublicKey: &protobufs.BLS48581G2PublicKey{
+      KeyValue: qc.GetAggregatedSignature().GetPubKey(),
+    },
+    Bitmask: qc.GetAggregatedSignature().GetBitmask(),
+  }
+  if err := e.clockStore.PutQuorumCertificate(
+    &protobufs.QuorumCertificate{
+      Filter: qc.GetFilter(),
+      Rank: qc.GetRank(),
+      FrameNumber: qc.GetFrameNumber(),
+      Selector: []byte(qc.Identity()),
+      AggregateSignature: aggregateSig,
+    },
+    txn,
+  ); err != nil {
+    e.logger.Error("could not insert quorum certificate", zap.Error(err))
+    txn.Abort()
+    return
+  }
+
+  if err := txn.Commit(); err != nil {
+    e.logger.Error("could not commit transaction", zap.Error(err))
+    txn.Abort()
+    return
+  }
+
+  e.frameStoreMu.RLock()
+  frame, ok := e.frameStore[qc.Identity()]
+  e.frameStoreMu.RUnlock()
+
+  if !ok {
+    e.logger.Error(
+      "no frame for quorum certificate",
+      zap.Uint64("rank", newRank-1),
+      zap.Uint64("frame_number", qc.GetFrameNumber()),
+    )
+    return
+  }
+
+  frame.Header.PublicKeySignatureBls48581 = aggregateSig
+
+  err = e.appTimeReel.Insert(frame)
+  if err != nil {
+    e.logger.Error("could not insert frame into time reel", zap.Error(err))
+    return
+  }
+
+  if !bytes.Equal(frame.Header.ParentSelector, parentQC.Selector) {
+    e.logger.Error(
+      "quorum certificate does not match frame parent",
+      zap.String(
+        "frame_parent_selector",
+        hex.EncodeToString(frame.Header.ParentSelector),
+      ),
+      zap.String(
+        "parent_qc_selector",
+        hex.EncodeToString(parentQC.Selector),
+      ),
+      zap.Uint64("parent_qc_rank", parentQC.Rank),
+    )
+    return
+  }
+
+  priorRankTC, err := e.clockStore.GetTimeoutCertificate(
+    e.appAddress,
+    qc.GetRank()-1,
+  )
+  if err != nil {
+    e.logger.Debug("no prior rank TC to include", zap.Uint64("rank", newRank-1))
+  }
+
+  vote, err := e.clockStore.GetProposalVote(
+    e.appAddress,
+    frame.GetRank(),
+    []byte(frame.Source()),
+  )
+  if err != nil {
+    e.logger.Error(
+      "cannot find proposer's vote",
+      zap.Uint64("rank", newRank-1),
+      zap.String("proposer", hex.EncodeToString([]byte(frame.Source()))),
+    )
+    return
+  }
+
+  txn, err = e.clockStore.NewTransaction(false)
+  if err != nil {
+    e.logger.Error("could not create transaction", zap.Error(err))
+    return
+  }
+
+  if err := e.clockStore.PutCertifiedAppShardState(
+    &protobufs.AppShardProposal{
+      State: frame,
+      ParentQuorumCertificate: parentQC,
+      PriorRankTimeoutCertificate: priorRankTC,
+      Vote: vote,
+    },
+    txn,
+  ); err != nil {
+    e.logger.Error("could not insert certified state", zap.Error(err))
+    txn.Abort()
+    return
+  }
+
+  if err := txn.Commit(); err != nil {
+    e.logger.Error("could not commit transaction", zap.Error(err))
+    txn.Abort()
+    return
+  }
+
+  nextLeader, err := e.LeaderForRank(newRank)
+  if err != nil {
+    e.logger.Error("could not determine next prover", zap.Error(err))
+    return
+  }
+
+  if nextLeader != e.Self() {
+    go func() {
+      info, err := e.proverRegistry.GetActiveProvers(frame.Header.Address)
+      if err != nil {
+        return
+      }
+ + myIndex := -1 + ids := [][]byte{} + for i := range info { + if bytes.Equal(info[i].Address, e.getProverAddress()) { + myIndex = i + } + ids = append(ids, info[i].Address) + } + + if myIndex == -1 { + return + } + + proof := e.frameProver.CalculateMultiProof( + [32]byte([]byte(frame.Identity())), + frame.Header.Difficulty, + ids, + uint32(myIndex), + ) + e.proofCacheMu.Lock() + e.proofCache[newRank] = proof + e.proofCacheMu.Unlock() + }() + } +} + +// OnRankChange implements consensus.Consumer. +func (e *AppConsensusEngine) OnRankChange(oldRank uint64, newRank uint64) { + err := e.ensureGlobalClient() + if err != nil { + e.logger.Error("cannot confirm cross-shard locks", zap.Error(err)) + return + } + + frame, err := e.appTimeReel.GetHead() + if err != nil { + e.logger.Error("cannot obtain time reel head", zap.Error(err)) + return + } + + res, err := e.globalClient.GetLockedAddresses( + context.Background(), + &protobufs.GetLockedAddressesRequest{ + ShardAddress: e.appAddress, + FrameNumber: frame.Header.FrameNumber, + }, + ) + if err != nil { + e.logger.Error("cannot confirm cross-shard locks", zap.Error(err)) + return + } + + // Build a map of transaction hashes to their committed status + txMap := map[string]bool{} + txIncluded := map[string]bool{} + txMessageMap := map[string]*protobufs.Message{} + txHashesInOrder := []string{} + txShardRefs := map[string]map[string]struct{}{} + e.collectedMessagesMu.Lock() + collected := make([]*protobufs.Message, len(e.collectedMessages)) + copy(collected, e.collectedMessages) + e.collectedMessages = []*protobufs.Message{} + e.collectedMessagesMu.Unlock() + + e.provingMessagesMu.Lock() + e.provingMessages = []*protobufs.Message{} + e.provingMessagesMu.Unlock() + + for _, req := range collected { + tx, err := req.ToCanonicalBytes() + if err != nil { + e.logger.Error("cannot confirm cross-shard locks", zap.Error(err)) + return + } + + txHash := sha3.Sum256(tx) + e.logger.Debug( + "adding transaction in frame to commit check", + zap.String("tx_hash", hex.EncodeToString(txHash[:])), + ) + hashStr := string(txHash[:]) + txMap[hashStr] = false + txIncluded[hashStr] = true + txMessageMap[hashStr] = req + txHashesInOrder = append(txHashesInOrder, hashStr) + } + + // Check that transactions are committed in our shard and collect shard + // addresses + shardAddressesSet := make(map[string]bool) + for _, tx := range res.Transactions { + e.logger.Debug( + "checking transaction from global map", + zap.String("tx_hash", hex.EncodeToString(tx.TransactionHash)), + ) + hashStr := string(tx.TransactionHash) + if _, ok := txMap[hashStr]; ok { + txMap[hashStr] = tx.Committed + + // Extract shard addresses from each locked transaction's shard addresses + for _, shardAddr := range tx.ShardAddresses { + // Extract the applicable shard address (can be shorter than the full + // address) + extractedShards := e.extractShardAddresses(shardAddr) + for _, extractedShard := range extractedShards { + shardAddrStr := string(extractedShard) + shardAddressesSet[shardAddrStr] = true + if txShardRefs[hashStr] == nil { + txShardRefs[hashStr] = make(map[string]struct{}) + } + txShardRefs[hashStr][shardAddrStr] = struct{}{} + } + } + } + } + + // Check that all transactions are committed in our shard + for _, committed := range txMap { + if !committed { + e.logger.Error("transaction not committed in local shard") + return + } + } + + // Check cross-shard locks for each unique shard address + for shardAddrStr := range shardAddressesSet { + shardAddr := []byte(shardAddrStr) + + // Skip 
our own shard since we already checked it + if bytes.Equal(shardAddr, e.appAddress) { + continue + } + + // Query the global client for locked addresses in this shard + shardRes, err := e.globalClient.GetLockedAddresses( + context.Background(), + &protobufs.GetLockedAddressesRequest{ + ShardAddress: shardAddr, + FrameNumber: frame.Header.FrameNumber, + }, + ) + if err != nil { + e.logger.Error( + "failed to get locked addresses for shard", + zap.String("shard_addr", hex.EncodeToString(shardAddr)), + zap.Error(err), + ) + for hashStr, shards := range txShardRefs { + if _, ok := shards[shardAddrStr]; ok { + txIncluded[hashStr] = false + } + } + continue + } + + // Check that all our transactions are committed in this shard + for txHashStr := range txMap { + committedInShard := false + for _, tx := range shardRes.Transactions { + if string(tx.TransactionHash) == txHashStr { + committedInShard = tx.Committed + break + } + } + + if !committedInShard { + e.logger.Error("cannot confirm cross-shard locks") + txIncluded[txHashStr] = false + } + } + } + + e.provingMessagesMu.Lock() + e.provingMessages = e.provingMessages[:0] + for _, hashStr := range txHashesInOrder { + if txIncluded[hashStr] { + e.provingMessages = append(e.provingMessages, txMessageMap[hashStr]) + } + } + e.provingMessagesMu.Unlock() + + commitments, err := e.livenessProvider.Collect(context.Background()) + if err != nil { + e.logger.Error("could not collect commitments", zap.Error(err)) + return + } + + if err := e.broadcastLivenessCheck(newRank, commitments); err != nil { + e.logger.Error("could not broadcast liveness check", zap.Error(err)) + } +} + +func (e *AppConsensusEngine) broadcastLivenessCheck( + newRank uint64, + commitments CollectedCommitments, +) error { + signer, _, publicKey, _ := e.GetProvingKey(e.config.Engine) + if signer == nil || publicKey == nil { + return errors.Wrap( + errors.New("no proving key available"), + "broadcast liveness check", + ) + } + + check := &protobufs.ProverLivenessCheck{ + Filter: slices.Clone(e.appAddress), + Rank: newRank, + FrameNumber: commitments.frameNumber, + Timestamp: time.Now().UnixMilli(), + CommitmentHash: slices.Clone(commitments.commitmentHash), + } + + payload, err := check.ConstructSignaturePayload() + if err != nil { + return errors.Wrap(err, "construct liveness payload") + } + + sig, err := signer.SignWithDomain(payload, check.GetSignatureDomain()) + if err != nil { + return errors.Wrap(err, "sign liveness check") + } + + check.PublicKeySignatureBls48581 = &protobufs.BLS48581AddressedSignature{ + Address: e.getAddressFromPublicKey(publicKey), + Signature: sig, + } + + bytes, err := check.ToCanonicalBytes() + if err != nil { + return errors.Wrap(err, "marshal liveness check") + } + + if err := e.pubsub.PublishToBitmask( + e.getConsensusMessageBitmask(), + bytes, + ); err != nil { + return errors.Wrap(err, "publish liveness check") + } + + return nil +} + +// OnReceiveProposal implements consensus.Consumer. +func (e *AppConsensusEngine) OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ], +) { +} + +// OnReceiveQuorumCertificate implements consensus.Consumer. +func (e *AppConsensusEngine) OnReceiveQuorumCertificate( + currentRank uint64, + qc models.QuorumCertificate, +) { +} + +// OnReceiveTimeoutCertificate implements consensus.Consumer. 
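+// Currently a no-op; timeout certificates are persisted when they trigger a
+// rank change (see OnTimeoutCertificateTriggeredRankChange).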
+func (e *AppConsensusEngine) OnReceiveTimeoutCertificate( + currentRank uint64, + tc models.TimeoutCertificate, +) { +} + +// OnStart implements consensus.Consumer. +func (e *AppConsensusEngine) OnStart(currentRank uint64) {} + +// OnStartingTimeout implements consensus.Consumer. +func (e *AppConsensusEngine) OnStartingTimeout( + startTime time.Time, + endTime time.Time, +) { +} + +// OnStateIncorporated implements consensus.Consumer. +func (e *AppConsensusEngine) OnStateIncorporated( + state *models.State[*protobufs.AppShardFrame], +) { + e.frameStoreMu.Lock() + e.frameStore[state.Identifier] = *state.State + e.frameStoreMu.Unlock() +} + +// OnTimeoutCertificateTriggeredRankChange implements consensus.Consumer. +func (e *AppConsensusEngine) OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, +) { + e.logger.Debug( + "inserting timeout certificate", + zap.Uint64("rank", tc.GetRank()), + ) + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + qc := tc.GetLatestQuorumCert() + err = e.clockStore.PutTimeoutCertificate(&protobufs.TimeoutCertificate{ + Filter: tc.GetFilter(), + Rank: tc.GetRank(), + LatestRanks: tc.GetLatestRanks(), + LatestQuorumCertificate: &protobufs.QuorumCertificate{ + Rank: qc.GetRank(), + FrameNumber: qc.GetFrameNumber(), + Selector: []byte(qc.Identity()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: qc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: qc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: qc.GetAggregatedSignature().GetBitmask(), + }, + }, + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: tc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: tc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: tc.GetAggregatedSignature().GetBitmask(), + }, + }, txn) + if err != nil { + txn.Abort() + e.logger.Error("could not insert timeout certificate") + return + } + + if err := txn.Commit(); err != nil { + txn.Abort() + e.logger.Error("could not commit transaction", zap.Error(err)) + } +} + +// VerifyQuorumCertificate implements consensus.Verifier. 
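+// Verification checks the certificate's format, special-cases the rank-0
+// genesis certificate against the stored copy, reconstructs the signer set
+// from the bitmask over the active prover list, confirms the aggregated
+// public key matches, and finally verifies the BLS signature over the vote
+// message under the "appshard" domain.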
+func (e *AppConsensusEngine) VerifyQuorumCertificate( + quorumCertificate models.QuorumCertificate, +) error { + qc, ok := quorumCertificate.(*protobufs.QuorumCertificate) + if !ok { + return errors.Wrap( + errors.New("invalid quorum certificate"), + "verify quorum certificate", + ) + } + + if err := qc.Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify quorum certificate"), + ) + } + + // genesis qc is special: + if quorumCertificate.GetRank() == 0 { + genqc, err := e.clockStore.GetQuorumCertificate(nil, 0) + if err != nil { + return errors.Wrap(err, "verify quorum certificate") + } + + if genqc.Equals(quorumCertificate) { + return nil + } + } + + provers, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return errors.Wrap(err, "verify quorum certificate") + } + + pubkeys := [][]byte{} + signatures := [][]byte{} + if ((len(provers) + 7) / 8) > len(qc.AggregateSignature.Bitmask) { + return models.ErrInvalidSignature + } + for i, prover := range provers { + if qc.AggregateSignature.Bitmask[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + pubkeys = append(pubkeys, prover.PublicKey) + signatures = append(signatures, qc.AggregateSignature.GetSignature()) + } + } + + aggregationCheck, err := e.blsConstructor.Aggregate(pubkeys, signatures) + if err != nil { + return models.ErrInvalidSignature + } + + if !bytes.Equal( + qc.AggregateSignature.GetPubKey(), + aggregationCheck.GetAggregatePublicKey(), + ) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + qc.AggregateSignature.GetPubKey(), + qc.AggregateSignature.GetSignature(), + verification.MakeVoteMessage(nil, qc.Rank, qc.Identity()), + []byte("appshard"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +// VerifyTimeoutCertificate implements consensus.Verifier. +func (e *AppConsensusEngine) VerifyTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, +) error { + tc, ok := timeoutCertificate.(*protobufs.TimeoutCertificate) + if !ok { + return errors.Wrap( + errors.New("invalid timeout certificate"), + "verify timeout certificate", + ) + } + + if err := tc.Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify timeout certificate"), + ) + } + + provers, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return errors.Wrap(err, "verify timeout certificate") + } + + pubkeys := [][]byte{} + signatures := [][]byte{} + if ((len(provers) + 7) / 8) > len(tc.AggregateSignature.Bitmask) { + return models.ErrInvalidSignature + } + for i, prover := range provers { + if tc.AggregateSignature.Bitmask[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + pubkeys = append(pubkeys, prover.PublicKey) + signatures = append(signatures, tc.AggregateSignature.GetSignature()) + } + } + + aggregationCheck, err := e.blsConstructor.Aggregate(pubkeys, signatures) + if err != nil { + return models.ErrInvalidSignature + } + + if !bytes.Equal( + tc.AggregateSignature.GetPubKey(), + aggregationCheck.GetAggregatePublicKey(), + ) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + tc.AggregateSignature.GetPubKey(), + tc.AggregateSignature.GetSignature(), + verification.MakeTimeoutMessage( + nil, + tc.Rank, + tc.LatestQuorumCertificate.Rank, + ), + []byte("appshardtimeout"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +// VerifyVote implements consensus.Verifier. 
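+// Verification checks the vote's format, resolves the voter's public key by
+// address from the active prover registry, and verifies the BLS signature
+// over the vote message under the "appshard" domain.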
+func (e *AppConsensusEngine) VerifyVote( + vote **protobufs.ProposalVote, +) error { + if vote == nil || *vote == nil { + return errors.Wrap(errors.New("nil vote"), "verify vote") + } + + if err := (*vote).Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify vote"), + ) + } + + provers, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return errors.Wrap(err, "verify vote") + } + + var pubkey []byte + for _, p := range provers { + if bytes.Equal(p.Address, (*vote).PublicKeySignatureBls48581.Address) { + pubkey = p.PublicKey + break + } + } + + if bytes.Equal(pubkey, []byte{}) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + pubkey, + (*vote).PublicKeySignatureBls48581.Signature[:74], + verification.MakeVoteMessage(nil, (*vote).Rank, (*vote).Source()), + []byte("appshard"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +func (e *AppConsensusEngine) getPendingProposals( + frameNumber uint64, +) []*models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, +] { + pendingFrames, err := e.clockStore.RangeShardClockFrames( + e.appAddress, + frameNumber, + 0xfffffffffffffffe, + ) + if err != nil { + panic(err) + } + defer pendingFrames.Close() + + result := []*models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ]{} + + pendingFrames.First() + if !pendingFrames.Valid() { + return result + } + value, err := pendingFrames.Value() + if err != nil || value == nil { + return result + } + + previous := value + for pendingFrames.First(); pendingFrames.Valid(); pendingFrames.Next() { + value, err := pendingFrames.Value() + if err != nil || value == nil { + break + } + + parent, err := e.clockStore.GetQuorumCertificate( + e.appAddress, + previous.GetRank(), + ) + if err != nil { + panic(err) + } + + priorTC, _ := e.clockStore.GetTimeoutCertificate( + e.appAddress, + value.GetRank()-1, + ) + var priorTCModel models.TimeoutCertificate = nil + if priorTC != nil { + priorTCModel = priorTC + } + + vote := &protobufs.ProposalVote{ + Filter: e.appAddress, + Rank: value.GetRank(), + FrameNumber: value.Header.FrameNumber, + Selector: []byte(value.Identity()), + PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ + Signature: value.Header.PublicKeySignatureBls48581.Signature, + Address: []byte(value.Source()), + }, + } + result = append(result, &models.SignedProposal[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ]{ + Proposal: models.Proposal[*protobufs.AppShardFrame]{ + State: &models.State[*protobufs.AppShardFrame]{ + Rank: value.GetRank(), + Identifier: value.Identity(), + ProposerID: vote.Identity(), + ParentQuorumCertificate: parent, + State: &value, + }, + PreviousRankTimeoutCertificate: priorTCModel, + }, + Vote: &vote, + }) + previous = value + } + return result +} + +func (e *AppConsensusEngine) getRandomProverPeerId() (peer.ID, error) { + provers, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + e.logger.Error( + "could not get active provers for sync", + zap.Error(err), + ) + } + if len(provers) == 0 { + return "", err + } + index := rand.Intn(len(provers)) + registry, err := e.signerRegistry.GetKeyRegistryByProver( + provers[index].Address, + ) + if err != nil { + e.logger.Debug( + "could not get registry for prover", + zap.Error(err), + ) + return "", err + } + + if registry == nil || registry.IdentityKey == nil { + e.logger.Debug("registry for prover not found") + 
return "", err + } + + pk, err := pcrypto.UnmarshalEd448PublicKey(registry.IdentityKey.KeyValue) + if err != nil { + e.logger.Debug( + "could not parse pub key", + zap.Error(err), + ) + return "", err + } + + id, err := peer.IDFromPublicKey(pk) + if err != nil { + e.logger.Debug( + "could not derive peer id", + zap.Error(err), + ) + return "", err + } + + return id, nil +} + +func (e *AppConsensusEngine) getPeerIDOfProver( + prover []byte, +) (peer.ID, error) { + registry, err := e.signerRegistry.GetKeyRegistryByProver(prover) + if err != nil { + e.logger.Debug( + "could not get registry for prover", + zap.Error(err), + ) + return "", err + } + + if registry == nil || registry.IdentityKey == nil { + e.logger.Debug("registry for prover not found") + return "", errors.New("registry not found for prover") + } + + pk, err := pcrypto.UnmarshalEd448PublicKey(registry.IdentityKey.KeyValue) + if err != nil { + e.logger.Debug( + "could not parse pub key", + zap.Error(err), + ) + return "", err + } + + id, err := peer.IDFromPublicKey(pk) + if err != nil { + e.logger.Debug( + "could not derive peer id", + zap.Error(err), + ) + return "", err + } + + return id, nil +} + +// extractShardAddresses extracts all possible shard addresses from a +// transaction address +func (e *AppConsensusEngine) extractShardAddresses(txAddress []byte) [][]byte { + var shardAddresses [][]byte + + // Get the full path from the transaction address + path := GetFullPath(txAddress) + + // The first 43 nibbles (258 bits) represent the base shard address + // We need to extract all possible shard addresses by considering path + // segments after the 43rd nibble + if len(path) <= 43 { + // If the path is too short, just return the original address truncated to + // 32 bytes + if len(txAddress) >= 32 { + shardAddresses = append(shardAddresses, txAddress[:32]) + } + return shardAddresses + } + + // Convert the first 43 nibbles to bytes (base shard address) + baseShardAddr := txAddress[:32] + l1 := up2p.GetBloomFilterIndices(baseShardAddr, 256, 3) + candidates := map[string]struct{}{} + + // Now generate all possible shard addresses by extending the path + // Each additional nibble after the 43rd creates a new shard address + for i := 43; i < len(path); i++ { + // Create a new shard address by extending the base with this path segment + extendedAddr := make([]byte, 32) + copy(extendedAddr, baseShardAddr) + + // Add the path segment as a byte + extendedAddr = append(extendedAddr, byte(path[i])) + + candidates[string(extendedAddr)] = struct{}{} + } + + shards, err := e.shardsStore.GetAppShards( + slices.Concat(l1, baseShardAddr), + []uint32{}, + ) + if err != nil { + return [][]byte{} + } + + for _, shard := range shards { + if _, ok := candidates[string( + slices.Concat(shard.L2, uint32ToBytes(shard.Path)), + )]; ok { + shardAddresses = append(shardAddresses, shard.L2) + } + } + + return shardAddresses +} + +var _ consensus.DynamicCommittee = (*AppConsensusEngine)(nil) diff --git a/node/consensus/app/app_consensus_engine_chaos_integration_test.go b/node/consensus/app/app_consensus_engine_chaos_integration_test.go index ff7a9cb..f2e1a37 100644 --- a/node/consensus/app/app_consensus_engine_chaos_integration_test.go +++ b/node/consensus/app/app_consensus_engine_chaos_integration_test.go @@ -27,6 +27,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/config" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" "source.quilibrium.com/quilibrium/monorepo/hypergraph" + 
"source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/compiler" "source.quilibrium.com/quilibrium/monorepo/node/consensus/difficulty" "source.quilibrium.com/quilibrium/monorepo/node/consensus/fees" @@ -219,6 +220,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { nodeClockStore := store.NewPebbleClockStore(nodeDB, logger) nodeInboxStore := store.NewPebbleInboxStore(nodeDB, logger) nodeShardsStore := store.NewPebbleShardsStore(nodeDB, logger) + nodeConsensusStore := store.NewPebbleConsensusStore(nodeDB, logger) nodeHg := hypergraph.NewHypergraph(logger, nodeHypergraphStore, nodeInclusionProver, []int{}, &tests.Nopthenticator{}) // Create mock pubsub for network simulation @@ -261,6 +263,7 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { nodeInboxStore, nodeShardsStore, nodeHypergraphStore, + nodeConsensusStore, frameProver, nodeInclusionProver, bulletproofs.NewBulletproofProver(), @@ -327,10 +330,10 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { // Subscribe to frames pubsub.Subscribe(engine.getConsensusMessageBitmask(), func(message *pb.Message) error { - frame := &protobufs.AppShardFrame{} + frame := &protobufs.AppShardProposal{} if err := frame.FromCanonicalBytes(message.Data); err == nil { node.mu.Lock() - node.frameHistory = append(node.frameHistory, frame) + node.frameHistory = append(node.frameHistory, frame.State) node.mu.Unlock() } return nil @@ -350,8 +353,12 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { // Start all nodes t.Log("Step 4: Starting all nodes") + cancels := []func(){} for _, node := range nodes { - node.engine.Start(node.quit) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := node.engine.Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) } // Wait for genesis @@ -657,14 +664,16 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { voterAddress := nodes[nodeIdx].engine.getAddressFromPublicKey(publicKey) // Create vote message - vote := &protobufs.FrameVote{ + vote := &protobufs.ProposalVote{ FrameNumber: frame.Header.FrameNumber, - Proposer: frame.Header.Prover, - Approve: true, + Filter: frame.Header.Address, + Rank: frame.GetRank(), + Selector: []byte(frame.Identity()), PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ Address: voterAddress, Signature: sig, }, + Timestamp: uint64(time.Now().UnixMilli()), } // Serialize and publish @@ -979,13 +988,6 @@ func TestAppConsensusEngine_Integration_ChaosScenario(t *testing.T) { // Stop all nodes t.Log("\nStep 8: Cleanup") for i, node := range nodes { - // Unregister executors - node.mu.RLock() - for name := range node.executors { - node.engine.UnregisterExecutor(name, 0, true) - } - node.mu.RUnlock() - // Stop engine node.engine.Stop(true) close(node.quit) diff --git a/node/consensus/app/app_consensus_engine_integration_test.go b/node/consensus/app/app_consensus_engine_integration_test.go index 2f4aae4..5dc8fca 100644 --- a/node/consensus/app/app_consensus_engine_integration_test.go +++ b/node/consensus/app/app_consensus_engine_integration_test.go @@ -31,6 +31,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/config" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" "source.quilibrium.com/quilibrium/monorepo/hypergraph" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/compiler" 
"source.quilibrium.com/quilibrium/monorepo/node/consensus/difficulty" "source.quilibrium.com/quilibrium/monorepo/node/consensus/events" @@ -51,7 +52,6 @@ import ( "source.quilibrium.com/quilibrium/monorepo/types/crypto" thypergraph "source.quilibrium.com/quilibrium/monorepo/types/hypergraph" tkeys "source.quilibrium.com/quilibrium/monorepo/types/keys" - "source.quilibrium.com/quilibrium/monorepo/types/mocks" "source.quilibrium.com/quilibrium/monorepo/types/p2p" "source.quilibrium.com/quilibrium/monorepo/types/schema" tstore "source.quilibrium.com/quilibrium/monorepo/types/store" @@ -122,6 +122,9 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { hypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_basic"}, pebbleDB, logger, verifiableEncryptor, inclusionProver) hg := hypergraph.NewHypergraph(logger, hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{}) + // Create consensus store + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) + // Create key store keyStore := store.NewPebbleKeyStore(pebbleDB, logger) @@ -209,6 +212,7 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { inboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproof, @@ -223,7 +227,7 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -252,30 +256,24 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { typePrefix := binary.BigEndian.Uint32(message.Data[:4]) switch typePrefix { - case protobufs.AppShardFrameType: - frame := &protobufs.AppShardFrame{} + case protobufs.AppShardProposalType: + frame := &protobufs.AppShardProposal{} if err := frame.FromCanonicalBytes(message.Data); err != nil { return errors.New("error") } framesMu.Lock() - frameHistory = append(frameHistory, frame) + frameHistory = append(frameHistory, frame.State) framesMu.Unlock() - case protobufs.ProverLivenessCheckType: - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - return errors.New("error") - } - - case protobufs.FrameVoteType: - vote := &protobufs.FrameVote{} + case protobufs.ProposalVoteType: + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { return errors.New("error") } - case protobufs.FrameConfirmationType: - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { + case protobufs.TimeoutStateType: + state := &protobufs.TimeoutState{} + if err := state.FromCanonicalBytes(message.Data); err != nil { return errors.New("error") } @@ -287,8 +285,9 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { // Start engine t.Log("Step 2: Starting consensus engine") - quit := make(chan struct{}) - errChan := engine.Start(quit) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err = engine.Start(ctx) + require.NoError(t, err) select { case err := <-errChan: @@ -357,9 +356,14 @@ func TestAppConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { } framesMu.Unlock() + select { + case err := <-errChan: + require.NoError(t, err) + case <-time.After(100 * time.Millisecond): + } // Stop t.Log("Step 8: Cleaning up") - 
engine.UnregisterExecutor("test-executor", 0, false) + cancel() engine.Stop(false) } @@ -443,16 +447,16 @@ func TestAppConsensusEngine_Integration_FeeVotingMechanics(t *testing.T) { tempVerifiableEncryptor := verenc.NewMPCitHVerifiableEncryptor(1) tempHypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_fee_temp"}, tempDB, logger, tempVerifiableEncryptor, tempInclusionProver) - tempClockStore := store.NewPebbleClockStore(tempDB, logger) - tempInboxStore := store.NewPebbleInboxStore(tempDB, logger) - tempShardsStore := store.NewPebbleShardsStore(tempDB, logger) - // Create engines with different fee voting strategies t.Log("Step 4: Creating consensus engines for each node") for i := 0; i < numNodes; i++ { verifiableEncryptor := verenc.NewMPCitHVerifiableEncryptor(1) pebbleDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".test/app_fee_%d", i)}, 0) hypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".test/app_fee_%d", i)}, pebbleDB, logger, verifiableEncryptor, inclusionProver) + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) + tempClockStore := store.NewPebbleClockStore(tempDB, logger) + tempInboxStore := store.NewPebbleInboxStore(tempDB, logger) + tempShardsStore := store.NewPebbleShardsStore(tempDB, logger) hg := hypergraph.NewHypergraph(logger, hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{}) proverRegistry, err := provers.NewProverRegistry(zap.NewNop(), hg) require.NoError(t, err) @@ -518,6 +522,7 @@ func TestAppConsensusEngine_Integration_FeeVotingMechanics(t *testing.T) { tempInboxStore, tempShardsStore, tempHypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproof, @@ -532,7 +537,7 @@ func TestAppConsensusEngine_Integration_FeeVotingMechanics(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -552,13 +557,15 @@ func TestAppConsensusEngine_Integration_FeeVotingMechanics(t *testing.T) { // Start all engines t.Log("Step 5: Starting all consensus engines") - quits := make([]chan struct{}, numNodes) + cancels := []func(){} // Start remaining nodes one at a time to ensure proper sync for i := 0; i < numNodes; i++ { pubsubs[i].peerCount = numNodes - 1 - quits[i] = make(chan struct{}) - engines[i].Start(quits[i]) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) t.Logf(" - Started engine %d with %d peers", i, pubsubs[i].peerCount) } @@ -661,6 +668,7 @@ func TestAppConsensusEngine_Integration_FeeVotingMechanics(t *testing.T) { // Stop all t.Log("Step 11: Stopping all nodes") for i := 0; i < numNodes; i++ { + cancels[i]() engines[i].Stop(false) t.Logf(" - Stopped node %d", i) } @@ -744,7 +752,6 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { } engines := make([]*AppConsensusEngine, numNodes) - quits := make([]chan struct{}, numNodes) for i := 0; i < numNodes; i++ { // Shared backing stores used by the factories @@ -756,6 +763,7 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { tempClockStore := store.NewPebbleClockStore(tempDB, baseLogger) tempInboxStore := store.NewPebbleInboxStore(tempDB, baseLogger) tempShardsStore := store.NewPebbleShardsStore(tempDB, baseLogger) + tempConsensusStore := 
store.NewPebbleConsensusStore(tempDB, baseLogger) cfg := zap.NewDevelopmentConfig() adBI, _ := poseidon.HashBytes(proverKeys[i]) addr := adBI.FillBytes(make([]byte, 32)) @@ -827,6 +835,7 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { tempInboxStore, tempShardsStore, tempHypergraphStore, + tempConsensusStore, frameProver, inclusionProver, bulletproof, @@ -841,7 +850,7 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + nodeBC, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -862,10 +871,14 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { engines[i] = engine } + cancels := []func(){} + // Start all engines for i := 0; i < numNodes; i++ { - quits[i] = make(chan struct{}) - engines[i].Start(quits[i]) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) } // Let connected nodes advance while the detached node remains isolated @@ -968,6 +981,7 @@ func TestAppConsensusEngine_Integration_ReconnectCatchup(t *testing.T) { // Stop all engines for i := 0; i < numNodes; i++ { + cancels[i]() engines[i].Stop(false) } } @@ -997,7 +1011,6 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { // Create key managers and prover keys for all shards keyManagers := make([]tkeys.KeyManager, numShards) proverKeys := make([][]byte, numShards) - var err error for i := 0; i < numShards; i++ { bc := &bls48581.Bls48581KeyConstructor{} dc := &bulletproofs.Decaf448KeyConstructor{} @@ -1013,23 +1026,6 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { tempInclusionProver := bls48581.NewKZGInclusionProver(logger) tempVerifiableEncryptor := verenc.NewMPCitHVerifiableEncryptor(1) tempHypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_multi_temp"}, tempDB, logger, tempVerifiableEncryptor, tempInclusionProver) - tempHg := hypergraph.NewHypergraph(logger, tempHypergraphStore, tempInclusionProver, []int{}, &tests.Nopthenticator{}) - tempClockStore := store.NewPebbleClockStore(tempDB, logger) - tempInboxStore := store.NewPebbleInboxStore(tempDB, logger) - tempShardsStore := store.NewPebbleShardsStore(tempDB, logger) - proverRegistry, err := provers.NewProverRegistry(zap.NewNop(), tempHg) - require.NoError(t, err) - - // Register all provers - for i, proverKey := range proverKeys { - proverAddress := calculateProverAddress(proverKey) - registerProverInHypergraphWithFilter(t, tempHg, proverKey, proverAddress, shardAddresses[i]) - t.Logf(" - Registered prover %d with address: %x", i, proverAddress) - } - globalTimeReel, err := consensustime.NewGlobalTimeReel(logger, proverRegistry, tempClockStore, 1, true) - require.NoError(t, err) - - proverRegistry.Refresh() _, m, cleanup := tests.GenerateSimnetHosts(t, numShards, []libp2p.Option{}) defer cleanup() @@ -1062,9 +1058,26 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { bulletproof := bulletproofs.NewBulletproofProver() decafConstructor := &bulletproofs.Decaf448KeyConstructor{} compiler := compiler.NewBedlamCompiler() + tempConsensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) hypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".test/app_multi_%d", i)}, pebbleDB, logger, verifiableEncryptor, 
inclusionProver) hg := hypergraph.NewHypergraph(logger, hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{}) + tempHg := hypergraph.NewHypergraph(logger, tempHypergraphStore, tempInclusionProver, []int{}, &tests.Nopthenticator{}) + tempClockStore := store.NewPebbleClockStore(tempDB, logger) + tempInboxStore := store.NewPebbleInboxStore(tempDB, logger) + tempShardsStore := store.NewPebbleShardsStore(tempDB, logger) + proverRegistry, err := provers.NewProverRegistry(zap.NewNop(), tempHg) + require.NoError(t, err) + // Register all provers + for i, proverKey := range proverKeys { + proverAddress := calculateProverAddress(proverKey) + registerProverInHypergraphWithFilter(t, tempHg, proverKey, proverAddress, shardAddresses[i]) + t.Logf(" - Registered prover %d with address: %x", i, proverAddress) + } + globalTimeReel, err := consensustime.NewGlobalTimeReel(logger, proverRegistry, tempClockStore, 1, true) + require.NoError(t, err) + + proverRegistry.Refresh() keyStore := store.NewPebbleKeyStore(pebbleDB, logger) @@ -1099,6 +1112,7 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { tempInboxStore, tempShardsStore, tempHypergraphStore, + tempConsensusStore, frameProver, inclusionProver, bulletproof, @@ -1113,7 +1127,7 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -1133,10 +1147,12 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { // Start all shards t.Log("Step 4: Starting all shard engines") - quits := make([]chan struct{}, numShards) + cancels := []func(){} for i := 0; i < numShards; i++ { - quits[i] = make(chan struct{}) - engines[i].Start(quits[i]) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) t.Logf(" - Started shard %d", i) } @@ -1179,6 +1195,7 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { // Stop all t.Log("Step 8: Stopping all shards") for i := 0; i < numShards; i++ { + cancels[i]() engines[i].Stop(false) t.Logf(" - Stopped shard %d", i) } @@ -1187,9 +1204,6 @@ func TestAppConsensusEngine_Integration_MultipleAppShards(t *testing.T) { // Scenario: App consensus coordinates with global consensus events // Expected: App reacts to global new head events appropriately func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - logger, _ := zap.NewDevelopment() appAddress := []byte{0xAA, 0x01, 0x02, 0x03} @@ -1243,8 +1257,6 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { go func() { for { select { - case <-ctx.Done(): - return case event := <-eventCh: eventsMu.Lock() receivedEvents = append(receivedEvents, event) @@ -1253,10 +1265,6 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { } }() - // Start event distributor - err = eventDistributor.Start(ctx) - require.NoError(t, err) - // Don't add initial frame - let the time reel initialize itself // Create app engine @@ -1266,6 +1274,7 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { verifiableEncryptor := verenc.NewMPCitHVerifiableEncryptor(1) shardsStore := store.NewPebbleShardsStore(pebbleDB, logger) hypergraphStore := 
store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_coordination"}, pebbleDB, logger, verifiableEncryptor, inclusionProver) + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) hg := hypergraph.NewHypergraph(logger, hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{}) keyStore := store.NewPebbleKeyStore(pebbleDB, logger) @@ -1321,6 +1330,7 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { tempInboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproofs.NewBulletproofProver(), // bulletproofProver @@ -1338,7 +1348,7 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { qp2p.NewInMemoryPeerInfoManager(logger), appTimeReel, globalTimeReel, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), nil, ) @@ -1347,8 +1357,9 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { engine.SetGlobalClient(mockGSC) // Start engine - quit := make(chan struct{}) - engine.Start(quit) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err = engine.Start(ctx) + require.NoError(t, err) // Wait for genesis initialization time.Sleep(2 * time.Second) @@ -1374,16 +1385,13 @@ func TestAppConsensusEngine_Integration_GlobalAppCoordination(t *testing.T) { eventsMu.Unlock() eventDistributor.Unsubscribe("test-tracker") - eventDistributor.Stop() + cancel() engine.Stop(false) } // Scenario: Test prover trie membership and rotation // Expected: Only valid provers can prove frames func TestAppConsensusEngine_Integration_ProverTrieMembership(t *testing.T) { - _, cancel := context.WithCancel(context.Background()) - defer cancel() - logger, _ := zap.NewDevelopment() appAddress := []byte{0xAA, 0x01, 0x02, 0x03} @@ -1441,6 +1449,7 @@ func TestAppConsensusEngine_Integration_ProverTrieMembership(t *testing.T) { keyStore := store.NewPebbleKeyStore(pebbleDB, logger) clockStore := store.NewPebbleClockStore(pebbleDB, logger) inboxStore := store.NewPebbleInboxStore(pebbleDB, logger) + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) frameProver := vdf.NewWesolowskiFrameProver(logger) signerRegistry, err := registration.NewCachedSignerRegistry(keyStore, keyManager, bc, bulletproofs.NewBulletproofProver(), logger) @@ -1493,6 +1502,7 @@ func TestAppConsensusEngine_Integration_ProverTrieMembership(t *testing.T) { inboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, tempBulletproof, @@ -1507,7 +1517,7 @@ func TestAppConsensusEngine_Integration_ProverTrieMembership(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -1530,208 +1540,12 @@ func TestAppConsensusEngine_Integration_ProverTrieMembership(t *testing.T) { } } -// Scenario: Detailed state transition testing with various triggers -// Expected: Proper transitions through all states -func TestAppConsensusEngine_Integration_StateTransitions(t *testing.T) { - t.Log("Testing engine state transitions") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - t.Log("Step 1: Setting up test components") - logger, _ := zap.NewDevelopment() - appAddress := []byte{0xAA, 0x01, 0x02, 0x03} - peerID := []byte{0x01, 0x02, 0x03, 0x04} - t.Logf(" - App shard address: %x", appAddress) - t.Logf(" - Peer ID: %x", peerID) - - // Create engine with controlled components - bc := 
&bls48581.Bls48581KeyConstructor{} - dc := &bulletproofs.Decaf448KeyConstructor{} - keyManager := keys.NewInMemoryKeyManager(bc, dc) - pk, _, err := keyManager.CreateSigningKey("q-prover-key", crypto.KeyTypeBLS48581G1) - require.NoError(t, err) - proverKey := pk.Public().([]byte) - - // Create stores - pebbleDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_state_transitions"}, 0) - - // Create inclusion prover and verifiable encryptor - inclusionProver := bls48581.NewKZGInclusionProver(logger) - verifiableEncryptor := verenc.NewMPCitHVerifiableEncryptor(1) - bulletproof := bulletproofs.NewBulletproofProver() - decafConstructor := &bulletproofs.Decaf448KeyConstructor{} - compiler := compiler.NewBedlamCompiler() - - // Create hypergraph - hypergraphStore := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/app_state_transitions"}, pebbleDB, logger, verifiableEncryptor, inclusionProver) - hg := hypergraph.NewHypergraph(logger, hypergraphStore, inclusionProver, []int{}, &tests.Nopthenticator{}) - - // Create key store - keyStore := store.NewPebbleKeyStore(pebbleDB, logger) - - // Create clock store - clockStore := store.NewPebbleClockStore(pebbleDB, logger) - - // Create inbox store - inboxStore := store.NewPebbleInboxStore(pebbleDB, logger) - - // Create shards store - shardsStore := store.NewPebbleShardsStore(pebbleDB, logger) - - frameProver := vdf.NewWesolowskiFrameProver(logger) - signerRegistry, err := registration.NewCachedSignerRegistry(keyStore, keyManager, bc, bulletproofs.NewBulletproofProver(), logger) - require.NoError(t, err) - proverRegistry, err := provers.NewProverRegistry(zap.NewNop(), hg) - require.NoError(t, err) - - // Register the prover - proverAddress := calculateProverAddress(proverKey) - registerProverInHypergraphWithFilter(t, hg, proverKey, proverAddress, appAddress) - - proverRegistry.Refresh() - - dynamicFeeManager := fees.NewDynamicFeeManager(logger, inclusionProver) - - frameValidator := validator.NewBLSAppFrameValidator(proverRegistry, bc, frameProver, logger) - globalFrameValidator := validator.NewBLSGlobalFrameValidator(proverRegistry, bc, frameProver, logger) - difficultyAdjuster := difficulty.NewAsertDifficultyAdjuster(0, time.Now().UnixMilli(), 80000) - rewardIssuance := reward.NewOptRewardIssuance() - - // Create pubsub with controlled peer count - _, m, cleanup := tests.GenerateSimnetHosts(t, 1, []libp2p.Option{}) - defer cleanup() - p2pcfg := config.P2PConfig{}.WithDefaults() - p2pcfg.Network = 1 - p2pcfg.StreamListenMultiaddr = "/ip4/0.0.0.0/tcp/0" - p2pcfg.MinBootstrapPeers = 0 - p2pcfg.DiscoveryPeerLookupLimit = 0 - c := &config.Config{ - Engine: &config.EngineConfig{ - Difficulty: 80000, - ProvingKeyId: "q-prover-key", - }, - P2P: &p2pcfg, - } - pubsub := newMockAppIntegrationPubSub(c, logger, []byte(m.Nodes[0].ID()), m.Nodes[0], m.Keys[0], m.Nodes) - pubsub.peerCount = 0 // Start with 0 peers to trigger genesis - - globalTimeReel, _ := consensustime.NewGlobalTimeReel(logger, proverRegistry, clockStore, 1, true) - - factory := NewAppConsensusEngineFactory( - logger, - &config.Config{ - Engine: &config.EngineConfig{ - Difficulty: 80000, - ProvingKeyId: "q-prover-key", - }, - P2P: &config.P2PConfig{ - Network: 1, - StreamListenMultiaddr: "/ip4/0.0.0.0/tcp/0", - }, - }, - pubsub, - hg, - keyManager, - keyStore, - clockStore, - inboxStore, - shardsStore, - hypergraphStore, - frameProver, - inclusionProver, - bulletproof, - verifiableEncryptor, - decafConstructor, - compiler, - 
signerRegistry, - proverRegistry, - qp2p.NewInMemoryPeerInfoManager(logger), - dynamicFeeManager, - frameValidator, - globalFrameValidator, - difficultyAdjuster, - rewardIssuance, - &mocks.MockBlsConstructor{}, - channel.NewDoubleRatchetEncryptedChannel(), - ) - - engine, err := factory.CreateAppConsensusEngine( - appAddress, - 0, // coreId - globalTimeReel, - nil, - ) - require.NoError(t, err) - mockGSC := &mockGlobalClientLocks{} - engine.SetGlobalClient(mockGSC) - - // Track state transitions - t.Log("Step 2: Setting up state transition tracking") - stateHistory := make([]consensus.EngineState, 0) - var statesMu sync.Mutex - - go func() { - ticker := time.NewTicker(1 * time.Millisecond) - defer ticker.Stop() - - lastState := consensus.EngineStateStopped - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - state := engine.GetState() - if state != lastState { - statesMu.Lock() - stateHistory = append(stateHistory, state) - statesMu.Unlock() - lastState = state - t.Logf(" [State Change] %v → %v", lastState, state) - } - } - } - }() - - // Start engine - t.Log("Step 3: Starting consensus engine") - quit := make(chan struct{}) - engine.Start(quit) - - // Should go through genesis initialization and reach collecting - t.Log("Step 4: Waiting for genesis initialization (0 peers)") - time.Sleep(10 * time.Second) - - // Increase peers to allow normal operation - t.Log("Step 5: Increasing peer count to allow normal operation") - pubsub.peerCount = 5 - t.Log(" - Set peer count to 5") - time.Sleep(10 * time.Second) - - // Should transition through states - t.Log("Step 6: Verifying state transitions") - statesMu.Lock() - t.Logf(" - Total state transitions: %d", len(stateHistory)) - assert.Contains(t, stateHistory, consensus.EngineStateLoading) - assert.Contains(t, stateHistory, consensus.EngineStateProving) - - // May also see Proving/Publishing if frames were produced - t.Logf(" - Complete state history: %v", stateHistory) - statesMu.Unlock() - - t.Log("Step 7: Stopping engine") - engine.Stop(false) -} - // Scenario: Invalid frames are rejected by the network // Expected: Only valid frames are accepted and processed func TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { t.Skip("retrofit for test pubsub") t.Log("Testing invalid frame rejection") - _, cancel := context.WithCancel(context.Background()) - defer cancel() - t.Log("Step 1: Setting up test components") logger, _ := zap.NewDevelopment() appAddress := []byte{ @@ -1765,6 +1579,7 @@ func TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { clockStore := store.NewPebbleClockStore(pebbleDB, logger) inboxStore := store.NewPebbleInboxStore(pebbleDB, logger) shardsStore := store.NewPebbleShardsStore(pebbleDB, logger) + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) frameProver := vdf.NewWesolowskiFrameProver(logger) signerRegistry, err := registration.NewCachedSignerRegistry(keyStore, keyManager, bc, bulletproofs.NewBulletproofProver(), logger) require.NoError(t, err) @@ -1822,6 +1637,7 @@ func TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { inboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproof, @@ -1836,7 +1652,7 @@ func TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -1856,8 +1672,9 @@ func 
TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { // Start engine t.Log("Step 2: Starting consensus engine") - quit := make(chan struct{}) - engine.Start(quit) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err = engine.Start(ctx) + require.NoError(t, err) // Wait a bit for engine to register validator time.Sleep(500 * time.Millisecond) @@ -1951,6 +1768,7 @@ func TestAppConsensusEngine_Integration_InvalidFrameRejection(t *testing.T) { validationMu.Unlock() t.Log("Step 6: Cleaning up") + cancel() engine.Stop(false) } @@ -2077,6 +1895,7 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) nodeClockStore := store.NewPebbleClockStore(nodeDB, logger) nodeInboxStore := store.NewPebbleInboxStore(nodeDB, logger) nodeShardsStore := store.NewPebbleShardsStore(nodeDB, logger) + nodeConsensusStore := store.NewPebbleConsensusStore(nodeDB, logger) nodeHg := hypergraph.NewHypergraph(logger, nodeHypergraphStore, nodeInclusionProver, []int{}, &tests.Nopthenticator{}) nodeProverRegistry, err := provers.NewProverRegistry(zap.NewNop(), nodeHg) nodeBulletproof := bulletproofs.NewBulletproofProver() @@ -2127,6 +1946,7 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) nodeInboxStore, nodeShardsStore, nodeHypergraphStore, + nodeConsensusStore, frameProver, nodeInclusionProver, nodeBulletproof, @@ -2141,7 +1961,7 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -2281,14 +2101,16 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) } // Start all nodes - quits := make([][]chan struct{}, numShards) + cancels := make([][]func(), numShards) for shardIdx := 0; shardIdx < numShards; shardIdx++ { - quits[shardIdx] = make([]chan struct{}, numNodesPerShard) + cancels[shardIdx] = make([]func(), numNodesPerShard) // Start first node in each shard to create genesis - quits[shardIdx][0] = make(chan struct{}) node := shards[shardIdx][0] - node.engine.Start(quits[shardIdx][0]) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := node.engine.Start(ctx) + require.NoError(t, err) + cancels[shardIdx][0] = cancel // Set peer count for first node shards[shardIdx][0].pubsub.peerCount = numNodesPerShard - 1 @@ -2296,7 +2118,6 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) // Start remaining nodes in shard one at a time for nodeIdx := 1; nodeIdx < numNodesPerShard; nodeIdx++ { shards[shardIdx][nodeIdx].pubsub.peerCount = numNodesPerShard - 1 - quits[shardIdx][nodeIdx] = make(chan struct{}) } } @@ -2356,7 +2177,11 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) for shardIdx := 0; shardIdx < numShards; shardIdx++ { for nodeIdx := 0; nodeIdx < numNodesPerShard; nodeIdx++ { // Start engine - shards[shardIdx][nodeIdx].engine.Start(quits[shardIdx][nodeIdx]) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + + err := shards[shardIdx][nodeIdx].engine.Start(ctx) + require.NoError(t, err) + cancels[shardIdx][nodeIdx] = cancel } } @@ -2488,7 +2313,7 @@ func TestAppConsensusEngine_Integration_ComplexMultiShardScenario(t *testing.T) for shardIdx := 0; shardIdx < numShards; shardIdx++ { for nodeIdx := 0; nodeIdx < numNodesPerShard; nodeIdx++ { node := shards[shardIdx][nodeIdx] - + 
cancels[shardIdx][nodeIdx]() // Stop engine node.engine.Stop(false) // Frame number is likely wrong, but irrelevant for the test @@ -2875,6 +2700,7 @@ func TestAppConsensusEngine_Integration_NoProversStaysInLoading(t *testing.T) { clockStore := store.NewPebbleClockStore(pebbleDB, logger) inboxStore := store.NewPebbleInboxStore(pebbleDB, logger) shardsStore := store.NewPebbleShardsStore(pebbleDB, logger) + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) // Create prover registry - but don't register any provers proverRegistry, err := provers.NewProverRegistry(zap.NewNop(), hg) @@ -2950,6 +2776,7 @@ func TestAppConsensusEngine_Integration_NoProversStaysInLoading(t *testing.T) { inboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproofs.NewBulletproofProver(), // bulletproofProver @@ -2967,7 +2794,7 @@ func TestAppConsensusEngine_Integration_NoProversStaysInLoading(t *testing.T) { qp2p.NewInMemoryPeerInfoManager(logger), appTimeReel, globalTimeReel, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), nil, ) @@ -2991,9 +2818,13 @@ func TestAppConsensusEngine_Integration_NoProversStaysInLoading(t *testing.T) { pubsubs[i].mu.Unlock() } + cancels := []func(){} // Start all engines for i := 0; i < numNodes; i++ { - errChan := engines[i].Start(quits[i]) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) select { case err := <-errChan: require.NoError(t, err) @@ -3018,6 +2849,7 @@ func TestAppConsensusEngine_Integration_NoProversStaysInLoading(t *testing.T) { // Stop all engines for i := 0; i < numNodes; i++ { + cancels[i]() <-engines[i].Stop(false) } @@ -3078,6 +2910,9 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { // Create shards store shardsStore := store.NewPebbleShardsStore(pebbleDB, logger) + // Create consensus store + consensusStore := store.NewPebbleConsensusStore(pebbleDB, logger) + // Create concrete components frameProver := vdf.NewWesolowskiFrameProver(logger) signerRegistry, err := registration.NewCachedSignerRegistry(keyStore, keyManager, bc, bulletproofs.NewBulletproofProver(), logger) @@ -3151,6 +2986,7 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { inboxStore, shardsStore, hypergraphStore, + consensusStore, frameProver, inclusionProver, bulletproof, @@ -3165,7 +3001,7 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { globalFrameValidator, difficultyAdjuster, rewardIssuance, - &mocks.MockBlsConstructor{}, + bc, channel.NewDoubleRatchetEncryptedChannel(), ) @@ -3179,8 +3015,9 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { mockGSC := &mockGlobalClientLocks{} engine.SetGlobalClient(mockGSC) - quit := make(chan struct{}) - errChan := engine.Start(quit) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err = engine.Start(ctx) + require.NoError(t, err) select { case err := <-errChan: @@ -3203,14 +3040,14 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { typePrefix := binary.BigEndian.Uint32(data[:4]) // Check if it's a GlobalFrame - if typePrefix == protobufs.AppShardFrameType { - frame := &protobufs.AppShardFrame{} + if typePrefix == protobufs.AppShardProposalType { + frame := &protobufs.AppShardProposal{} if err := frame.FromCanonicalBytes(data); err == nil { mu.Lock() if afterAlert 
{ - afterAlertFrames = append(afterAlertFrames, frame) + afterAlertFrames = append(afterAlertFrames, frame.State) } else { - publishedFrames = append(publishedFrames, frame) + publishedFrames = append(publishedFrames, frame.State) } mu.Unlock() } @@ -3258,6 +3095,6 @@ func TestAppConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { require.Equal(t, 0, afterAlertCount) // Stop - engine.UnregisterExecutor("test-executor", 0, false) + cancel() engine.Stop(false) } diff --git a/node/consensus/app/consensus_dynamic_committee.go b/node/consensus/app/consensus_dynamic_committee.go new file mode 100644 index 0000000..7ca1b28 --- /dev/null +++ b/node/consensus/app/consensus_dynamic_committee.go @@ -0,0 +1,216 @@ +package app + +import ( + "bytes" + "encoding/binary" + "slices" + + "github.com/iden3/go-iden3-crypto/poseidon" + "github.com/pkg/errors" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/protobufs" + tconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus" +) + +type ConsensusWeightedIdentity struct { + prover *tconsensus.ProverInfo +} + +// Identity implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) Identity() models.Identity { + return models.Identity(c.prover.Address) +} + +// PublicKey implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) PublicKey() []byte { + return c.prover.PublicKey +} + +// Weight implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) Weight() uint64 { + return c.prover.Seniority +} + +// IdentitiesByRank implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) IdentitiesByRank( + rank uint64, +) ([]models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return nil, errors.Wrap(err, "identities by rank") + } + + return internalProversToWeightedIdentity(proverInfo), nil +} + +// IdentitiesByState implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) IdentitiesByState( + stateID models.Identity, +) ([]models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return nil, errors.Wrap(err, "identities by state") + } + + return internalProversToWeightedIdentity(proverInfo), nil +} + +// IdentityByRank implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) IdentityByRank( + rank uint64, + participantID models.Identity, +) (models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return nil, errors.Wrap(err, "identity by rank") + } + + var found *tconsensus.ProverInfo + for _, p := range proverInfo { + if bytes.Equal(p.Address, []byte(participantID)) { + found = p + break + } + } + + if found == nil { + return nil, errors.Wrap(errors.New("prover not found"), "identity by rank") + } + + return internalProverToWeightedIdentity(found), nil +} + +// IdentityByState implements consensus.DynamicCommittee. 
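+// It resolves participantID against the shard's current set of active
+// provers; stateID is accepted to satisfy the interface but does not
+// further constrain the lookup.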
+func (e *AppConsensusEngine) IdentityByState( + stateID models.Identity, + participantID models.Identity, +) (models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return nil, errors.Wrap(err, "identity by state") + } + + var found *tconsensus.ProverInfo + for _, p := range proverInfo { + if bytes.Equal(p.Address, []byte(participantID)) { + found = p + break + } + } + + if found == nil { + return nil, errors.Wrap(errors.New("prover not found"), "identity by state") + } + + return internalProverToWeightedIdentity(found), nil +} + +// LeaderForRank implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) LeaderForRank( + rank uint64, +) (models.Identity, error) { + lineage, err := e.appTimeReel.GetLineage() + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + var found *protobufs.AppShardFrame + for _, l := range lineage { + if l.GetRank() == (rank - 1) { + found = l + break + } + } + + var selector models.Identity + if found == nil { + selector = models.Identity(make([]byte, 32)) + } else { + selector = found.Identity() + } + + inputBI, err := poseidon.HashBytes(slices.Concat( + []byte(selector), + binary.BigEndian.AppendUint64(nil, rank), + )) + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + input := inputBI.FillBytes(make([]byte, 32)) + prover, err := e.proverRegistry.GetNextProver( + [32]byte(input), + e.appAddress, + ) + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + return models.Identity(prover), nil +} + +// QuorumThresholdForRank implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) QuorumThresholdForRank( + rank uint64, +) (uint64, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return 0, errors.Wrap(err, "quorum threshold for rank") + } + + total := uint64(0) + for _, p := range proverInfo { + total += p.Seniority + } + + return (total * 2) / 3, nil +} + +// Self implements consensus.DynamicCommittee. +func (e *AppConsensusEngine) Self() models.Identity { + return e.getPeerID().Identity() +} + +// TimeoutThresholdForRank implements consensus.DynamicCommittee. 
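+// It returns two thirds of the combined seniority of all active provers
+// other than the rank's leader, so a timeout can still be certified when
+// the leader itself holds a large share of the total weight.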
+func (e *AppConsensusEngine) TimeoutThresholdForRank( + rank uint64, +) (uint64, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return 0, errors.Wrap(err, "timeout threshold for rank") + } + + leader, err := e.LeaderForRank(rank) + if err != nil { + return 0, errors.Wrap(err, "timeout threshold for rank") + } + + total := uint64(0) + // 2/3 majority doesn't quite work in this scenario, because if the timing out + // prover has a high enough seniority it could get things stuck where no + // timeout can occur + for _, p := range proverInfo { + if !bytes.Equal(p.Address, []byte(leader)) { + total += p.Seniority + } + } + + return (total * 2) / 3, nil +} + +func internalProversToWeightedIdentity( + provers []*tconsensus.ProverInfo, +) []models.WeightedIdentity { + wis := []models.WeightedIdentity{} + for _, p := range provers { + wis = append(wis, internalProverToWeightedIdentity(p)) + } + + return wis +} + +func internalProverToWeightedIdentity( + prover *tconsensus.ProverInfo, +) models.WeightedIdentity { + return &ConsensusWeightedIdentity{prover} +} diff --git a/node/consensus/app/consensus_leader_provider.go b/node/consensus/app/consensus_leader_provider.go index ea82012..85158c1 100644 --- a/node/consensus/app/consensus_leader_provider.go +++ b/node/consensus/app/consensus_leader_provider.go @@ -1,6 +1,7 @@ package app import ( + "bytes" "context" "encoding/hex" "time" @@ -9,6 +10,8 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -18,8 +21,8 @@ type AppLeaderProvider struct { } func (p *AppLeaderProvider) GetNextLeaders( - prior **protobufs.AppShardFrame, ctx context.Context, + prior **protobufs.AppShardFrame, ) ([]PeerID, error) { // Get the parent selector for next prover calculation var parentSelector []byte @@ -58,36 +61,77 @@ func (p *AppLeaderProvider) GetNextLeaders( } func (p *AppLeaderProvider) ProveNextState( - prior **protobufs.AppShardFrame, - collected CollectedCommitments, ctx context.Context, + rank uint64, + filter []byte, + priorState models.Identity, ) (**protobufs.AppShardFrame, error) { timer := prometheus.NewTimer(frameProvingDuration.WithLabelValues( p.engine.appAddressHex, )) defer timer.ObserveDuration() - if prior == nil || *prior == nil { - frameProvingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc() - return nil, errors.Wrap(errors.New("nil prior frame"), "prove next state") + prior, _, err := p.engine.clockStore.GetLatestShardClockFrame( + p.engine.appAddress, + ) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf("could not collect: %+w", err) + } + + if prior == nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf("missing prior frame") + } + + if prior.Identity() != priorState { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf( + "building on fork or needs sync: frame %d, rank %d, parent_id: %x, asked: rank %d, id: %x", + prior.Header.FrameNumber, + prior.Header.Rank, + prior.Header.ParentSelector, + rank, + priorState, + ) + } + + // Get prover index + provers, err := p.engine.proverRegistry.GetActiveProvers(p.engine.appAddress) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, errors.Wrap(err, 
"prove next state") + } + + found := false + for _, prover := range provers { + if bytes.Equal(prover.Address, p.engine.getProverAddress()) { + found = true + break + } + } + + if !found { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf("not a prover") } // Get collected messages to include in frame - p.engine.pendingMessagesMu.RLock() - messages := make([]*protobufs.Message, len(p.engine.collectedMessages[string( - collected.commitmentHash[:32], - )])) - copy(messages, p.engine.collectedMessages[string( - collected.commitmentHash[:32], - )]) - p.engine.pendingMessagesMu.RUnlock() + p.engine.provingMessagesMu.Lock() + messages := make([]*protobufs.Message, len(p.engine.provingMessages)) + copy(messages, p.engine.provingMessages) + p.engine.provingMessages = []*protobufs.Message{} + p.engine.provingMessagesMu.Unlock() - // Clear collected messages after copying - p.engine.collectedMessagesMu.Lock() - p.engine.collectedMessages[string( - collected.commitmentHash[:32], - )] = []*protobufs.Message{} - p.engine.collectedMessagesMu.Unlock() + if len(messages) == 0 { + p.engine.collectedMessagesMu.Lock() + if len(p.engine.collectedMessages) > 0 { + messages = make([]*protobufs.Message, len(p.engine.collectedMessages)) + copy(messages, p.engine.collectedMessages) + p.engine.collectedMessages = []*protobufs.Message{} + } + p.engine.collectedMessagesMu.Unlock() + } // Update pending messages metric pendingMessagesCount.WithLabelValues(p.engine.appAddressHex).Set(0) @@ -99,7 +143,7 @@ func (p *AppLeaderProvider) ProveNextState( ) // Prove the frame - newFrame, err := p.engine.internalProveFrame(messages, (*prior)) + newFrame, err := p.engine.internalProveFrame(rank, messages, prior) if err != nil { frameProvingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc() return nil, errors.Wrap(err, "prove frame") @@ -122,3 +166,9 @@ func (p *AppLeaderProvider) ProveNextState( return &newFrame, nil } + +var _ consensus.LeaderProvider[ + *protobufs.AppShardFrame, + PeerID, + CollectedCommitments, +] = (*AppLeaderProvider)(nil) diff --git a/node/consensus/app/consensus_liveness_provider.go b/node/consensus/app/consensus_liveness_provider.go index 549b4d4..db6def8 100644 --- a/node/consensus/app/consensus_liveness_provider.go +++ b/node/consensus/app/consensus_liveness_provider.go @@ -3,7 +3,6 @@ package app import ( "context" "slices" - "time" "github.com/pkg/errors" "go.uber.org/zap" @@ -77,7 +76,7 @@ func (p *AppLivenessProvider) Collect( zap.Int("valid_message_count", len(finalizedMessages)), zap.Uint64( "current_frame", - p.engine.GetFrame().Rank(), + p.engine.GetFrame().GetRank(), ), ) transactionsCollectedTotal.WithLabelValues(p.engine.appAddressHex).Add( @@ -91,7 +90,7 @@ func (p *AppLivenessProvider) Collect( } p.engine.collectedMessagesMu.Lock() - p.engine.collectedMessages[string(commitment[:32])] = finalizedMessages + p.engine.collectedMessages = finalizedMessages p.engine.collectedMessagesMu.Unlock() return CollectedCommitments{ @@ -101,77 +100,6 @@ func (p *AppLivenessProvider) Collect( }, nil } -func (p *AppLivenessProvider) SendLiveness( - prior **protobufs.AppShardFrame, - collected CollectedCommitments, - ctx context.Context, -) error { - // Get prover key - signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) - if publicKey == nil { - return errors.New("no proving key available for liveness check") - } - - frameNumber := uint64(0) - if prior != nil && (*prior).Header != nil { - frameNumber = (*prior).Header.FrameNumber + 1 - 
} - - lastProcessed := p.engine.GetFrame() - if lastProcessed != nil && lastProcessed.Header.FrameNumber > frameNumber { - return errors.New("out of sync, forcing resync") - } - - // Create liveness check message - livenessCheck := &protobufs.ProverLivenessCheck{ - Filter: p.engine.appAddress, - FrameNumber: frameNumber, - Timestamp: time.Now().UnixMilli(), - CommitmentHash: collected.commitmentHash, - } - - // Sign the message - signatureData, err := livenessCheck.ConstructSignaturePayload() - if err != nil { - return errors.Wrap(err, "send liveness") - } - - sig, err := signer.SignWithDomain( - signatureData, - livenessCheck.GetSignatureDomain(), - ) - if err != nil { - return errors.Wrap(err, "send liveness") - } - - proverAddress := p.engine.getAddressFromPublicKey(publicKey) - livenessCheck.PublicKeySignatureBls48581 = - &protobufs.BLS48581AddressedSignature{ - Address: proverAddress, - Signature: sig, - } - - // Serialize using canonical bytes - data, err := livenessCheck.ToCanonicalBytes() - if err != nil { - return errors.Wrap(err, "serialize liveness check") - } - - if err := p.engine.pubsub.PublishToBitmask( - p.engine.getConsensusMessageBitmask(), - data, - ); err != nil { - return errors.Wrap(err, "send liveness") - } - - p.engine.logger.Info( - "sent liveness check", - zap.Uint64("frame_number", frameNumber), - ) - - return nil -} - func (p *AppLivenessProvider) validateAndLockMessage( frameNumber uint64, i int, diff --git a/node/consensus/app/consensus_sync_provider.go b/node/consensus/app/consensus_sync_provider.go index 20e94ab..f1cefec 100644 --- a/node/consensus/app/consensus_sync_provider.go +++ b/node/consensus/app/consensus_sync_provider.go @@ -18,19 +18,87 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + mn "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/node/internal/frametime" + "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/protobufs" + "source.quilibrium.com/quilibrium/monorepo/types/channel" "source.quilibrium.com/quilibrium/monorepo/types/tries" up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p" ) +const defaultStateQueueCapacity = 10 + +type syncRequest struct { + frameNumber uint64 + peerId []byte +} + // AppSyncProvider implements SyncProvider type AppSyncProvider struct { - engine *AppConsensusEngine + // TODO(2.1.1+): Refactor out direct use of engine + engine *AppConsensusEngine + queuedStates chan syncRequest +} + +func NewAppSyncProvider( + engine *AppConsensusEngine, +) *AppSyncProvider { + return &AppSyncProvider{ + engine: engine, + queuedStates: make(chan syncRequest, defaultStateQueueCapacity), + } +} + +func (p *AppSyncProvider) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + ready() + for { + select { + case <-ctx.Done(): + return + case request := <-p.queuedStates: + finalized := p.engine.forks.FinalizedState() + if request.frameNumber <= + (*p.engine.forks.FinalizedState().State).Header.FrameNumber { + continue + } + p.engine.logger.Info( + "synchronizing with peer", + zap.String("peer", peer.ID(request.peerId).String()), + zap.Uint64("finalized_rank", finalized.Rank), + zap.Uint64("peer_frame", request.frameNumber), + ) + p.processState( + 
ctx, + request.frameNumber, + request.peerId, + ) + } + } +} + +func (p *AppSyncProvider) processState( + ctx context.Context, + frameNumber uint64, + peerID []byte, +) { + err := p.syncWithPeer( + ctx, + frameNumber, + peerID, + ) + if err != nil { + p.engine.logger.Error("could not sync with peer", zap.Error(err)) + } } func (p *AppSyncProvider) Synchronize( @@ -58,16 +126,44 @@ func (p *AppSyncProvider) Synchronize( p.engine.frameStoreMu.RUnlock() if !hasFrame { - // No peers and no frame - we're the first node, initialize genesis - p.engine.logger.Info("no frame detected, initializing with genesis") - syncStatusCheck.WithLabelValues(p.engine.appAddressHex, "synced").Inc() - genesis := p.engine.initializeGenesis() - dataCh <- &genesis - errCh <- nil + errCh <- errors.New("no frame") return } peerCount := p.engine.pubsub.GetPeerstoreCount() + requiredPeers := p.engine.config.Engine.MinimumPeersRequired + if peerCount < requiredPeers { + p.engine.logger.Info( + "waiting for minimum peers", + zap.Int("current", peerCount), + zap.Int("required", requiredPeers), + ) + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + waitPeers: + for { + select { + case <-ctx.Done(): + errCh <- errors.Wrap( + ctx.Err(), + "synchronize cancelled while waiting for peers", + ) + return + case <-ticker.C: + peerCount = p.engine.pubsub.GetPeerstoreCount() + if peerCount >= requiredPeers { + p.engine.logger.Info( + "minimum peers reached", + zap.Int("peers", peerCount), + ) + break waitPeers + } + } + } + } + if peerCount < int(p.engine.minimumProvers()) { errCh <- errors.Wrap( errors.New("minimum provers not reached"), @@ -146,7 +242,7 @@ func (p *AppSyncProvider) Synchronize( } } - err := p.syncWithMesh() + err := p.syncWithMesh(ctx) if err != nil { if latestFrame != nil { dataCh <- &latestFrame @@ -173,7 +269,7 @@ func (p *AppSyncProvider) Synchronize( return dataCh, errCh } -func (p *AppSyncProvider) syncWithMesh() error { +func (p *AppSyncProvider) syncWithMesh(ctx context.Context) error { p.engine.logger.Info("synchronizing with peers") latest, err := p.engine.appTimeReel.GetHead() @@ -224,7 +320,7 @@ func (p *AppSyncProvider) syncWithMesh() error { latest = head } - latest, err = p.syncWithPeer(latest, []byte(peerID)) + err = p.syncWithPeer(ctx, latest.Header.FrameNumber, []byte(peerID)) if err != nil { p.engine.logger.Debug("error syncing frame", zap.Error(err)) } @@ -240,89 +336,190 @@ func (p *AppSyncProvider) syncWithMesh() error { } func (p *AppSyncProvider) syncWithPeer( - latest *protobufs.AppShardFrame, + ctx context.Context, + frameNumber uint64, peerId []byte, -) (*protobufs.AppShardFrame, error) { +) error { p.engine.logger.Info( "polling peer for new frames", zap.String("peer_id", peer.ID(peerId).String()), - zap.Uint64("current_frame", latest.Header.FrameNumber), + zap.Uint64("current_frame", frameNumber), ) - syncTimeout := p.engine.config.Engine.SyncTimeout - dialCtx, cancelDial := context.WithTimeout(p.engine.ctx, syncTimeout) - defer cancelDial() - cc, err := p.engine.pubsub.GetDirectChannel(dialCtx, peerId, "sync") - if err != nil { - p.engine.logger.Debug( - "could not establish direct channel", - zap.Error(err), - ) - return latest, errors.Wrap(err, "sync") - } - defer func() { - if err := cc.Close(); err != nil { - p.engine.logger.Error("error while closing connection", zap.Error(err)) - } - }() - - client := protobufs.NewAppShardServiceClient(cc) - for { - getCtx, cancelGet := context.WithTimeout(p.engine.ctx, syncTimeout) - response, err := client.GetAppShardFrame( 
- getCtx, - &protobufs.GetAppShardFrameRequest{ - Filter: p.engine.appAddress, - FrameNumber: latest.Header.FrameNumber + 1, - }, - // The message size limits are swapped because the server is the one - // sending the data. - grpc.MaxCallRecvMsgSize( - p.engine.config.Engine.SyncMessageLimits.MaxSendMsgSize, - ), - grpc.MaxCallSendMsgSize( - p.engine.config.Engine.SyncMessageLimits.MaxRecvMsgSize, - ), - ) - cancelGet() - if err != nil { - p.engine.logger.Debug( - "could not get frame", - zap.Error(err), - ) - return latest, errors.Wrap(err, "sync") - } - - if response == nil { - p.engine.logger.Debug("received no response from peer") - return latest, nil - } - - if response.Frame == nil || response.Frame.Header == nil || - response.Frame.Header.FrameNumber != latest.Header.FrameNumber+1 || - response.Frame.Header.Timestamp < latest.Header.Timestamp { - p.engine.logger.Debug("received invalid response from peer") - return latest, nil - } + info := p.engine.peerInfoManager.GetPeerInfo(peerId) + if info == nil { p.engine.logger.Info( - "received new leading frame", - zap.Uint64("frame_number", response.Frame.Header.FrameNumber), - zap.Duration("frame_age", frametime.AppFrameSince(response.Frame)), + "no peer info known yet, skipping sync", + zap.String("peer", peer.ID(peerId).String()), ) - - if _, err := p.engine.frameProver.VerifyFrameHeader( - response.Frame.Header, - p.engine.blsConstructor, - ); err != nil { - return latest, errors.Wrap(err, "sync") + return nil + } + if len(info.Reachability) == 0 { + p.engine.logger.Info( + "no reachability info known yet, skipping sync", + zap.String("peer", peer.ID(peerId).String()), + ) + return nil + } + syncTimeout := p.engine.config.Engine.SyncTimeout + for _, reachability := range info.Reachability { + if !bytes.Equal(reachability.Filter, p.engine.appAddress) { + continue } + for _, s := range reachability.StreamMultiaddrs { + creds, err := p2p.NewPeerAuthenticator( + p.engine.logger, + p.engine.config.P2P, + nil, + nil, + nil, + nil, + [][]byte{[]byte(peerId)}, + map[string]channel.AllowedPeerPolicyType{}, + map[string]channel.AllowedPeerPolicyType{}, + ).CreateClientTLSCredentials([]byte(peerId)) + if err != nil { + return errors.Wrap(err, "sync") + } - err = p.engine.appTimeReel.Insert(p.engine.ctx, response.Frame) - if err != nil { - return latest, errors.Wrap(err, "sync") + ma, err := multiaddr.StringCast(s) + if err != nil { + return errors.Wrap(err, "sync") + } + mga, err := mn.ToNetAddr(ma) + if err != nil { + return errors.Wrap(err, "sync") + } + cc, err := grpc.NewClient( + mga.String(), + grpc.WithTransportCredentials(creds), + ) + + if err != nil { + p.engine.logger.Debug( + "could not establish direct channel, trying next multiaddr", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), + + zap.Error(err), + ) + continue + } + + defer func() { + if err := cc.Close(); err != nil { + p.engine.logger.Error( + "error while closing connection", + zap.Error(err), + ) + } + }() + + client := protobufs.NewAppShardServiceClient(cc) + + inner: + for { + getCtx, cancelGet := context.WithTimeout(ctx, syncTimeout) + response, err := client.GetAppShardProposal( + getCtx, + &protobufs.GetAppShardProposalRequest{ + Filter: p.engine.appAddress, + FrameNumber: frameNumber, + }, + // The message size limits are swapped because the server is the one + // sending the data. 
+ grpc.MaxCallRecvMsgSize( + p.engine.config.Engine.SyncMessageLimits.MaxSendMsgSize, + ), + grpc.MaxCallSendMsgSize( + p.engine.config.Engine.SyncMessageLimits.MaxRecvMsgSize, + ), + ) + cancelGet() + if err != nil { + p.engine.logger.Debug( + "could not get frame, trying next multiaddr", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), + zap.Error(err), + ) + break inner + } + + if response == nil { + p.engine.logger.Debug( + "received no response from peer", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), + zap.Error(err), + ) + break inner + } + if response.Proposal == nil || response.Proposal.State == nil || + response.Proposal.State.Header == nil || + response.Proposal.State.Header.FrameNumber != frameNumber { + p.engine.logger.Debug("received empty response from peer") + return nil + } + if err := response.Proposal.Validate(); err != nil { + p.engine.logger.Debug("received invalid response from peer") + return nil + } + p.engine.logger.Info( + "received new leading frame", + zap.Uint64( + "frame_number", + response.Proposal.State.Header.FrameNumber, + ), + zap.Duration( + "frame_age", + frametime.AppFrameSince(response.Proposal.State), + ), + ) + if _, err := p.engine.frameProver.VerifyFrameHeader( + response.Proposal.State.Header, + p.engine.blsConstructor, + ); err != nil { + return errors.Wrap(err, "sync") + } + + p.engine.appShardProposalQueue <- response.Proposal + frameNumber = frameNumber + 1 + } } + } - latest = response.Frame + p.engine.logger.Debug( + "failed to complete sync for all known multiaddrs", + zap.String("peer", peer.ID(peerId).String()), + ) + return nil +} + +func (p *AppSyncProvider) AddState( + sourcePeerID []byte, + frameNumber uint64, +) { + // Drop if we're within the threshold + if frameNumber <= + (*p.engine.forks.FinalizedState().State).Header.FrameNumber { + p.engine.logger.Debug("dropping stale state for sync") + return + } + + // Enqueue if we can, otherwise drop it because we'll catch up + select { + case p.queuedStates <- syncRequest{ + frameNumber: frameNumber, + peerId: sourcePeerID, + }: + p.engine.logger.Debug( + "enqueued sync request", + zap.String("peer", peer.ID(sourcePeerID).String()), + zap.Uint64("enqueued_frame_number", frameNumber), + ) + default: + p.engine.logger.Debug("no queue capacity, dropping state for sync") } } @@ -498,7 +695,7 @@ func (p *AppSyncProvider) hyperSyncWithProver( peerId, err := peer.IDFromPublicKey(pubKey) if err == nil { ch, err := p.engine.pubsub.GetDirectChannel( - p.engine.ctx, + context.Background(), []byte(peerId), "sync", ) @@ -506,7 +703,7 @@ func (p *AppSyncProvider) hyperSyncWithProver( if err == nil { defer ch.Close() client := protobufs.NewHypergraphComparisonServiceClient(ch) - str, err := client.HyperStream(p.engine.ctx) + str, err := client.HyperStream(context.Background()) if err != nil { p.engine.logger.Error("error from sync", zap.Error(err)) } else { diff --git a/node/consensus/app/consensus_transition_listener.go b/node/consensus/app/consensus_transition_listener.go deleted file mode 100644 index 3a98780..0000000 --- a/node/consensus/app/consensus_transition_listener.go +++ /dev/null @@ -1,52 +0,0 @@ -package app - -import ( - "go.uber.org/zap" - "source.quilibrium.com/quilibrium/monorepo/consensus" -) - -type AppTracer struct { - logger *zap.Logger -} - -func (t *AppTracer) Trace(message string) { - t.logger.Debug(message) -} - -func (t *AppTracer) Error(message string, err error) { - t.logger.Error(message, 
zap.Error(err)) -} - -// AppTransitionListener handles state transitions -type AppTransitionListener struct { - engine *AppConsensusEngine - logger *zap.Logger -} - -func (l *AppTransitionListener) OnTransition( - from consensus.State, - to consensus.State, - event consensus.Event, -) { - var stateValue float64 - switch to { - case consensus.StateStopped: - stateValue = 0 - case consensus.StateStarting: - stateValue = 1 - case consensus.StateLoading: - stateValue = 2 - case consensus.StateCollecting: - stateValue = 3 - case consensus.StateProving: - stateValue = 4 - case consensus.StatePublishing: - stateValue = 5 - case consensus.StateVerifying: - stateValue = 6 - case consensus.StateStopping: - stateValue = 7 - } - - engineState.WithLabelValues(l.engine.appAddressHex).Set(stateValue) -} diff --git a/node/consensus/app/consensus_types.go b/node/consensus/app/consensus_types.go index 69af852..16688ef 100644 --- a/node/consensus/app/consensus_types.go +++ b/node/consensus/app/consensus_types.go @@ -5,7 +5,7 @@ import ( "encoding/hex" "slices" - "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) // Type aliases for consensus types @@ -13,15 +13,35 @@ type PeerID struct { ID []byte } -func (p PeerID) Identity() consensus.Identity { - return hex.EncodeToString(p.ID) +// GetRank implements models.Unique. +func (p PeerID) GetRank() uint64 { + return 0 +} + +// GetSignature implements models.Unique. +func (p PeerID) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (p PeerID) GetTimestamp() uint64 { + return 0 +} + +// Source implements models.Unique. +func (p PeerID) Source() models.Identity { + return "" +} + +func (p PeerID) Identity() models.Identity { + return models.Identity(p.ID) } func (p PeerID) Rank() uint64 { return 0 } -func (p PeerID) Clone() consensus.Unique { +func (p PeerID) Clone() models.Unique { return PeerID{ ID: slices.Clone(p.ID), } @@ -29,12 +49,33 @@ func (p PeerID) Clone() consensus.Unique { // CollectedCommitments represents collected mutation commitments type CollectedCommitments struct { + rank uint64 frameNumber uint64 commitmentHash []byte prover []byte } -func (c CollectedCommitments) Identity() consensus.Identity { +// GetRank implements models.Unique. +func (c CollectedCommitments) GetRank() uint64 { + return c.rank +} + +// GetSignature implements models.Unique. +func (c CollectedCommitments) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (c CollectedCommitments) GetTimestamp() uint64 { + return 0 +} + +// Source implements models.Unique. 
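+// It identifies the source of the commitments by the stored prover address.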
+func (c CollectedCommitments) Source() models.Identity { + return models.Identity(c.prover) +} + +func (c CollectedCommitments) Identity() models.Identity { return hex.EncodeToString( slices.Concat( binary.BigEndian.AppendUint64(nil, c.frameNumber), @@ -48,7 +89,7 @@ func (c CollectedCommitments) Rank() uint64 { return c.frameNumber } -func (c CollectedCommitments) Clone() consensus.Unique { +func (c CollectedCommitments) Clone() models.Unique { return CollectedCommitments{ frameNumber: c.frameNumber, commitmentHash: slices.Clone(c.commitmentHash), diff --git a/node/consensus/app/consensus_voting_provider.go b/node/consensus/app/consensus_voting_provider.go index 834ae32..5babc4a 100644 --- a/node/consensus/app/consensus_voting_provider.go +++ b/node/consensus/app/consensus_voting_provider.go @@ -1,20 +1,15 @@ package app import ( - "bytes" "context" - "encoding/hex" "slices" - "sync" "time" - "github.com/iden3/go-iden3-crypto/poseidon" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "golang.org/x/crypto/sha3" - "google.golang.org/protobuf/proto" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/tries" up2p "source.quilibrium.com/quilibrium/monorepo/utils/p2p" @@ -22,672 +17,186 @@ import ( // AppVotingProvider implements VotingProvider type AppVotingProvider struct { - engine *AppConsensusEngine - proposalVotes map[consensus.Identity]map[consensus.Identity]**protobufs.FrameVote - mu sync.Mutex + engine *AppConsensusEngine } -func (p *AppVotingProvider) SendProposal( - proposal **protobufs.AppShardFrame, +// FinalizeQuorumCertificate implements consensus.VotingProvider. 
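+// It clones the voted state, embeds the aggregate signature in the frame
+// header, publishes the finalized frame on the frame message bitmask, and
+// returns a quorum certificate referencing the state's rank, frame number,
+// and selector.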
+func (p *AppVotingProvider) FinalizeQuorumCertificate( ctx context.Context, -) error { - timer := prometheus.NewTimer(framePublishingDuration.WithLabelValues( - p.engine.appAddressHex, - )) - defer timer.ObserveDuration() - - if proposal == nil || (*proposal).Header == nil { - framePublishingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc() - return errors.Wrap( - errors.New("invalid proposal"), - "send proposal", - ) + state *models.State[*protobufs.AppShardFrame], + aggregatedSignature models.AggregatedSignature, +) (models.QuorumCertificate, error) { + cloned := (*state.State).Clone().(*protobufs.AppShardFrame) + cloned.Header.PublicKeySignatureBls48581 = + &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + } + frameBytes, err := cloned.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "finalize quorum certificate") } - p.engine.logger.Info( - "sending proposal", - zap.Uint64("frame_number", (*proposal).Header.FrameNumber), - zap.String("prover", hex.EncodeToString((*proposal).Header.Prover)), + p.engine.pubsub.PublishToBitmask( + p.engine.getFrameMessageBitmask(), + frameBytes, ) - // Serialize the frame using canonical bytes - frameData, err := (*proposal).ToCanonicalBytes() - if err != nil { - framePublishingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc() - return errors.Wrap(err, "serialize proposal") - } - - // Publish to the network - if err := p.engine.pubsub.PublishToBitmask( - p.engine.getConsensusMessageBitmask(), - frameData, - ); err != nil { - framePublishingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc() - return errors.Wrap(err, "send proposal") - } - - // Store the frame - frameIDBI, _ := poseidon.HashBytes((*proposal).Header.Output) - frameID := frameIDBI.FillBytes(make([]byte, 32)) - p.engine.frameStoreMu.Lock() - p.engine.frameStore[string(frameID)] = - (*proposal).Clone().(*protobufs.AppShardFrame) - p.engine.frameStoreMu.Unlock() - - framePublishingTotal.WithLabelValues(p.engine.appAddressHex, "success").Inc() - return nil + return &protobufs.QuorumCertificate{ + Filter: (*state.State).Header.Address, + Rank: (*state.State).GetRank(), + FrameNumber: (*state.State).Header.FrameNumber, + Selector: []byte((*state.State).Identity()), + Timestamp: uint64(time.Now().UnixMilli()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + }, + }, nil } -func (p *AppVotingProvider) DecideAndSendVote( - proposals map[consensus.Identity]**protobufs.AppShardFrame, +// FinalizeTimeout implements consensus.VotingProvider. 
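+// It assembles a timeout certificate for the given rank from the aggregated
+// timeout signature, the newest known quorum certificate, and the reported
+// latest quorum certificate ranks.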
+func (p *AppVotingProvider) FinalizeTimeout( ctx context.Context, -) (PeerID, **protobufs.FrameVote, error) { - var chosenProposal *protobufs.AppShardFrame - var chosenID consensus.Identity - parentFrame := p.engine.GetFrame() - if parentFrame == nil { - return PeerID{}, nil, errors.Wrap( - errors.New("no frame: no valid proposals to vote on"), - "decide and send vote", - ) - } - - parentSelector := p.engine.calculateFrameSelector(parentFrame.Header) - provers, err := p.engine.proverRegistry.GetOrderedProvers( - [32]byte(parentSelector), - p.engine.appAddress, - ) - if err != nil { - return PeerID{}, nil, errors.Wrap(err, "decide and send vote") - } - - for _, id := range provers { - prop := proposals[PeerID{ID: id}.Identity()] - if prop == nil { - p.engine.logger.Debug( - "proposer not found for prover", - zap.String("prover", PeerID{ID: id}.Identity()), - ) - continue - } - // Validate the proposal - valid, err := p.engine.frameValidator.Validate((*prop)) - if err != nil { - p.engine.logger.Debug("proposal validation error", zap.Error(err)) - continue - } - - p.engine.frameStoreMu.RLock() - _, hasParent := p.engine.frameStore[string( - (*prop).Header.ParentSelector, - )] - p.engine.frameStoreMu.RUnlock() - // Do we have continuity? - if !hasParent { - p.engine.logger.Debug( - "proposed frame out of sequence", - zap.String( - "proposed_parent_selector", - hex.EncodeToString((*prop).Header.ParentSelector), - ), - zap.String( - "target_parent_selector", - hex.EncodeToString(parentSelector), - ), - zap.Uint64("proposed_frame_number", (*prop).Header.FrameNumber), - zap.Uint64("target_frame_number", parentFrame.Header.FrameNumber+1), - ) - continue - } else { - p.engine.logger.Debug( - "proposed frame in sequence", - zap.String( - "proposed_parent_selector", - hex.EncodeToString((*prop).Header.ParentSelector), - ), - zap.String( - "target_parent_selector", - hex.EncodeToString(parentSelector), - ), - zap.Uint64("proposed_frame_number", (*prop).Header.FrameNumber), - zap.Uint64("target_frame_number", parentFrame.Header.FrameNumber+1), - ) - } - - if valid { - // Validate fee multiplier is within acceptable bounds (+/-10% of base) - baseFeeMultiplier, err := p.engine.dynamicFeeManager.GetNextFeeMultiplier( - p.engine.appAddress, - ) - if err != nil { - p.engine.logger.Debug( - "could not get base fee multiplier for validation", - zap.Error(err), - ) - continue - } - - // Calculate the maximum allowed deviation (10%) - maxIncrease := baseFeeMultiplier + (baseFeeMultiplier / 10) - minDecrease := baseFeeMultiplier - (baseFeeMultiplier / 10) - if minDecrease < 1 { - minDecrease = 1 - } - - proposedFee := (*prop).Header.FeeMultiplierVote - - // Reject if fee is outside acceptable bounds - if proposedFee > maxIncrease || proposedFee < minDecrease { - p.engine.logger.Debug( - "rejecting proposal with excessive fee change", - zap.Uint64("base_fee", baseFeeMultiplier), - zap.Uint64("proposed_fee", proposedFee), - zap.Uint64("max_allowed", maxIncrease), - zap.Uint64("min_allowed", minDecrease), - ) - continue - } - - chosenProposal = (*prop) - chosenID = PeerID{ID: id}.Identity() - break - } - } - - if chosenProposal == nil { - return PeerID{}, nil, errors.Wrap( - errors.New("no valid proposals to vote on"), - "decide and send vote", - ) - } + rank uint64, + latestQuorumCertificate models.QuorumCertificate, + latestQuorumCertificateRanks []uint64, + aggregatedSignature models.AggregatedSignature, +) (models.TimeoutCertificate, error) { + return &protobufs.TimeoutCertificate{ + Filter: 
p.engine.appAddress, + Rank: rank, + LatestRanks: latestQuorumCertificateRanks, + LatestQuorumCertificate: latestQuorumCertificate.(*protobufs.QuorumCertificate), + Timestamp: uint64(time.Now().UnixMilli()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + }, + }, nil +} +// SignTimeoutVote implements consensus.VotingProvider. +func (p *AppVotingProvider) SignTimeoutVote( + ctx context.Context, + filter []byte, + currentRank uint64, + newestQuorumCertificateRank uint64, +) (**protobufs.ProposalVote, error) { // Get signing key signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) if publicKey == nil { - return PeerID{}, nil, errors.Wrap( + p.engine.logger.Error("no proving key available") + return nil, errors.Wrap( errors.New("no proving key available for voting"), - "decide and send vote", + "sign vote", ) } // Create vote (signature) - signatureData, err := p.engine.frameProver.GetFrameSignaturePayload( - chosenProposal.Header, + signatureData := verification.MakeTimeoutMessage( + filter, + currentRank, + newestQuorumCertificateRank, ) + + sig, err := signer.SignWithDomain(signatureData, []byte("appshardtimeout")) if err != nil { - return PeerID{}, nil, errors.Wrap(err, "decide and send vote") + p.engine.logger.Error("could not sign vote", zap.Error(err)) + return nil, errors.Wrap(err, "sign vote") } - sig, err := signer.SignWithDomain( - signatureData, - append([]byte("shard"), p.engine.appAddress...), - ) - if err != nil { - return PeerID{}, nil, errors.Wrap(err, "decide and send vote") - } - - // Get our voter address voterAddress := p.engine.getAddressFromPublicKey(publicKey) // Create vote message - vote := &protobufs.FrameVote{ - Filter: p.engine.appAddress, - FrameNumber: chosenProposal.Header.FrameNumber, - Proposer: chosenProposal.Header.Prover, - Approve: true, - Timestamp: time.Now().UnixMilli(), + vote := &protobufs.ProposalVote{ + Filter: filter, + FrameNumber: 0, + Rank: currentRank, + Selector: nil, + Timestamp: uint64(time.Now().UnixMilli()), PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ Address: voterAddress, Signature: sig, }, } - // Serialize and publish vote - data, err := vote.ToCanonicalBytes() - if err != nil { - return PeerID{}, nil, errors.Wrap(err, "serialize vote") - } - - if err := p.engine.pubsub.PublishToBitmask( - p.engine.getConsensusMessageBitmask(), - data, - ); err != nil { - p.engine.logger.Error("failed to publish vote", zap.Error(err)) - } - - // Store our vote - p.mu.Lock() - if _, ok := p.proposalVotes[chosenID]; !ok { - p.proposalVotes[chosenID] = map[consensus.Identity]**protobufs.FrameVote{} - } - p.proposalVotes[chosenID][p.engine.getPeerID().Identity()] = &vote - p.mu.Unlock() - - p.engine.logger.Info( - "decided and sent vote", - zap.Uint64("frame_number", chosenProposal.Header.FrameNumber), - zap.String("for_proposal", chosenID), - ) - - // Return the peer ID from the chosen proposal's prover - return PeerID{ID: chosenProposal.Header.Prover}, &vote, nil + return &vote, nil } -func (p *AppVotingProvider) SendVote( - vote **protobufs.FrameVote, +// SignVote implements consensus.VotingProvider. 
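+// It signs the vote message for the proposed state under the "appshard"
+// domain and, when this node is not the leader for the state's rank, appends
+// the cached proof for that rank to the signature bytes.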
+func (p *AppVotingProvider) SignVote( ctx context.Context, -) (PeerID, error) { - if vote == nil || *vote == nil { - return PeerID{}, errors.Wrap( - errors.New("no vote provided"), - "send vote", + state *models.State[*protobufs.AppShardFrame], +) (**protobufs.ProposalVote, error) { + // Get signing key + signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) + if publicKey == nil { + p.engine.logger.Error("no proving key available") + return nil, errors.Wrap( + errors.New("no proving key available for voting"), + "sign vote", ) } - bumpVote := &protobufs.FrameVote{ - Filter: p.engine.appAddress, - FrameNumber: (*vote).FrameNumber, - Proposer: (*vote).Proposer, - Approve: true, - Timestamp: time.Now().UnixMilli(), - PublicKeySignatureBls48581: (*vote).PublicKeySignatureBls48581, - } - - data, err := (*bumpVote).ToCanonicalBytes() + nextLeader, err := p.engine.LeaderForRank(state.Rank) if err != nil { - return PeerID{}, errors.Wrap(err, "serialize vote") + p.engine.logger.Error("could not determine next prover", zap.Error(err)) + return nil, errors.Wrap( + errors.New("could not determine next prover"), + "sign vote", + ) } - if err := p.engine.pubsub.PublishToBitmask( - p.engine.getConsensusMessageBitmask(), - data, - ); err != nil { - p.engine.logger.Error("failed to publish vote", zap.Error(err)) - } + var extProof []byte + if nextLeader != p.engine.Self() { + p.engine.proofCacheMu.RLock() + proof, ok := p.engine.proofCache[state.Rank] + p.engine.proofCacheMu.RUnlock() - return PeerID{ID: (*vote).Proposer}, nil -} - -func (p *AppVotingProvider) IsQuorum( - proposalVotes map[consensus.Identity]**protobufs.FrameVote, - ctx context.Context, -) (bool, error) { - // Get active prover count for quorum calculation - activeProvers, err := p.engine.proverRegistry.GetActiveProvers( - p.engine.appAddress, - ) - if err != nil { - return false, errors.Wrap(err, "is quorum") - } - - minVotes := len(activeProvers) * 2 / 3 - if minVotes < int(p.engine.minimumProvers()) { - minVotes = int(p.engine.minimumProvers()) - } - - totalVotes := len(proposalVotes) - - if totalVotes >= minVotes { - return true, nil - } - - return false, nil -} - -func (p *AppVotingProvider) FinalizeVotes( - proposals map[consensus.Identity]**protobufs.AppShardFrame, - proposalVotes map[consensus.Identity]**protobufs.FrameVote, - ctx context.Context, -) (**protobufs.AppShardFrame, PeerID, error) { - // Count approvals and collect signatures - var signatures [][]byte - var publicKeys [][]byte - var chosenProposal **protobufs.AppShardFrame - var chosenProposerID PeerID - winnerCount := 0 - parentFrame := p.engine.GetFrame() - voteCount := map[string]int{} - for _, vote := range proposalVotes { - count, ok := voteCount[string((*vote).Proposer)] if !ok { - voteCount[string((*vote).Proposer)] = 1 - } else { - voteCount[string((*vote).Proposer)] = count + 1 - } - } - for _, proposal := range proposals { - if proposal == nil { - continue - } - - p.engine.frameStoreMu.RLock() - _, hasParent := p.engine.frameStore[string( - (*proposal).Header.ParentSelector, - )] - p.engine.frameStoreMu.RUnlock() - - count := 0 - if hasParent { - count = voteCount[string((*proposal).Header.Prover)] - } - if count > winnerCount { - winnerCount = count - chosenProposal = proposal - chosenProposerID = PeerID{ID: (*proposal).Header.Prover} + return nil, errors.Wrap(errors.New("no proof ready for vote"), "sign vote") } + extProof = proof[:] } - if chosenProposal == nil && len(proposals) > 0 { - // No specific votes, just pick first proposal - 
for _, proposal := range proposals { - if proposal == nil { - continue - } - p.engine.frameStoreMu.RLock() - parent, hasParent := p.engine.frameStore[string( - (*proposal).Header.ParentSelector, - )] - p.engine.frameStoreMu.RUnlock() - if hasParent && (parentFrame == nil || - parent.Header.FrameNumber == parentFrame.Header.FrameNumber) { - chosenProposal = proposal - chosenProposerID = PeerID{ID: (*proposal).Header.Prover} - break - } - } - } - - if chosenProposal == nil { - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("no proposals to finalize"), - "finalize votes", - ) - } - - err := p.engine.ensureGlobalClient() + // Create vote (signature) + signatureData := verification.MakeVoteMessage( + (*state.State).Header.Address, + state.Rank, + state.Identifier, + ) + sig, err := signer.SignWithDomain(signatureData, []byte("appshard")) if err != nil { - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("cannot confirm cross-shard locks"), - "finalize votes", - ) + p.engine.logger.Error("could not sign vote", zap.Error(err)) + return nil, errors.Wrap(err, "sign vote") } - res, err := p.engine.globalClient.GetLockedAddresses( - ctx, - &protobufs.GetLockedAddressesRequest{ - ShardAddress: p.engine.appAddress, - FrameNumber: (*chosenProposal).Header.FrameNumber, + voterAddress := p.engine.getAddressFromPublicKey(publicKey) + + // Create vote message + vote := &protobufs.ProposalVote{ + Filter: (*state.State).Header.Address, + FrameNumber: (*state.State).Header.FrameNumber, + Rank: (*state.State).Header.Rank, + Selector: []byte((*state.State).Identity()), + Timestamp: uint64(time.Now().UnixMilli()), + PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ + Address: voterAddress, + Signature: slices.Concat(sig, extProof), }, - ) - if err != nil { - p.engine.globalClient = nil - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("cannot confirm cross-shard locks"), - "finalize votes", - ) } - // Build a map of transaction hashes to their committed status - txMap := map[string]bool{} - for _, req := range (*chosenProposal).Requests { - tx, err := req.ToCanonicalBytes() - if err != nil { - return &parentFrame, PeerID{}, errors.Wrap( - err, - "finalize votes", - ) - } - - txHash := sha3.Sum256(tx) - p.engine.logger.Debug( - "adding transaction in frame to commit check", - zap.String("tx_hash", hex.EncodeToString(txHash[:])), - ) - txMap[string(txHash[:])] = false - } - - // Check that transactions are committed in our shard and collect shard - // addresses - shardAddressesSet := make(map[string]bool) - for _, tx := range res.Transactions { - p.engine.logger.Debug( - "checking transaction from global map", - zap.String("tx_hash", hex.EncodeToString(tx.TransactionHash)), - ) - if _, ok := txMap[string(tx.TransactionHash)]; ok { - txMap[string(tx.TransactionHash)] = tx.Committed - - // Extract shard addresses from each locked transaction's shard addresses - for _, shardAddr := range tx.ShardAddresses { - // Extract the applicable shard address (can be shorter than the full - // address) - extractedShards := p.extractShardAddresses(shardAddr) - for _, extractedShard := range extractedShards { - shardAddrStr := string(extractedShard) - shardAddressesSet[shardAddrStr] = true - } - } - } - } - - // Check that all transactions are committed in our shard - for _, committed := range txMap { - if !committed { - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("tx not committed in our shard"), - "finalize votes", - ) - } - } - - // Check cross-shard locks for each unique 
shard address - for shardAddrStr := range shardAddressesSet { - shardAddr := []byte(shardAddrStr) - - // Skip our own shard since we already checked it - if bytes.Equal(shardAddr, p.engine.appAddress) { - continue - } - - // Query the global client for locked addresses in this shard - shardRes, err := p.engine.globalClient.GetLockedAddresses( - ctx, - &protobufs.GetLockedAddressesRequest{ - ShardAddress: shardAddr, - FrameNumber: (*chosenProposal).Header.FrameNumber, - }, - ) - if err != nil { - p.engine.logger.Debug( - "failed to get locked addresses for shard", - zap.String("shard_addr", hex.EncodeToString(shardAddr)), - zap.Error(err), - ) - continue - } - - // Check that all our transactions are committed in this shard - for txHashStr := range txMap { - committedInShard := false - for _, tx := range shardRes.Transactions { - if string(tx.TransactionHash) == txHashStr { - committedInShard = tx.Committed - break - } - } - - if !committedInShard { - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("tx cross-shard lock unconfirmed"), - "finalize votes", - ) - } - } - } - - proverSet, err := p.engine.proverRegistry.GetActiveProvers( - p.engine.appAddress, - ) - if err != nil { - return &parentFrame, PeerID{}, errors.Wrap(err, "finalize votes") - } - - proverMap := map[string][]byte{} - for _, prover := range proverSet { - proverMap[string(prover.Address)] = prover.PublicKey - } - - voterMap := map[string]**protobufs.FrameVote{} - - // Collect all signatures for aggregation - for _, vote := range proposalVotes { - if vote == nil { - continue - } - - if (*vote).FrameNumber != (*chosenProposal).Header.FrameNumber || - !bytes.Equal((*vote).Proposer, (*chosenProposal).Header.Prover) { - continue - } - - if (*vote).PublicKeySignatureBls48581.Signature != nil && - (*vote).PublicKeySignatureBls48581.Address != nil { - signatures = append( - signatures, - (*vote).PublicKeySignatureBls48581.Signature, - ) - - pub := proverMap[string((*vote).PublicKeySignatureBls48581.Address)] - publicKeys = append(publicKeys, pub) - voterMap[string((*vote).PublicKeySignatureBls48581.Address)] = vote - } - } - - if len(signatures) == 0 { - return &parentFrame, PeerID{}, errors.Wrap( - errors.New("no signatures to aggregate"), - "finalize votes", - ) - } - - // Aggregate signatures - aggregateOutput, err := p.engine.keyManager.Aggregate(publicKeys, signatures) - if err != nil { - return &parentFrame, PeerID{}, errors.Wrap(err, "finalize votes") - } - aggregatedSignature := aggregateOutput.GetAggregateSignature() - - // Create participant bitmap - provers, err := p.engine.proverRegistry.GetActiveProvers(p.engine.appAddress) - if err != nil { - return &parentFrame, PeerID{}, errors.Wrap(err, "finalize votes") - } - - bitmask := make([]byte, (len(provers)+7)/8) - - for i := 0; i < len(provers); i++ { - activeProver := provers[i] - if _, ok := voterMap[string(activeProver.Address)]; !ok { - continue - } - if !bytes.Equal( - (*voterMap[string(activeProver.Address)]).Proposer, - chosenProposerID.ID, - ) { - continue - } - - byteIndex := i / 8 - bitIndex := i % 8 - bitmask[byteIndex] |= (1 << bitIndex) - } - - // Update the frame with aggregated signature - finalizedFrame := &protobufs.AppShardFrame{ - Header: &protobufs.FrameHeader{ - Address: (*chosenProposal).Header.Address, - FrameNumber: (*chosenProposal).Header.FrameNumber, - ParentSelector: (*chosenProposal).Header.ParentSelector, - Timestamp: (*chosenProposal).Header.Timestamp, - Difficulty: (*chosenProposal).Header.Difficulty, - RequestsRoot: 
(*chosenProposal).Header.RequestsRoot, - StateRoots: (*chosenProposal).Header.StateRoots, - Output: (*chosenProposal).Header.Output, - Prover: (*chosenProposal).Header.Prover, - FeeMultiplierVote: (*chosenProposal).Header.FeeMultiplierVote, - PublicKeySignatureBls48581: &protobufs.BLS48581AggregateSignature{ - Signature: aggregatedSignature, - PublicKey: &protobufs.BLS48581G2PublicKey{ - KeyValue: aggregateOutput.GetAggregatePublicKey(), - }, - Bitmask: bitmask, - }, - }, - Requests: (*chosenProposal).Requests, - } - - p.engine.logger.Info( - "finalized votes", - zap.Uint64("frame_number", finalizedFrame.Header.FrameNumber), - zap.Int("signatures", len(signatures)), - ) - - return &finalizedFrame, chosenProposerID, nil -} - -func (p *AppVotingProvider) SendConfirmation( - finalized **protobufs.AppShardFrame, - ctx context.Context, -) error { - if finalized == nil || (*finalized).Header == nil { - return errors.New("invalid finalized frame") - } - - copiedFinalized := proto.Clone(*finalized).(*protobufs.AppShardFrame) - - // Create frame confirmation - confirmation := &protobufs.FrameConfirmation{ - Filter: p.engine.appAddress, - FrameNumber: copiedFinalized.Header.FrameNumber, - Selector: p.engine.calculateFrameSelector((*finalized).Header), - Timestamp: time.Now().UnixMilli(), - AggregateSignature: copiedFinalized.Header.PublicKeySignatureBls48581, - } - - // Serialize using canonical bytes - data, err := confirmation.ToCanonicalBytes() - if err != nil { - return errors.Wrap(err, "serialize confirmation") - } - - if err := p.engine.pubsub.PublishToBitmask( - p.engine.getConsensusMessageBitmask(), - data, - ); err != nil { - return errors.Wrap(err, "publish confirmation") - } - - // Insert into time reel - if err := p.engine.appTimeReel.Insert( - p.engine.ctx, - copiedFinalized, - ); err != nil { - p.engine.logger.Error("failed to add frame to time reel", zap.Error(err)) - // Clean up on error - - frameIDBI, _ := poseidon.HashBytes(copiedFinalized.Header.Output) - frameID := frameIDBI.FillBytes(make([]byte, 32)) - p.engine.frameStoreMu.Lock() - delete(p.engine.frameStore, string(frameID)) - p.engine.frameStoreMu.Unlock() - } - - p.engine.logger.Info( - "sent confirmation", - zap.Uint64("frame_number", copiedFinalized.Header.FrameNumber), - ) - - return nil + return &vote, nil } // GetFullPath converts a key to its path representation using 6-bit nibbles @@ -797,3 +306,5 @@ func uint32ToBytes(path []uint32) []byte { } return bytes } + +var _ consensus.VotingProvider[*protobufs.AppShardFrame, *protobufs.ProposalVote, PeerID] = (*AppVotingProvider)(nil) diff --git a/node/consensus/app/event_distributor.go b/node/consensus/app/event_distributor.go index d5138ed..88dd4d7 100644 --- a/node/consensus/app/event_distributor.go +++ b/node/consensus/app/event_distributor.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/consensus/global" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" globalintrinsics "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" @@ -13,21 +14,15 @@ import ( "source.quilibrium.com/quilibrium/monorepo/types/schema" ) -func (e *AppConsensusEngine) eventDistributorLoop() { +func (e *AppConsensusEngine) eventDistributorLoop( + ctx lifecycle.SignalerContext, +) { defer func() { if r := recover(); r != nil { e.logger.Error("fatal error encountered", zap.Any("panic", r)) - if e.cancel != nil { - e.cancel() - 
} - // Avoid blocking on quit channel during panic recovery - select { - case e.quit <- struct{}{}: - default: - } + ctx.Throw(errors.Errorf("fatal unhandled error encountered: %v", r)) } }() - defer e.wg.Done() // Subscribe to events from the event distributor eventCh := e.eventDistributor.Subscribe(hex.EncodeToString(e.appAddress)) @@ -35,7 +30,7 @@ func (e *AppConsensusEngine) eventDistributorLoop() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -172,16 +167,10 @@ func (e *AppConsensusEngine) eventDistributorLoop() { if ok && data.Message != "" { e.logger.Error(data.Message) e.halt() - if err := e.stateMachine.Stop(); err != nil { - e.logger.Error( - "error occurred while halting consensus", - zap.Error(err), - ) - } go func() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-time.After(10 * time.Second): e.logger.Error( @@ -200,16 +189,10 @@ func (e *AppConsensusEngine) eventDistributorLoop() { zap.Error(data.Error), ) e.halt() - if err := e.stateMachine.Stop(); err != nil { - e.logger.Error( - "error occurred while halting consensus", - zap.Error(err), - ) - } go func() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-time.After(10 * time.Second): e.logger.Error( diff --git a/node/consensus/app/factory.go b/node/consensus/app/factory.go index 8483aa2..e90309f 100644 --- a/node/consensus/app/factory.go +++ b/node/consensus/app/factory.go @@ -5,8 +5,10 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "source.quilibrium.com/quilibrium/monorepo/config" + qconsensus "source.quilibrium.com/quilibrium/monorepo/consensus" "source.quilibrium.com/quilibrium/monorepo/node/consensus/events" "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" + "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/channel" "source.quilibrium.com/quilibrium/monorepo/types/compiler" "source.quilibrium.com/quilibrium/monorepo/types/consensus" @@ -30,6 +32,7 @@ type AppConsensusEngineFactory struct { inboxStore store.InboxStore shardsStore store.ShardsStore hypergraphStore store.HypergraphStore + consensusStore qconsensus.ConsensusStore[*protobufs.ProposalVote] frameProver crypto.FrameProver inclusionProver crypto.InclusionProver bulletproofProver crypto.BulletproofProver @@ -60,6 +63,7 @@ func NewAppConsensusEngineFactory( inboxStore store.InboxStore, shardsStore store.ShardsStore, hypergraphStore store.HypergraphStore, + consensusStore qconsensus.ConsensusStore[*protobufs.ProposalVote], frameProver crypto.FrameProver, inclusionProver crypto.InclusionProver, bulletproofProver crypto.BulletproofProver, @@ -88,6 +92,7 @@ func NewAppConsensusEngineFactory( inboxStore: inboxStore, shardsStore: shardsStore, hypergraphStore: hypergraphStore, + consensusStore: consensusStore, frameProver: frameProver, inclusionProver: inclusionProver, bulletproofProver: bulletproofProver, @@ -145,6 +150,7 @@ func (f *AppConsensusEngineFactory) CreateAppConsensusEngine( f.inboxStore, f.shardsStore, f.hypergraphStore, + f.consensusStore, f.frameProver, f.inclusionProver, f.bulletproofProver, diff --git a/node/consensus/app/integration_helper_test.go b/node/consensus/app/integration_helper_test.go index 37a7946..9de38f6 100644 --- a/node/consensus/app/integration_helper_test.go +++ b/node/consensus/app/integration_helper_test.go @@ -497,6 +497,9 @@ func registerProverInHypergraphWithFilter(t *testing.T, hg thypergraph.Hypergrap t.Fatalf("Failed to insert status: %v", err) } + err = 
tree.Insert([]byte{3 << 2}, []byte{0, 0, 0, 0, 0, 0, 3, 232}, nil, big.NewInt(0)) // seniority = 1000 + require.NoError(t, err) + // Type Index: typeBI, _ := poseidon.HashBytes( slices.Concat(bytes.Repeat([]byte{0xff}, 32), []byte("prover:Prover")), diff --git a/node/consensus/app/message_processors.go b/node/consensus/app/message_processors.go index b8d468b..21c6bd8 100644 --- a/node/consensus/app/message_processors.go +++ b/node/consensus/app/message_processors.go @@ -2,6 +2,7 @@ package app import ( "bytes" + "context" "encoding/binary" "encoding/hex" @@ -10,17 +11,19 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "golang.org/x/crypto/sha3" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/crypto" ) -func (e *AppConsensusEngine) processConsensusMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processConsensusMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -30,14 +33,14 @@ func (e *AppConsensusEngine) processConsensusMessageQueue() { } } -func (e *AppConsensusEngine) processProverMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processProverMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.proverMessageQueue: e.handleProverMessage(message) @@ -45,14 +48,14 @@ func (e *AppConsensusEngine) processProverMessageQueue() { } } -func (e *AppConsensusEngine) processFrameMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processFrameMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -62,14 +65,14 @@ func (e *AppConsensusEngine) processFrameMessageQueue() { } } -func (e *AppConsensusEngine) processGlobalFrameMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processGlobalFrameMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -79,12 +82,12 @@ func (e *AppConsensusEngine) processGlobalFrameMessageQueue() { } } -func (e *AppConsensusEngine) processAlertMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processAlertMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -94,14 +97,27 @@ func (e *AppConsensusEngine) processAlertMessageQueue() { } } -func (e *AppConsensusEngine) processPeerInfoMessageQueue() { - defer e.wg.Done() +func (e *AppConsensusEngine) processAppShardProposalQueue( + ctx lifecycle.SignalerContext, +) { + for { + select { + case <-ctx.Done(): + return + case proposal := <-e.appShardProposalQueue: + e.handleAppShardProposal(proposal) + } + } +} +func (e *AppConsensusEngine) processPeerInfoMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -111,12 +127,12 @@ func (e *AppConsensusEngine) processPeerInfoMessageQueue() { } } -func (e *AppConsensusEngine) 
processDispatchMessageQueue() { - defer e.wg.Done() - +func (e *AppConsensusEngine) processDispatchMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -126,6 +142,455 @@ func (e *AppConsensusEngine) processDispatchMessageQueue() { } } +func (e *AppConsensusEngine) handleAppShardProposal( + proposal *protobufs.AppShardProposal, +) { + defer func() { + if r := recover(); r != nil { + e.logger.Error( + "panic recovered from proposal", + zap.Any("panic", r), + zap.Stack("stacktrace"), + ) + } + }() + + e.logger.Debug( + "handling global proposal", + zap.String("id", hex.EncodeToString([]byte(proposal.State.Identity()))), + ) + + // Small gotcha: the proposal structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + pqc := proposal.ParentQuorumCertificate + prtc := proposal.PriorRankTimeoutCertificate + vote := proposal.Vote + signedProposal := &models.SignedProposal[*protobufs.AppShardFrame, *protobufs.ProposalVote]{ + Proposal: models.Proposal[*protobufs.AppShardFrame]{ + State: &models.State[*protobufs.AppShardFrame]{ + Rank: proposal.State.GetRank(), + Identifier: proposal.State.Identity(), + ProposerID: proposal.State.Source(), + Timestamp: proposal.State.GetTimestamp(), + State: &proposal.State, + }, + }, + Vote: &vote, + } + + if pqc != nil { + signedProposal.Proposal.State.ParentQuorumCertificate = pqc + } + + if prtc != nil { + signedProposal.PreviousRankTimeoutCertificate = prtc + } + + finalized := e.forks.FinalizedState() + finalizedRank := finalized.Rank + finalizedFrameNumber := (*finalized.State).Header.FrameNumber + + // drop proposals if we already processed them + if proposal.State.Header.FrameNumber <= finalizedFrameNumber || + proposal.State.Header.Rank <= finalizedRank { + e.logger.Debug("dropping stale proposal") + return + } + _, _, err := e.clockStore.GetShardClockFrame( + proposal.State.Header.Address, + proposal.State.Header.FrameNumber, + false, + ) + if err == nil { + e.logger.Debug("dropping stale proposal") + return + } + + if proposal.State.Header.FrameNumber != 0 { + parent, _, err := e.clockStore.GetShardClockFrame( + proposal.State.Header.Address, + proposal.State.Header.FrameNumber-1, + false, + ) + if err != nil || parent == nil || !bytes.Equal( + []byte(parent.Identity()), + proposal.State.Header.ParentSelector, + ) { + e.logger.Debug( + "parent frame not stored, requesting sync", + zap.Uint64("frame_number", proposal.State.Header.FrameNumber-1), + ) + e.cacheProposal(proposal) + + peerID, err := e.getPeerIDOfProver(proposal.State.Header.Prover) + if err != nil { + peerID, err = e.getRandomProverPeerId() + if err != nil { + e.logger.Debug("could not get peer id for sync", zap.Error(err)) + return + } + } + + head, err := e.appTimeReel.GetHead() + if err != nil || head == nil || head.Header == nil { + e.logger.Debug("could not get shard time reel head", zap.Error(err)) + return + } + + e.syncProvider.AddState([]byte(peerID), head.Header.FrameNumber) + return + } + } + + frameNumber := proposal.State.Header.FrameNumber + expectedFrame, err := e.appTimeReel.GetHead() + if err != nil { + e.logger.Error("could not obtain app time reel head", zap.Error(err)) + return + } + + expectedFrameNumber := uint64(0) + if expectedFrame != nil && expectedFrame.Header != nil { + expectedFrameNumber = 
expectedFrame.Header.FrameNumber + 1 + } + + if frameNumber < expectedFrameNumber { + e.logger.Debug( + "dropping proposal behind expected frame", + zap.Uint64("frame_number", frameNumber), + zap.Uint64("expected_frame_number", expectedFrameNumber), + ) + return + } + + if frameNumber == expectedFrameNumber { + e.deleteCachedProposal(frameNumber) + if e.processProposal(proposal) { + e.drainProposalCache(frameNumber + 1) + return + } + + e.logger.Debug("failed to process expected proposal, caching") + e.cacheProposal(proposal) + return + } + + e.cacheProposal(proposal) + e.drainProposalCache(expectedFrameNumber) +} + +func (e *AppConsensusEngine) processProposal( + proposal *protobufs.AppShardProposal, +) bool { + e.logger.Debug( + "processing proposal", + zap.String("id", hex.EncodeToString([]byte(proposal.State.Identity()))), + ) + + err := e.VerifyQuorumCertificate(proposal.ParentQuorumCertificate) + if err != nil { + e.logger.Debug("proposal has invalid qc", zap.Error(err)) + return false + } + + if proposal.PriorRankTimeoutCertificate != nil { + err := e.VerifyTimeoutCertificate(proposal.PriorRankTimeoutCertificate) + if err != nil { + e.logger.Debug("proposal has invalid tc", zap.Error(err)) + return false + } + } + + if proposal.Vote != nil { + err := e.VerifyVote(&proposal.Vote) + if err != nil { + e.logger.Debug("proposal has invalid vote", zap.Error(err)) + return false + } + } + + err = proposal.State.Validate() + if err != nil { + e.logger.Debug("proposal is not valid", zap.Error(err)) + return false + } + + valid, err := e.frameValidator.Validate(proposal.State) + if !valid || err != nil { + e.logger.Debug("invalid frame in proposal", zap.Error(err)) + return false + } + + // Small gotcha: the proposal structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + pqc := proposal.ParentQuorumCertificate + prtc := proposal.PriorRankTimeoutCertificate + vote := proposal.Vote + signedProposal := &models.SignedProposal[*protobufs.AppShardFrame, *protobufs.ProposalVote]{ + Proposal: models.Proposal[*protobufs.AppShardFrame]{ + State: &models.State[*protobufs.AppShardFrame]{ + Rank: proposal.State.GetRank(), + Identifier: proposal.State.Identity(), + ProposerID: proposal.Vote.Identity(), + Timestamp: proposal.State.GetTimestamp(), + State: &proposal.State, + }, + }, + Vote: &vote, + } + + if pqc != nil { + signedProposal.Proposal.State.ParentQuorumCertificate = pqc + } + + if prtc != nil { + signedProposal.PreviousRankTimeoutCertificate = prtc + } + + e.voteAggregator.AddState(signedProposal) + e.consensusParticipant.SubmitProposal(signedProposal) + proposalProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() + + e.trySealParentWithChild(proposal) + e.registerPendingCertifiedParent(proposal) + + return true +} + +func (e *AppConsensusEngine) cacheProposal( + proposal *protobufs.AppShardProposal, +) { + if proposal == nil || proposal.State == nil || proposal.State.Header == nil { + return + } + + frameNumber := proposal.State.Header.FrameNumber + e.proposalCacheMu.Lock() + e.proposalCache[frameNumber] = proposal + e.proposalCacheMu.Unlock() + + e.logger.Debug( + "cached out-of-order proposal", + zap.String("address", e.appAddressHex), + zap.Uint64("frame_number", frameNumber), + ) +} + +func (e *AppConsensusEngine) deleteCachedProposal(frameNumber uint64) { + e.proposalCacheMu.Lock() + delete(e.proposalCache, 
frameNumber) + e.proposalCacheMu.Unlock() +} + +func (e *AppConsensusEngine) popCachedProposal( + frameNumber uint64, +) *protobufs.AppShardProposal { + e.proposalCacheMu.Lock() + defer e.proposalCacheMu.Unlock() + + proposal, ok := e.proposalCache[frameNumber] + if ok { + delete(e.proposalCache, frameNumber) + } + + return proposal +} + +func (e *AppConsensusEngine) drainProposalCache(startFrame uint64) { + next := startFrame + for { + prop := e.popCachedProposal(next) + if prop == nil { + return + } + + if !e.processProposal(prop) { + e.logger.Debug( + "cached proposal failed processing, retaining for retry", + zap.String("address", e.appAddressHex), + zap.Uint64("frame_number", next), + ) + e.cacheProposal(prop) + return + } + + next++ + } +} + +func (e *AppConsensusEngine) registerPendingCertifiedParent( + proposal *protobufs.AppShardProposal, +) { + if proposal == nil || proposal.State == nil || proposal.State.Header == nil { + return + } + + frameNumber := proposal.State.Header.FrameNumber + e.pendingCertifiedParentsMu.Lock() + e.pendingCertifiedParents[frameNumber] = proposal + e.pendingCertifiedParentsMu.Unlock() +} + +func (e *AppConsensusEngine) trySealParentWithChild( + child *protobufs.AppShardProposal, +) { + if child == nil || child.State == nil || child.State.Header == nil { + return + } + + header := child.State.Header + if header.FrameNumber == 0 { + return + } + + parentFrame := header.FrameNumber - 1 + + e.pendingCertifiedParentsMu.RLock() + parent, ok := e.pendingCertifiedParents[parentFrame] + e.pendingCertifiedParentsMu.RUnlock() + if !ok || parent == nil || parent.State == nil || parent.State.Header == nil { + return + } + + if !bytes.Equal( + header.ParentSelector, + []byte(parent.State.Identity()), + ) { + e.logger.Debug( + "pending parent selector mismatch, dropping entry", + zap.String("address", e.appAddressHex), + zap.Uint64("parent_frame", parent.State.Header.FrameNumber), + zap.Uint64("child_frame", header.FrameNumber), + ) + e.pendingCertifiedParentsMu.Lock() + delete(e.pendingCertifiedParents, parentFrame) + e.pendingCertifiedParentsMu.Unlock() + return + } + + head, err := e.appTimeReel.GetHead() + if err != nil { + e.logger.Error("error fetching app time reel head", zap.Error(err)) + return + } + + if head != nil && head.Header != nil && + head.Header.FrameNumber+1 == parent.State.Header.FrameNumber { + e.logger.Debug( + "sealing parent with descendant proposal", + zap.String("address", e.appAddressHex), + zap.Uint64("parent_frame", parent.State.Header.FrameNumber), + zap.Uint64("child_frame", header.FrameNumber), + ) + e.addCertifiedState(parent, child) + } + + e.pendingCertifiedParentsMu.Lock() + delete(e.pendingCertifiedParents, parentFrame) + e.pendingCertifiedParentsMu.Unlock() +} + +func (e *AppConsensusEngine) addCertifiedState( + parent, child *protobufs.AppShardProposal, +) { + if parent == nil || parent.State == nil || parent.State.Header == nil || + child == nil || child.State == nil || child.State.Header == nil { + e.logger.Error("cannot seal certified state: missing parent or child data") + return + } + + qc := child.ParentQuorumCertificate + if qc == nil { + e.logger.Error( + "child missing parent quorum certificate", + zap.Uint64("child_frame_number", child.State.Header.FrameNumber), + ) + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + aggregateSig := &protobufs.BLS48581AggregateSignature{ + Signature: 
qc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: qc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: qc.GetAggregatedSignature().GetBitmask(), + } + if err := e.clockStore.PutQuorumCertificate( + &protobufs.QuorumCertificate{ + Filter: e.appAddress, + Rank: qc.GetRank(), + FrameNumber: qc.GetFrameNumber(), + Selector: []byte(qc.Identity()), + AggregateSignature: aggregateSig, + }, + txn, + ); err != nil { + e.logger.Error("could not insert quorum certificate", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + child.State.Header.PublicKeySignatureBls48581 = aggregateSig + + if err := e.appTimeReel.Insert(child.State); err != nil { + e.logger.Error("could not insert frame into app time reel", zap.Error(err)) + return + } + + head, err := e.appTimeReel.GetHead() + if err != nil { + e.logger.Error("could not get app time reel head", zap.Error(err)) + return + } + + if head == nil || head.Header == nil || + !bytes.Equal(child.State.Header.Output, head.Header.Output) { + e.logger.Error( + "app frames not aligned", + zap.String("address", e.appAddressHex), + zap.Uint64("new_frame_number", child.State.Header.FrameNumber), + ) + return + } + + txn, err = e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutCertifiedAppShardState( + child, + txn, + ); err != nil { + e.logger.Error("could not insert certified state", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } +} + func (e *AppConsensusEngine) handleConsensusMessage(message *pb.Message) { defer func() { if r := recover(); r != nil { @@ -139,17 +604,17 @@ func (e *AppConsensusEngine) handleConsensusMessage(message *pb.Message) { typePrefix := e.peekMessageType(message) switch typePrefix { - case protobufs.AppShardFrameType: + case protobufs.AppShardProposalType: e.handleProposal(message) - case protobufs.ProverLivenessCheckType: - e.handleLivenessCheck(message) - - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: e.handleVote(message) - case protobufs.FrameConfirmationType: - e.handleConfirmation(message) + case protobufs.TimeoutStateType: + e.handleTimeoutState(message) + + case protobufs.ProverLivenessCheckType: + // Liveness checks are processed globally; nothing to do here. 
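Editor's aside, not part of the patch: this switch, like validateConsensusMessage further down, routes each pubsub payload on a four-byte big-endian type tag read from the front of the message before any decoding. A minimal, self-contained sketch of that peek-and-dispatch pattern follows; the numeric prefixes and handler strings are hypothetical stand-ins for the protobufs type constants (AppShardProposalType, ProposalVoteType, TimeoutStateType).

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// Hypothetical prefix values; the real ones are the protobufs constants the
// switch above matches on.
const (
	typeProposal uint32 = 0x01
	typeVote     uint32 = 0x02
	typeTimeout  uint32 = 0x03
)

// peekType mirrors the peekMessageType idea: the first four bytes of the
// payload are a big-endian type tag, inspected before full unmarshalling.
func peekType(data []byte) (uint32, error) {
	if len(data) < 4 {
		return 0, errors.New("message shorter than type prefix")
	}
	return binary.BigEndian.Uint32(data[:4]), nil
}

func main() {
	// Build a fake message: 4-byte tag followed by an opaque payload.
	msg := make([]byte, 4, 4+len("payload"))
	binary.BigEndian.PutUint32(msg, typeVote)
	msg = append(msg, "payload"...)

	prefix, err := peekType(msg)
	if err != nil {
		fmt.Println("reject:", err)
		return
	}

	switch prefix {
	case typeProposal:
		fmt.Println("route to proposal handler")
	case typeVote:
		fmt.Println("route to vote handler")
	case typeTimeout:
		fmt.Println("route to timeout-state handler")
	default:
		fmt.Println("unknown type, reject")
	}
}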
default: e.logger.Debug( @@ -196,7 +661,7 @@ func (e *AppConsensusEngine) handleFrameMessage(message *pb.Message) { e.frameStore[string(frameID)] = frame e.frameStoreMu.Unlock() - if err := e.appTimeReel.Insert(e.ctx, frame); err != nil { + if err := e.appTimeReel.Insert(frame); err != nil { // Success metric recorded at the end of processing framesProcessedTotal.WithLabelValues("error").Inc() return @@ -225,7 +690,10 @@ func (e *AppConsensusEngine) handleProverMessage(message *pb.Message) { typePrefix := e.peekMessageType(message) - e.logger.Debug("handling prover message", zap.Uint32("type_prefix", typePrefix)) + e.logger.Debug( + "handling prover message", + zap.Uint32("type_prefix", typePrefix), + ) switch typePrefix { case protobufs.MessageBundleType: // MessageBundle messages need to be collected for execution @@ -276,7 +744,7 @@ func (e *AppConsensusEngine) handleGlobalFrameMessage(message *pb.Message) { return } - if err := e.globalTimeReel.Insert(e.ctx, frame); err != nil { + if err := e.globalTimeReel.Insert(frame); err != nil { // Success metric recorded at the end of processing globalFramesProcessedTotal.WithLabelValues("error").Inc() return @@ -382,7 +850,7 @@ func (e *AppConsensusEngine) handleDispatchMessage(message *pb.Message) { } if err := e.dispatchService.AddInboxMessage( - e.ctx, + context.Background(), envelope, ); err != nil { e.logger.Debug("failed to add inbox message", zap.Error(err)) @@ -395,7 +863,7 @@ func (e *AppConsensusEngine) handleDispatchMessage(message *pb.Message) { } if err := e.dispatchService.AddHubInboxAssociation( - e.ctx, + context.Background(), envelope, ); err != nil { e.logger.Debug("failed to add inbox message", zap.Error(err)) @@ -408,7 +876,7 @@ func (e *AppConsensusEngine) handleDispatchMessage(message *pb.Message) { } if err := e.dispatchService.DeleteHubInboxAssociation( - e.ctx, + context.Background(), envelope, ); err != nil { e.logger.Debug("failed to add inbox message", zap.Error(err)) @@ -423,144 +891,54 @@ func (e *AppConsensusEngine) handleDispatchMessage(message *pb.Message) { } func (e *AppConsensusEngine) handleProposal(message *pb.Message) { + // Skip our own messages + if bytes.Equal(message.From, e.pubsub.GetPeerID()) { + return + } + timer := prometheus.NewTimer( proposalProcessingDuration.WithLabelValues(e.appAddressHex), ) defer timer.ObserveDuration() - frame := &protobufs.AppShardFrame{} - if err := frame.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal frame", zap.Error(err)) - proposalProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + proposal := &protobufs.AppShardProposal{} + if err := proposal.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal proposal", zap.Error(err)) + proposalProcessedTotal.WithLabelValues("error").Inc() return } - if frame.Header != nil && frame.Header.Prover != nil { - valid, err := e.frameValidator.Validate(frame) - if !valid || err != nil { - e.logger.Error("received invalid frame", zap.Error(err)) - proposalProcessedTotal.WithLabelValues( - e.appAddressHex, - "invalid", - ).Inc() - return - } - - frameIDBI, _ := poseidon.HashBytes(frame.Header.Output) - frameID := frameIDBI.FillBytes(make([]byte, 32)) - e.frameStoreMu.Lock() - e.frameStore[string(frameID)] = frame.Clone().(*protobufs.AppShardFrame) - e.frameStoreMu.Unlock() - - e.stateMachine.ReceiveProposal( - PeerID{ID: frame.Header.Prover}, - &frame, - ) - proposalProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() - } -} - -func (e 
*AppConsensusEngine) handleLivenessCheck(message *pb.Message) { - timer := prometheus.NewTimer( - livenessCheckProcessingDuration.WithLabelValues(e.appAddressHex), - ) - defer timer.ObserveDuration() - - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + if !bytes.Equal(proposal.State.Header.Address, e.appAddress) { return } - if !bytes.Equal(livenessCheck.Filter, e.appAddress) { - return - } + frameIDBI, _ := poseidon.HashBytes(proposal.State.Header.Output) + frameID := frameIDBI.FillBytes(make([]byte, 32)) + e.frameStoreMu.Lock() + e.frameStore[string(frameID)] = proposal.State + e.frameStoreMu.Unlock() - // Validate the liveness check structure - if err := livenessCheck.Validate(); err != nil { - e.logger.Debug("invalid liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - proverSet, err := e.proverRegistry.GetActiveProvers(e.appAddress) + txn, err := e.clockStore.NewTransaction(false) if err != nil { - e.logger.Error("could not receive liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + e.logger.Error("could not create transaction", zap.Error(err)) return } - lcBytes, err := livenessCheck.ConstructSignaturePayload() - if err != nil { - e.logger.Error( - "could not construct signature message for liveness check", - zap.Error(err), - ) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + if err := e.clockStore.PutProposalVote(txn, proposal.Vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() return } - var found []byte = nil - for _, prover := range proverSet { - if bytes.Equal( - prover.Address, - livenessCheck.PublicKeySignatureBls48581.Address, - ) { - valid, err := e.keyManager.ValidateSignature( - crypto.KeyTypeBLS48581G1, - prover.PublicKey, - lcBytes, - livenessCheck.PublicKeySignatureBls48581.Signature, - livenessCheck.GetSignatureDomain(), - ) - if err != nil || !valid { - e.logger.Error( - "could not validate signature for liveness check", - zap.Error(err), - ) - break - } - found = prover.PublicKey - - break - } - } - - if found == nil { - e.logger.Warn( - "invalid liveness check", - zap.String( - "prover", - hex.EncodeToString( - livenessCheck.PublicKeySignatureBls48581.Address, - ), - ), - ) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() return } - if livenessCheck.PublicKeySignatureBls48581 == nil { - e.logger.Error("no signature on liveness check") - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - } + e.appShardProposalQueue <- proposal - commitment := CollectedCommitments{ - commitmentHash: livenessCheck.CommitmentHash, - frameNumber: livenessCheck.FrameNumber, - prover: livenessCheck.PublicKeySignatureBls48581.Address, - } - if err := e.stateMachine.ReceiveLivenessCheck( - PeerID{ID: livenessCheck.PublicKeySignatureBls48581.Address}, - commitment, - ); err != nil { - e.logger.Error("could not receive liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - 
livenessCheckProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() + proposalProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() } func (e *AppConsensusEngine) handleVote(message *pb.Message) { @@ -569,7 +947,7 @@ func (e *AppConsensusEngine) handleVote(message *pb.Message) { ) defer timer.ObserveDuration() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) voteProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() @@ -586,92 +964,118 @@ func (e *AppConsensusEngine) handleVote(message *pb.Message) { return } - if err := e.stateMachine.ReceiveVote( - PeerID{ID: vote.Proposer}, - PeerID{ID: vote.PublicKeySignatureBls48581.Address}, - &vote, - ); err != nil { - e.logger.Error("could not receive vote", zap.Error(err)) + proverSet, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + e.logger.Error("could not get active provers", zap.Error(err)) voteProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() return } - voteProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() -} - -func (e *AppConsensusEngine) handleConfirmation(message *pb.Message) { - timer := prometheus.NewTimer( - confirmationProcessingDuration.WithLabelValues(e.appAddressHex), - ) - defer timer.ObserveDuration() - - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - if !bytes.Equal(confirmation.Filter, e.appAddress) { - return - } - - e.frameStoreMu.RLock() - var matchingFrame *protobufs.AppShardFrame - for _, frame := range e.frameStore { - if frame.Header != nil && - frame.Header.FrameNumber == confirmation.FrameNumber { - frameSelector := e.calculateFrameSelector(frame.Header) - if bytes.Equal(frameSelector, confirmation.Selector) { - matchingFrame = frame - break - } + // Find the voter's public key + var voterPublicKey []byte = nil + for _, prover := range proverSet { + if bytes.Equal( + prover.Address, + vote.PublicKeySignatureBls48581.Address, + ) { + voterPublicKey = prover.PublicKey + break } } - if matchingFrame == nil { - e.frameStoreMu.RUnlock() - return - } - - e.frameStoreMu.RUnlock() - e.frameStoreMu.Lock() - defer e.frameStoreMu.Unlock() - - matchingFrame.Header.PublicKeySignatureBls48581 = - confirmation.AggregateSignature - valid, err := e.frameValidator.Validate(matchingFrame) - if !valid || err != nil { - e.logger.Error("received invalid confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - if matchingFrame.Header.Prover == nil { - e.logger.Error("confirmation with no matched prover") - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - if err := e.stateMachine.ReceiveConfirmation( - PeerID{ID: matchingFrame.Header.Prover}, - &matchingFrame, - ); err != nil { - e.logger.Error("could not receive confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() - return - } - - if err := e.appTimeReel.Insert(e.ctx, matchingFrame); err != nil { - e.logger.Error( - "could not insert into time reel", - zap.Error(err), + if voterPublicKey == nil { + e.logger.Warn( + "invalid vote - voter not found", + zap.String( + "voter", + 
hex.EncodeToString( + vote.PublicKeySignatureBls48581.Address, + ), + ), ) - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + voteProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() return } - confirmationProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutProposalVote(txn, vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.voteAggregator.AddVote(&vote) + + voteProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() +} + +func (e *AppConsensusEngine) handleTimeoutState(message *pb.Message) { + timer := prometheus.NewTimer( + timeoutStateProcessingDuration.WithLabelValues(e.appAddressHex), + ) + defer timer.ObserveDuration() + + timeoutState := &protobufs.TimeoutState{} + if err := timeoutState.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal timeoutState", zap.Error(err)) + timeoutStateProcessedTotal.WithLabelValues(e.appAddressHex, "error").Inc() + return + } + + if !bytes.Equal(timeoutState.Vote.Filter, e.appAddress) { + return + } + + // Small gotcha: the timeout structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + lqc := timeoutState.LatestQuorumCertificate + prtc := timeoutState.PriorRankTimeoutCertificate + timeout := &models.TimeoutState[*protobufs.ProposalVote]{ + Rank: timeoutState.Vote.Rank, + Vote: &timeoutState.Vote, + TimeoutTick: timeoutState.TimeoutTick, + } + if lqc != nil { + timeout.LatestQuorumCertificate = lqc + } + if prtc != nil { + timeout.PriorRankTimeoutCertificate = prtc + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutTimeoutVote(txn, timeoutState); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.timeoutAggregator.AddTimeout(timeout) + + timeoutStateProcessedTotal.WithLabelValues(e.appAddressHex, "success").Inc() } func (e *AppConsensusEngine) peekMessageType(message *pb.Message) uint32 { diff --git a/node/consensus/app/message_subscription.go b/node/consensus/app/message_subscription.go index 32fb099..1315902 100644 --- a/node/consensus/app/message_subscription.go +++ b/node/consensus/app/message_subscription.go @@ -21,9 +21,11 @@ func (e *AppConsensusEngine) subscribeToConsensusMessages() error { e.getConsensusMessageBitmask(), func(message *pb.Message) error { select { + case <-e.haltCtx.Done(): + return nil case e.consensusMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("consensus message queue full, dropping message") @@ -82,7 +84,7 @@ func (e *AppConsensusEngine) subscribeToProverMessages() error { case e.proverMessageQueue <- message: e.logger.Debug("got prover message") return nil - case <-e.ctx.Done(): + case 
<-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("prover message queue full, dropping message") @@ -111,10 +113,16 @@ func (e *AppConsensusEngine) subscribeToFrameMessages() error { if err := e.pubsub.Subscribe( e.getFrameMessageBitmask(), func(message *pb.Message) error { + if e.IsInProverTrie(e.getProverAddress()) { + return nil + } + select { + case <-e.haltCtx.Done(): + return nil case e.frameMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("app message queue full, dropping message") @@ -144,9 +152,11 @@ func (e *AppConsensusEngine) subscribeToGlobalFrameMessages() error { e.getGlobalFrameMessageBitmask(), func(message *pb.Message) error { select { + case <-e.haltCtx.Done(): + return nil case e.globalFrameMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("global message queue full, dropping message") @@ -178,7 +188,7 @@ func (e *AppConsensusEngine) subscribeToGlobalAlertMessages() error { select { case e.globalAlertMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("global alert queue full, dropping message") @@ -212,7 +222,7 @@ func (e *AppConsensusEngine) subscribeToPeerInfoMessages() error { return nil case e.globalPeerInfoMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("peer info message queue full, dropping message") @@ -244,7 +254,7 @@ func (e *AppConsensusEngine) subscribeToDispatchMessages() error { select { case e.dispatchMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("dispatch queue full, dropping message") diff --git a/node/consensus/app/message_validation.go b/node/consensus/app/message_validation.go index a5735e7..0bc6c8e 100644 --- a/node/consensus/app/message_validation.go +++ b/node/consensus/app/message_validation.go @@ -33,42 +33,42 @@ func (e *AppConsensusEngine) validateConsensusMessage( typePrefix := binary.BigEndian.Uint32(message.Data[:4]) switch typePrefix { - case protobufs.AppShardFrameType: + case protobufs.AppShardProposalType: timer := prometheus.NewTimer( proposalValidationDuration.WithLabelValues(e.appAddressHex), ) defer timer.ObserveDuration() - frame := &protobufs.AppShardFrame{} - if err := frame.FromCanonicalBytes(message.Data); err != nil { + proposal := &protobufs.AppShardProposal{} + if err := proposal.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal frame", zap.Error(err)) proposalValidationTotal.WithLabelValues(e.appAddressHex, "reject").Inc() return p2p.ValidationResultReject } - if frame.Header == nil { - e.logger.Debug("frame has no header") + if err := proposal.Validate(); err != nil { + e.logger.Error("invalid proposal", zap.Error(err)) proposalValidationTotal.WithLabelValues(e.appAddressHex, "reject").Inc() return p2p.ValidationResultReject } - if !bytes.Equal(frame.Header.Address, e.appAddress) { + if !bytes.Equal(proposal.State.Header.Address, e.appAddress) { proposalValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc() return p2p.ValidationResultIgnore } - if frametime.AppFrameSince(frame) > 20*time.Second { + if e.forks.FinalizedRank() > proposal.GetRank() { 
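Editor's aside, not part of the patch: the subscription callbacks above enqueue with a select whose default branch drops the message (with a warning) when the queue is full, so a slow consumer never blocks the pubsub callback. A minimal sketch of that drop-on-full enqueue, using illustrative names rather than the engine's actual fields.

package main

import "fmt"

// enqueue tries the bounded queue, honours shutdown, and otherwise drops the
// message instead of blocking the caller.
func enqueue(queue chan<- []byte, shutdown <-chan struct{}, msg []byte) error {
	select {
	case <-shutdown:
		return fmt.Errorf("context cancelled")
	case queue <- msg:
		return nil
	default:
		fmt.Println("queue full, dropping message")
		return nil
	}
}

func main() {
	queue := make(chan []byte, 1)
	shutdown := make(chan struct{})

	_ = enqueue(queue, shutdown, []byte("a")) // buffer has room: accepted
	_ = enqueue(queue, shutdown, []byte("b")) // buffer full: dropped, not blocked
}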
proposalValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc() return p2p.ValidationResultIgnore } - if frame.Header.PublicKeySignatureBls48581 != nil { + if proposal.State.Header.PublicKeySignatureBls48581 != nil { e.logger.Debug("frame validation has signature") proposalValidationTotal.WithLabelValues(e.appAddressHex, "reject").Inc() return p2p.ValidationResultReject } - valid, err := e.frameValidator.Validate(frame) + valid, err := e.frameValidator.Validate(proposal.State) if err != nil { e.logger.Debug("frame validation error", zap.Error(err)) proposalValidationTotal.WithLabelValues(e.appAddressHex, "reject").Inc() @@ -83,60 +83,20 @@ func (e *AppConsensusEngine) validateConsensusMessage( proposalValidationTotal.WithLabelValues(e.appAddressHex, "accept").Inc() - case protobufs.ProverLivenessCheckType: - timer := prometheus.NewTimer( - livenessCheckValidationDuration.WithLabelValues(e.appAddressHex), - ) - defer timer.ObserveDuration() - - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) - livenessCheckValidationTotal.WithLabelValues( - e.appAddressHex, - "reject", - ).Inc() - return p2p.ValidationResultReject - } - - now := time.Now().UnixMilli() - if livenessCheck.Timestamp > now+500 || - livenessCheck.Timestamp < now-1000 { - livenessCheckValidationTotal.WithLabelValues( - e.appAddressHex, - "ignore", - ).Inc() - return p2p.ValidationResultIgnore - } - - if err := livenessCheck.Validate(); err != nil { - e.logger.Debug("failed to validate liveness check", zap.Error(err)) - livenessCheckValidationTotal.WithLabelValues( - e.appAddressHex, - "reject", - ).Inc() - return p2p.ValidationResultReject - } - - livenessCheckValidationTotal.WithLabelValues( - e.appAddressHex, - "accept", - ).Inc() - - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: timer := prometheus.NewTimer( voteValidationDuration.WithLabelValues(e.appAddressHex), ) defer timer.ObserveDuration() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) voteValidationTotal.WithLabelValues(e.appAddressHex, "reject").Inc() return p2p.ValidationResultReject } - now := time.Now().UnixMilli() + now := uint64(time.Now().UnixMilli()) if vote.Timestamp > now+5000 || vote.Timestamp < now-5000 { voteValidationTotal.WithLabelValues(e.appAddressHex, "ignore").Inc() return p2p.ValidationResultIgnore @@ -150,41 +110,57 @@ func (e *AppConsensusEngine) validateConsensusMessage( voteValidationTotal.WithLabelValues(e.appAddressHex, "accept").Inc() - case protobufs.FrameConfirmationType: + case protobufs.TimeoutStateType: timer := prometheus.NewTimer( - confirmationValidationDuration.WithLabelValues(e.appAddressHex), + timeoutStateValidationDuration.WithLabelValues(e.appAddressHex), ) defer timer.ObserveDuration() - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - confirmationValidationTotal.WithLabelValues( + timeoutState := &protobufs.TimeoutState{} + if err := timeoutState.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal timeout state", zap.Error(err)) + timeoutStateValidationTotal.WithLabelValues( e.appAddressHex, "reject", ).Inc() return p2p.ValidationResultReject } - 
now := time.Now().UnixMilli() - if confirmation.Timestamp > now+5000 || confirmation.Timestamp < now-5000 { - confirmationValidationTotal.WithLabelValues( + now := uint64(time.Now().UnixMilli()) + if timeoutState.Timestamp > now+5000 || timeoutState.Timestamp < now-5000 { + timeoutStateValidationTotal.WithLabelValues( e.appAddressHex, "ignore", ).Inc() return p2p.ValidationResultIgnore } - if err := confirmation.Validate(); err != nil { - e.logger.Debug("failed to validate confirmation", zap.Error(err)) - confirmationValidationTotal.WithLabelValues( + if err := timeoutState.Validate(); err != nil { + e.logger.Debug("failed to validate timeout state", zap.Error(err)) + timeoutStateValidationTotal.WithLabelValues( e.appAddressHex, "reject", ).Inc() return p2p.ValidationResultReject } - confirmationValidationTotal.WithLabelValues(e.appAddressHex, "accept").Inc() + timeoutStateValidationTotal.WithLabelValues(e.appAddressHex, "accept").Inc() + + case protobufs.ProverLivenessCheckType: + check := &protobufs.ProverLivenessCheck{} + if err := check.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) + return p2p.ValidationResultReject + } + + if err := check.Validate(); err != nil { + e.logger.Debug("invalid liveness check", zap.Error(err)) + return p2p.ValidationResultReject + } + + if len(check.Filter) != 0 && !bytes.Equal(check.Filter, e.appAddress) { + return p2p.ValidationResultIgnore + } default: return p2p.ValidationResultReject diff --git a/node/consensus/app/metrics.go b/node/consensus/app/metrics.go index 7ad82be..e19fcae 100644 --- a/node/consensus/app/metrics.go +++ b/node/consensus/app/metrics.go @@ -99,50 +99,6 @@ var ( []string{"app_address"}, ) - // Shard liveness check processing metrics - livenessCheckProcessedTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_processed_total", - Help: "Total number of shard liveness checks processed by the app consensus engine", - }, - []string{"app_address", "status"}, // status: "success", "error", "invalid" - ) - - livenessCheckProcessingDuration = promauto.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_processing_duration_seconds", - Help: "Time taken to process a shard liveness check", - Buckets: prometheus.DefBuckets, - }, - []string{"app_address"}, - ) - - // Shard liveness check validation metrics - livenessCheckValidationTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_validation_total", - Help: "Total number of shard liveness check validations", - }, - []string{"app_address", "result"}, // result: "accept", "reject", "ignore" - ) - - livenessCheckValidationDuration = promauto.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_validation_duration_seconds", - Help: "Time taken to validate a shard liveness check", - Buckets: prometheus.DefBuckets, - }, - []string{"app_address"}, - ) - // Shard vote processing metrics voteProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ @@ -187,45 +143,45 @@ var ( []string{"app_address"}, ) - // Shard confirmation processing metrics - confirmationProcessedTotal = promauto.NewCounterVec( + // Shard timeout stateprocessing metrics + timeoutStateProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ 
Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_processed_total", - Help: "Total number of shard confirmations processed by the app consensus engine", + Name: "timeout_state_processed_total", + Help: "Total number of shard timeout states processed by the app consensus engine", }, []string{"app_address", "status"}, // status: "success", "error", "invalid" ) - confirmationProcessingDuration = promauto.NewHistogramVec( + timeoutStateProcessingDuration = promauto.NewHistogramVec( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_processing_duration_seconds", - Help: "Time taken to process a shard confirmation", + Name: "timeout_state_processing_duration_seconds", + Help: "Time taken to process a shard timeout state", Buckets: prometheus.DefBuckets, }, []string{"app_address"}, ) - // Shard confirmation validation metrics - confirmationValidationTotal = promauto.NewCounterVec( + // Shard timeout statevalidation metrics + timeoutStateValidationTotal = promauto.NewCounterVec( prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_validation_total", - Help: "Total number of shard confirmation validations", + Name: "timeout_state_validation_total", + Help: "Total number of shard timeout statevalidations", }, []string{"app_address", "result"}, // result: "accept", "reject", "ignore" ) - confirmationValidationDuration = promauto.NewHistogramVec( + timeoutStateValidationDuration = promauto.NewHistogramVec( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_validation_duration_seconds", - Help: "Time taken to validate a shard confirmation", + Name: "timeout_state_validation_duration_seconds", + Help: "Time taken to validate a shard timeout state", Buckets: prometheus.DefBuckets, }, []string{"app_address"}, diff --git a/node/consensus/app/services.go b/node/consensus/app/services.go index 201afe4..0f93800 100644 --- a/node/consensus/app/services.go +++ b/node/consensus/app/services.go @@ -5,6 +5,7 @@ import ( "context" "github.com/iden3/go-iden3-crypto/poseidon" + "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" @@ -18,61 +19,9 @@ func (e *AppConsensusEngine) GetAppShardFrame( ctx context.Context, request *protobufs.GetAppShardFrameRequest, ) (*protobufs.AppShardFrameResponse, error) { - peerID, ok := qgrpc.PeerIDFromContext(ctx) - if !ok { - return nil, status.Error(codes.Internal, "remote peer ID not found") - } - - if !bytes.Equal(request.Filter, e.appAddress) { - return nil, status.Error(codes.InvalidArgument, "incorrect filter") - } - - registry, err := e.keyStore.GetKeyRegistry( - []byte(peerID), - ) + peerID, err := e.authenticateProverFromContext(ctx) if err != nil { - return nil, status.Error(codes.PermissionDenied, "could not identify peer") - } - - if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) { - if registry.ProverKey == nil || registry.ProverKey.KeyValue == nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (no prover)", - ) - } - - addrBI, err := poseidon.HashBytes(registry.ProverKey.KeyValue) - if err != nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (invalid address)", - ) - } - - addr := addrBI.FillBytes(make([]byte, 32)) - info, err := e.proverRegistry.GetActiveProvers(request.Filter) - if err != nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (no 
prover registry)", - ) - } - - found := false - for _, prover := range info { - if bytes.Equal(prover.Address, addr) { - found = true - break - } - } - - if !found { - return nil, status.Error( - codes.PermissionDenied, - "invalid peer", - ) - } + return nil, err } e.logger.Debug( @@ -112,9 +61,170 @@ func (e *AppConsensusEngine) GetAppShardFrame( }, nil } +func (e *AppConsensusEngine) GetAppShardProposal( + ctx context.Context, + request *protobufs.GetAppShardProposalRequest, +) (*protobufs.AppShardProposalResponse, error) { + peerID, err := e.authenticateProverFromContext(ctx) + if err != nil { + return nil, err + } + + // Genesis does not have a parent cert, treat special: + if request.FrameNumber == 0 { + frame, _, err := e.clockStore.GetShardClockFrame( + request.Filter, + request.FrameNumber, + false, + ) + if err != nil { + e.logger.Debug( + "received error while fetching shard frame", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get shard proposal") + } + return &protobufs.AppShardProposalResponse{ + Proposal: &protobufs.AppShardProposal{ + State: frame, + }, + }, nil + } + + e.logger.Debug( + "received proposal request", + zap.Uint64("frame_number", request.FrameNumber), + zap.String("peer_id", peerID.String()), + ) + frame, _, err := e.clockStore.GetShardClockFrame( + request.Filter, + request.FrameNumber, + false, + ) + if err != nil { + return &protobufs.AppShardProposalResponse{}, nil + } + + vote, err := e.clockStore.GetProposalVote( + request.Filter, + frame.GetRank(), + []byte(frame.Source()), + ) + if err != nil { + return &protobufs.AppShardProposalResponse{}, nil + } + + parent, _, err := e.clockStore.GetShardClockFrame( + request.Filter, + request.FrameNumber-1, + false, + ) + if err != nil { + e.logger.Debug( + "received error while fetching shard frame parent", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get shard proposal") + } + + parentQC, err := e.clockStore.GetQuorumCertificate( + request.Filter, + parent.GetRank(), + ) + if err != nil { + e.logger.Debug( + "received error while fetching QC parent", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get shard proposal") + } + // no tc is fine, pass the nil along + priorRankTC, _ := e.clockStore.GetTimeoutCertificate( + request.Filter, + frame.GetRank()-1, + ) + proposal := &protobufs.AppShardProposal{ + State: frame, + ParentQuorumCertificate: parentQC, + PriorRankTimeoutCertificate: priorRankTC, + Vote: vote, + } + return &protobufs.AppShardProposalResponse{ + Proposal: proposal, + }, nil +} + func (e *AppConsensusEngine) RegisterServices(server *grpc.Server) { protobufs.RegisterAppShardServiceServer(server, e) protobufs.RegisterDispatchServiceServer(server, e.dispatchService) protobufs.RegisterHypergraphComparisonServiceServer(server, e.hyperSync) protobufs.RegisterOnionServiceServer(server, e.onionService) } + +func (e *AppConsensusEngine) authenticateProverFromContext( + ctx context.Context, +) (peer.ID, error) { + peerID, ok := qgrpc.PeerIDFromContext(ctx) + if !ok { + return peerID, status.Error(codes.Internal, "remote peer ID not found") + } + + if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) { + registry, err := e.keyStore.GetKeyRegistry( + []byte(peerID), + ) + if err != nil { + return peerID, 
status.Error( + codes.PermissionDenied, + "could not identify peer", + ) + } + + if registry.ProverKey == nil || registry.ProverKey.KeyValue == nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (no prover)", + ) + } + + addrBI, err := poseidon.HashBytes(registry.ProverKey.KeyValue) + if err != nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (invalid address)", + ) + } + + addr := addrBI.FillBytes(make([]byte, 32)) + info, err := e.proverRegistry.GetActiveProvers(e.appAddress) + if err != nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (no prover registry)", + ) + } + + found := false + for _, prover := range info { + if bytes.Equal(prover.Address, addr) { + found = true + break + } + } + + if !found { + return peerID, status.Error( + codes.PermissionDenied, + "invalid peer", + ) + } + } + + return peerID, nil +} diff --git a/node/consensus/events/app_event_distributor.go b/node/consensus/events/app_event_distributor.go index bacb502..e1b5070 100644 --- a/node/consensus/events/app_event_distributor.go +++ b/node/consensus/events/app_event_distributor.go @@ -6,6 +6,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" "source.quilibrium.com/quilibrium/monorepo/types/consensus" ) @@ -36,52 +37,32 @@ func NewAppEventDistributor( } // Start begins the event processing loop -func (a *AppEventDistributor) Start(ctx context.Context) error { - a.mu.Lock() - defer a.mu.Unlock() - - if a.running { - return nil - } - - a.ctx, a.cancel = context.WithCancel(ctx) - a.running = true - a.startTime = time.Now() +func (g *AppEventDistributor) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + g.mu.Lock() + g.ctx = ctx + g.running = true + g.startTime = time.Now() distributorStartsTotal.WithLabelValues("app").Inc() + g.mu.Unlock() + ready() + g.wg.Add(2) + go g.processEvents() + go g.trackUptime() - a.wg.Add(1) - go a.processEvents() - - go a.trackUptime() - - return nil -} - -// Stop gracefully shuts down the distributor -func (a *AppEventDistributor) Stop() error { - a.mu.Lock() - if !a.running { - a.mu.Unlock() - return nil - } - a.running = false - a.mu.Unlock() - - a.cancel() - a.wg.Wait() - - a.mu.Lock() - for _, ch := range a.subscribers { + <-ctx.Done() + g.mu.Lock() + g.running = false + for _, ch := range g.subscribers { close(ch) } - a.subscribers = make(map[string]chan consensus.ControlEvent) - a.mu.Unlock() - + g.subscribers = make(map[string]chan consensus.ControlEvent) distributorStopsTotal.WithLabelValues("app").Inc() distributorUptime.WithLabelValues("app").Set(0) - - return nil + g.mu.Unlock() } // Subscribe registers a new subscriber diff --git a/node/consensus/events/distributor_test.go b/node/consensus/events/distributor_test.go index 108cc0d..0e4e88c 100644 --- a/node/consensus/events/distributor_test.go +++ b/node/consensus/events/distributor_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/consensus" @@ -81,25 +82,21 @@ func TestGlobalEventDistributor_StartStop(t *testing.T) { globalEventCh := 
make(chan consensustime.GlobalEvent, 10) distributor := NewGlobalEventDistributor(globalEventCh) - ctx := context.Background() + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) // Test starting - err := distributor.Start(ctx) - require.NoError(t, err) - - // Test starting again (should be idempotent) - err = distributor.Start(ctx) - require.NoError(t, err) + go distributor.Start(ctx, func() {}) // Test stopping - err = distributor.Stop() - require.NoError(t, err) - - // Test stopping again (should be idempotent) - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) + } func TestGlobalEventDistributor_Subscribe(t *testing.T) { @@ -116,9 +113,8 @@ func TestGlobalEventDistributor_Subscribe(t *testing.T) { assert.NotNil(t, sub3Ch) // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Send a test event testEvent := createTestGlobalEvent(consensustime.TimeReelEventNewHead, 100) @@ -146,8 +142,12 @@ func TestGlobalEventDistributor_Subscribe(t *testing.T) { } // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) } @@ -160,9 +160,8 @@ func TestGlobalEventDistributor_Unsubscribe(t *testing.T) { sub2Ch := distributor.Subscribe("subscriber2") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Unsubscribe subscriber1 distributor.Unsubscribe("subscriber1") @@ -198,8 +197,12 @@ func TestGlobalEventDistributor_Unsubscribe(t *testing.T) { assert.False(t, ok, "Unsubscribed channel should be closed") // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) } @@ -211,9 +214,8 @@ func TestGlobalEventDistributor_EventTypes(t *testing.T) { subCh := distributor.Subscribe("test-subscriber") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Test NewHead event newHeadEvent := createTestGlobalEvent(consensustime.TimeReelEventNewHead, 100) @@ -243,8 +245,12 @@ func TestGlobalEventDistributor_EventTypes(t *testing.T) { assert.Equal(t, equivocationEvent, *eventData) // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) } @@ -253,11 +259,10 @@ func TestGlobalEventDistributor_ContextCancellation(t *testing.T) { distributor := NewGlobalEventDistributor(globalEventCh) // Create a cancellable context - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) // Start the distributor - err := distributor.Start(ctx) - require.NoError(t, err) + go distributor.Start(ctx, func() {}) // Subscribe subCh := distributor.Subscribe("test-subscriber") @@ -269,8 
+274,12 @@ func TestGlobalEventDistributor_ContextCancellation(t *testing.T) { time.Sleep(100 * time.Millisecond) // Stop should work gracefully - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } // Channel should be closed _, ok := <-subCh @@ -284,23 +293,18 @@ func TestAppEventDistributor_StartStop(t *testing.T) { appEventCh := make(chan consensustime.AppEvent, 10) distributor := NewAppEventDistributor(globalEventCh, appEventCh) - ctx := context.Background() + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) // Test starting - err := distributor.Start(ctx) - require.NoError(t, err) - - // Test starting again (should be idempotent) - err = distributor.Start(ctx) - require.NoError(t, err) + go distributor.Start(ctx, func() {}) // Test stopping - err = distributor.Stop() - require.NoError(t, err) - - // Test stopping again (should be idempotent) - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) close(appEventCh) @@ -315,9 +319,8 @@ func TestAppEventDistributor_GlobalAndAppEvents(t *testing.T) { subCh := distributor.Subscribe("test-subscriber") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Test Global event globalEvent := createTestGlobalEvent(consensustime.TimeReelEventNewHead, 100) @@ -338,8 +341,12 @@ func TestAppEventDistributor_GlobalAndAppEvents(t *testing.T) { assert.Equal(t, appEvent, *appEventData) // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) close(appEventCh) } @@ -353,9 +360,8 @@ func TestAppEventDistributor_AllEventTypes(t *testing.T) { subCh := distributor.Subscribe("test-subscriber") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Test all global event types globalNewHead := createTestGlobalEvent(consensustime.TimeReelEventNewHead, 100) @@ -390,8 +396,12 @@ func TestAppEventDistributor_AllEventTypes(t *testing.T) { assert.Equal(t, consensus.ControlEventAppEquivocation, event.Type) // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } close(globalEventCh) close(appEventCh) } @@ -406,9 +416,8 @@ func TestAppEventDistributor_MultipleSubscribers(t *testing.T) { sub2Ch := distributor.Subscribe("subscriber2") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Send events globalEvent := createTestGlobalEvent(consensustime.TimeReelEventNewHead, 100) @@ -445,8 +454,12 @@ func TestAppEventDistributor_MultipleSubscribers(t *testing.T) { assert.Equal(t, 2, receivedApp) // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + 
require.NoError(t, err) + } close(globalEventCh) close(appEventCh) } @@ -460,9 +473,8 @@ func TestAppEventDistributor_ChannelClosure(t *testing.T) { subCh := distributor.Subscribe("test-subscriber") // Start the distributor - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Close the input channels close(globalEventCh) @@ -471,8 +483,12 @@ func TestAppEventDistributor_ChannelClosure(t *testing.T) { time.Sleep(100 * time.Millisecond) // Stop should work gracefully - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } // Subscriber channel should be closed _, ok := <-subCh @@ -495,9 +511,8 @@ func TestConcurrentSubscribeUnsubscribe(t *testing.T) { globalEventCh := make(chan consensustime.GlobalEvent, 10) distributor := NewGlobalEventDistributor(globalEventCh) - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(t, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) // Concurrently subscribe and unsubscribe done := make(chan bool) @@ -537,8 +552,12 @@ func TestConcurrentSubscribeUnsubscribe(t *testing.T) { } // Stop the distributor - err = distributor.Stop() - require.NoError(t, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(t, err) + } wg.Wait() close(globalEventCh) @@ -568,9 +587,8 @@ func BenchmarkGlobalEventDistributor_Broadcast(b *testing.B) { }(ch) } - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(b, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) b.ResetTimer() @@ -588,8 +606,12 @@ func BenchmarkGlobalEventDistributor_Broadcast(b *testing.B) { // Signal consumers to stop close(done) - err = distributor.Stop() - require.NoError(b, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(b, err) + } close(globalEventCh) // Wait for all consumers to finish @@ -620,9 +642,8 @@ func BenchmarkAppEventDistributor_MixedEvents(b *testing.B) { }(ch) } - ctx := context.Background() - err := distributor.Start(ctx) - require.NoError(b, err) + ctx, cancel, errCh := lifecycle.WithSignallerAndCancel(context.Background()) + go distributor.Start(ctx, func() {}) b.ResetTimer() @@ -642,8 +663,12 @@ func BenchmarkAppEventDistributor_MixedEvents(b *testing.B) { // Signal consumers to stop close(done) - err = distributor.Stop() - require.NoError(b, err) + cancel() + select { + case <-ctx.Done(): + case err, _ := <-errCh: + require.NoError(b, err) + } close(globalEventCh) close(appEventCh) diff --git a/node/consensus/events/global_event_distributor.go b/node/consensus/events/global_event_distributor.go index 5c0bb32..ceffbd8 100644 --- a/node/consensus/events/global_event_distributor.go +++ b/node/consensus/events/global_event_distributor.go @@ -6,6 +6,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" "source.quilibrium.com/quilibrium/monorepo/types/consensus" ) @@ -34,52 +35,33 @@ func NewGlobalEventDistributor( } // Start begins the event processing loop -func (g *GlobalEventDistributor) Start(ctx context.Context) error { 
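The tests above and the hunk that continues below converge on one lifecycle shape: Start takes a signaller context plus a ready callback, signals readiness, works until the context is cancelled, and only then closes its subscriber channels, while the caller drives shutdown with cancel() and watches an error channel. For illustration only (not part of the patch), a minimal sketch of that shape, using plain standard-library stand-ins for lifecycle.SignalerContext and lifecycle.WithSignallerAndCancel, which carry more machinery in the real package:

package main

import (
	"context"
	"fmt"
	"time"
)

// demoDistributor is a hypothetical, stripped-down component with the same
// lifecycle shape: signal ready, run until the context is done, then close
// and clear the subscriber channels.
type demoDistributor struct {
	subscribers map[string]chan string
}

func (d *demoDistributor) Start(ctx context.Context, ready func()) {
	ready()
	<-ctx.Done()
	for name, ch := range d.subscribers {
		close(ch)
		delete(d.subscribers, name)
	}
}

func main() {
	// Plain stand-ins for lifecycle.WithSignallerAndCancel: a cancellable
	// context plus a buffered channel a supervisor would use for fatal errors.
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)

	d := &demoDistributor{subscribers: map[string]chan string{
		"sub-a": make(chan string, 1),
	}}

	readyCh := make(chan struct{})
	go d.Start(ctx, func() { close(readyCh) })
	<-readyCh // only proceed once the component reports readiness

	// Shut down the way the tests do: cancel, then accept either the context
	// completing or an error surfacing on the error channel.
	cancel()
	select {
	case <-ctx.Done():
		fmt.Println("stopped cleanly")
	case err := <-errCh:
		fmt.Println("stopped with error:", err)
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for shutdown")
	}
}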
+func (g *GlobalEventDistributor) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { g.mu.Lock() - defer g.mu.Unlock() - - if g.running { - return nil - } - - g.ctx, g.cancel = context.WithCancel(ctx) + g.ctx = ctx g.running = true g.startTime = time.Now() distributorStartsTotal.WithLabelValues("global").Inc() - - g.wg.Add(1) + g.mu.Unlock() + ready() + g.wg.Add(2) go g.processEvents() - go g.trackUptime() - return nil -} - -// Stop gracefully shuts down the distributor -func (g *GlobalEventDistributor) Stop() error { - g.mu.Lock() - if !g.running { - g.mu.Unlock() - return nil - } - g.running = false - g.mu.Unlock() - - g.cancel() + <-ctx.Done() g.wg.Wait() - g.mu.Lock() + g.running = false for _, ch := range g.subscribers { close(ch) } g.subscribers = make(map[string]chan consensus.ControlEvent) - g.mu.Unlock() - distributorStopsTotal.WithLabelValues("global").Inc() distributorUptime.WithLabelValues("global").Set(0) - - return nil + g.mu.Unlock() } // Subscribe registers a new subscriber @@ -194,6 +176,7 @@ func (g *GlobalEventDistributor) broadcast(event consensus.ControlEvent) { // trackUptime periodically updates the uptime metric func (g *GlobalEventDistributor) trackUptime() { + defer g.wg.Done() ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() diff --git a/node/consensus/global/consensus_dynamic_committee.go b/node/consensus/global/consensus_dynamic_committee.go new file mode 100644 index 0000000..730b1c7 --- /dev/null +++ b/node/consensus/global/consensus_dynamic_committee.go @@ -0,0 +1,213 @@ +package global + +import ( + "bytes" + "encoding/binary" + "slices" + + "github.com/iden3/go-iden3-crypto/poseidon" + "github.com/pkg/errors" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/protobufs" + tconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus" +) + +type ConsensusWeightedIdentity struct { + prover *tconsensus.ProverInfo +} + +// Identity implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) Identity() models.Identity { + return models.Identity(c.prover.Address) +} + +// PublicKey implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) PublicKey() []byte { + return c.prover.PublicKey +} + +// Weight implements models.WeightedIdentity. +func (c *ConsensusWeightedIdentity) Weight() uint64 { + return c.prover.Seniority +} + +// IdentitiesByRank implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) IdentitiesByRank( + rank uint64, +) ([]models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return nil, errors.Wrap(err, "identities by rank") + } + + return internalProversToWeightedIdentity(proverInfo), nil +} + +// IdentitiesByState implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) IdentitiesByState( + stateID models.Identity, +) ([]models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return nil, errors.Wrap(err, "identities by state") + } + + return internalProversToWeightedIdentity(proverInfo), nil +} + +// IdentityByRank implements consensus.DynamicCommittee. 
+func (e *GlobalConsensusEngine) IdentityByRank( + rank uint64, + participantID models.Identity, +) (models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return nil, errors.Wrap(err, "identity by rank") + } + + var found *tconsensus.ProverInfo + for _, p := range proverInfo { + if bytes.Equal(p.Address, []byte(participantID)) { + found = p + break + } + } + + if found == nil { + return nil, errors.Wrap(errors.New("prover not found"), "identity by rank") + } + + return internalProverToWeightedIdentity(found), nil +} + +// IdentityByState implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) IdentityByState( + stateID models.Identity, + participantID models.Identity, +) (models.WeightedIdentity, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return nil, errors.Wrap(err, "identity by state") + } + + var found *tconsensus.ProverInfo + for _, p := range proverInfo { + if bytes.Equal(p.Address, []byte(participantID)) { + found = p + break + } + } + + if found == nil { + return nil, errors.Wrap(errors.New("prover not found"), "identity by state") + } + + return internalProverToWeightedIdentity(found), nil +} + +// LeaderForRank implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) LeaderForRank( + rank uint64, +) (models.Identity, error) { + lineage, err := e.globalTimeReel.GetLineage() + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + var found *protobufs.GlobalFrame + for _, l := range lineage { + if l.GetRank() == (rank - 1) { + found = l + break + } + } + + var selector models.Identity + if found == nil { + selector = models.Identity(make([]byte, 32)) + } else { + selector = found.Identity() + } + + inputBI, err := poseidon.HashBytes(slices.Concat( + []byte(selector), + binary.BigEndian.AppendUint64(nil, rank), + )) + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + input := inputBI.FillBytes(make([]byte, 32)) + prover, err := e.proverRegistry.GetNextProver([32]byte(input), nil) + if err != nil { + return "", errors.Wrap(err, "leader for rank") + } + + return models.Identity(prover), nil +} + +// QuorumThresholdForRank implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) QuorumThresholdForRank( + rank uint64, +) (uint64, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return 0, errors.Wrap(err, "quorum threshold for rank") + } + + total := uint64(0) + for _, p := range proverInfo { + total += p.Seniority + } + + return (total * 2) / 3, nil +} + +// Self implements consensus.DynamicCommittee. +func (e *GlobalConsensusEngine) Self() models.Identity { + return e.getPeerID().Identity() +} + +// TimeoutThresholdForRank implements consensus.DynamicCommittee. 
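QuorumThresholdForRank above weighs the committee by prover seniority rather than by head count, and TimeoutThresholdForRank below applies the same 2/3 rule after removing the current leader's seniority so that a very senior leader cannot block its own timeout. A small worked sketch of that arithmetic (plain Go, no dependencies; the prover shape is simplified for illustration):

package main

import "fmt"

// weightedProver is a simplified stand-in for the registry's prover info,
// keeping only the fields the threshold math needs.
type weightedProver struct {
	address   string
	seniority uint64
}

// quorumThreshold mirrors the 2/3-of-total-seniority rule.
func quorumThreshold(provers []weightedProver) uint64 {
	total := uint64(0)
	for _, p := range provers {
		total += p.seniority
	}
	return (total * 2) / 3
}

// timeoutThreshold applies the same rule but excludes the leader's own
// seniority, so timeouts can still be certified when the leader is the one
// that went silent.
func timeoutThreshold(provers []weightedProver, leader string) uint64 {
	total := uint64(0)
	for _, p := range provers {
		if p.address != leader {
			total += p.seniority
		}
	}
	return (total * 2) / 3
}

func main() {
	provers := []weightedProver{
		{address: "a", seniority: 90},
		{address: "b", seniority: 30},
		{address: "c", seniority: 30},
	}
	// Total seniority 150 -> quorum threshold 100.
	fmt.Println("quorum threshold:", quorumThreshold(provers))
	// If "a" (seniority 90) is the leader, the timeout pool is only 60, so
	// the timeout threshold drops to 40 and the remaining provers can still
	// time the leader out.
	fmt.Println("timeout threshold:", timeoutThreshold(provers, "a"))
}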
+func (e *GlobalConsensusEngine) TimeoutThresholdForRank( + rank uint64, +) (uint64, error) { + proverInfo, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return 0, errors.Wrap(err, "timeout threshold for rank") + } + + leader, err := e.LeaderForRank(rank) + if err != nil { + return 0, errors.Wrap(err, "timeout threshold for rank") + } + + total := uint64(0) + // 2/3 majority doesn't quite work in this scenario, because if the timing out + // prover has a high enough seniority it could get things stuck where no + // timeout can occur + for _, p := range proverInfo { + if !bytes.Equal(p.Address, []byte(leader)) { + total += p.Seniority + } + } + + return (total * 2) / 3, nil +} + +func internalProversToWeightedIdentity( + provers []*tconsensus.ProverInfo, +) []models.WeightedIdentity { + wis := []models.WeightedIdentity{} + for _, p := range provers { + wis = append(wis, internalProverToWeightedIdentity(p)) + } + + return wis +} + +func internalProverToWeightedIdentity( + prover *tconsensus.ProverInfo, +) models.WeightedIdentity { + return &ConsensusWeightedIdentity{prover} +} diff --git a/node/consensus/global/consensus_leader_provider.go b/node/consensus/global/consensus_leader_provider.go index e27a16d..54d87d6 100644 --- a/node/consensus/global/consensus_leader_provider.go +++ b/node/consensus/global/consensus_leader_provider.go @@ -5,23 +5,29 @@ import ( "context" "encoding/binary" "encoding/hex" + "math/big" "time" "github.com/iden3/go-iden3-crypto/poseidon" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/crypto/sha3" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" "source.quilibrium.com/quilibrium/monorepo/protobufs" + "source.quilibrium.com/quilibrium/monorepo/types/tries" ) // GlobalLeaderProvider implements LeaderProvider type GlobalLeaderProvider struct { - engine *GlobalConsensusEngine + engine *GlobalConsensusEngine + collected GlobalCollectedCommitments } func (p *GlobalLeaderProvider) GetNextLeaders( - prior **protobufs.GlobalFrame, ctx context.Context, + prior **protobufs.GlobalFrame, ) ([]GlobalPeerID, error) { // Get the parent selector for next prover calculation if prior == nil || *prior == nil || (*prior).Header == nil || @@ -60,16 +66,62 @@ func (p *GlobalLeaderProvider) GetNextLeaders( } func (p *GlobalLeaderProvider) ProveNextState( - prior **protobufs.GlobalFrame, - collected GlobalCollectedCommitments, ctx context.Context, + rank uint64, + filter []byte, + priorState models.Identity, ) (**protobufs.GlobalFrame, error) { + _, err := p.engine.livenessProvider.Collect(ctx) + if err != nil { + return nil, models.NewNoVoteErrorf("could not collect: %+w", err) + } + timer := prometheus.NewTimer(frameProvingDuration) defer timer.ObserveDuration() - if prior == nil || *prior == nil { + prior, err := p.engine.clockStore.GetLatestGlobalClockFrame() + if err != nil { frameProvingTotal.WithLabelValues("error").Inc() - return nil, errors.Wrap(errors.New("nil prior frame"), "prove next state") + return nil, models.NewNoVoteErrorf("could not collect: %+w", err) + } + + if prior == nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf("missing prior frame") + } + + if prior.Identity() != priorState { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf( + "building on fork or needs sync: frame %d, rank %d, parent_id: %x, asked: rank %d, id: %x", + 
prior.Header.FrameNumber, + prior.Header.Rank, + prior.Header.ParentSelector, + rank, + priorState, + ) + } + + // Get prover index + provers, err := p.engine.proverRegistry.GetActiveProvers(nil) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, errors.Wrap(err, "prove next state") + } + + proverIndex := uint8(0) + found := false + for i, prover := range provers { + if bytes.Equal(prover.Address, p.engine.getProverAddress()) { + proverIndex = uint8(i) + found = true + break + } + } + + if !found { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, models.NewNoVoteErrorf("not a prover") } p.engine.logger.Info( @@ -81,16 +133,13 @@ func (p *GlobalLeaderProvider) ProveNextState( signer, _, _, _ := p.engine.GetProvingKey(p.engine.config.Engine) if signer == nil { frameProvingTotal.WithLabelValues("error").Inc() - return nil, errors.Wrap( - errors.New("no proving key available"), - "prove next state", - ) + return nil, models.NewNoVoteErrorf("not a prover") } // Get current timestamp and difficulty - timestamp := time.Now().UnixMilli() + 30000 + timestamp := time.Now().UnixMilli() difficulty := p.engine.difficultyAdjuster.GetNextDifficulty( - (*prior).Rank()+1, + rank, timestamp, ) @@ -103,36 +152,6 @@ func (p *GlobalLeaderProvider) ProveNextState( p.engine.currentDifficulty = uint32(difficulty) p.engine.currentDifficultyMu.Unlock() - // Get prover index - provers, err := p.engine.proverRegistry.GetActiveProvers(nil) - if err != nil { - frameProvingTotal.WithLabelValues("error").Inc() - return nil, errors.Wrap(err, "prove next state") - } - - proverIndex := uint8(0) - for i, prover := range provers { - if bytes.Equal(prover.Address, p.engine.getProverAddress()) { - proverIndex = uint8(i) - break - } - } - - // Prove the global frame header - newHeader, err := p.engine.frameProver.ProveGlobalFrameHeader( - (*prior).Header, - p.engine.shardCommitments, - p.engine.proverRoot, - signer, - timestamp, - uint32(difficulty), - proverIndex, - ) - if err != nil { - frameProvingTotal.WithLabelValues("error").Inc() - return nil, errors.Wrap(err, "prove next state") - } - // Convert collected messages to MessageBundles requests := make( []*protobufs.MessageBundle, @@ -143,6 +162,7 @@ func (p *GlobalLeaderProvider) ProveNextState( "including messages", zap.Int("message_count", len(p.engine.collectedMessages)), ) + requestTree := &tries.VectorCommitmentTree{} for _, msgData := range p.engine.collectedMessages { // Check if data is long enough to contain type prefix if len(msgData) < 4 { @@ -179,9 +199,40 @@ func (p *GlobalLeaderProvider) ProveNextState( if messageBundle.Timestamp == 0 { messageBundle.Timestamp = time.Now().UnixMilli() } + + id := sha3.Sum256(msgData) + err := requestTree.Insert(id[:], msgData, nil, big.NewInt(0)) + if err != nil { + p.engine.logger.Warn( + "failed to add global request", + zap.Error(err), + ) + continue + } + requests = append(requests, messageBundle) } + requestRoot := requestTree.Commit(p.engine.inclusionProver, false) + + // Prove the global frame header + newHeader, err := p.engine.frameProver.ProveGlobalFrameHeader( + (*prior).Header, + p.engine.shardCommitments, + p.engine.proverRoot, + requestRoot, + signer, + timestamp, + uint32(difficulty), + proverIndex, + ) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, errors.Wrap(err, "prove next state") + } + newHeader.Prover = p.engine.getProverAddress() + newHeader.Rank = rank + // Create the new global frame with requests newFrame := 
&protobufs.GlobalFrame{ Header: newHeader, @@ -202,3 +253,5 @@ func (p *GlobalLeaderProvider) ProveNextState( return &newFrame, nil } + +var _ consensus.LeaderProvider[*protobufs.GlobalFrame, GlobalPeerID, GlobalCollectedCommitments] = (*GlobalLeaderProvider)(nil) diff --git a/node/consensus/global/consensus_liveness_provider.go b/node/consensus/global/consensus_liveness_provider.go index ff79b6c..7c6b1f4 100644 --- a/node/consensus/global/consensus_liveness_provider.go +++ b/node/consensus/global/consensus_liveness_provider.go @@ -5,7 +5,6 @@ import ( "context" "math/big" "slices" - "time" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -186,68 +185,6 @@ func (p *GlobalLivenessProvider) Collect( }, nil } -func (p *GlobalLivenessProvider) SendLiveness( - prior **protobufs.GlobalFrame, - collected GlobalCollectedCommitments, - ctx context.Context, -) error { - // Get prover key - signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) - if publicKey == nil { - return errors.Wrap( - errors.New("no proving key available for liveness check"), - "send liveness", - ) - } - - // Create liveness check message - livenessCheck := &protobufs.ProverLivenessCheck{ - FrameNumber: collected.frameNumber, - Timestamp: time.Now().UnixMilli(), - CommitmentHash: collected.commitmentHash, - } - - // Sign the message - signatureData, err := livenessCheck.ConstructSignaturePayload() - if err != nil { - return errors.Wrap(err, "send liveness") - } - - sig, err := signer.SignWithDomain( - signatureData, - livenessCheck.GetSignatureDomain(), - ) - if err != nil { - return errors.Wrap(err, "send liveness") - } - - proverAddress := p.engine.getAddressFromPublicKey(publicKey) - livenessCheck.PublicKeySignatureBls48581 = &protobufs.BLS48581AddressedSignature{ - Address: proverAddress, - Signature: sig, - } - - data, err := livenessCheck.ToCanonicalBytes() - if err != nil { - return errors.Wrap(err, "send liveness") - } - - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_CONSENSUS_BITMASK, - data, - ); err != nil { - p.engine.logger.Error("could not publish", zap.Error(err)) - return errors.Wrap(err, "send liveness") - } - - p.engine.logger.Info( - "sent liveness check", - zap.Uint64("frame_number", collected.frameNumber), - ) - - return nil -} - func (p *GlobalLivenessProvider) validateAndLockMessage( frameNumber uint64, i int, diff --git a/node/consensus/global/consensus_sync_provider.go b/node/consensus/global/consensus_sync_provider.go index 6bd22a2..5eacc5c 100644 --- a/node/consensus/global/consensus_sync_provider.go +++ b/node/consensus/global/consensus_sync_provider.go @@ -8,17 +8,85 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + mn "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/internal/frametime" + "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/protobufs" + "source.quilibrium.com/quilibrium/monorepo/types/channel" "source.quilibrium.com/quilibrium/monorepo/types/tries" ) +const defaultStateQueueCapacity = 10 + +type syncRequest struct { + frameNumber uint64 + peerId []byte +} + // GlobalSyncProvider implements SyncProvider type GlobalSyncProvider struct { - engine *GlobalConsensusEngine + // TODO(2.1.1+): Refactor out direct use of engine + engine 
*GlobalConsensusEngine + queuedStates chan syncRequest +} + +func NewGlobalSyncProvider( + engine *GlobalConsensusEngine, +) *GlobalSyncProvider { + return &GlobalSyncProvider{ + engine: engine, + queuedStates: make(chan syncRequest, defaultStateQueueCapacity), + } +} + +func (p *GlobalSyncProvider) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + ready() + for { + select { + case <-ctx.Done(): + return + case request := <-p.queuedStates: + finalized := p.engine.forks.FinalizedState() + if request.frameNumber <= + (*p.engine.forks.FinalizedState().State).Header.FrameNumber { + continue + } + p.engine.logger.Info( + "synchronizing with peer", + zap.String("peer", peer.ID(request.peerId).String()), + zap.Uint64("finalized_rank", finalized.Rank), + zap.Uint64("peer_frame", request.frameNumber), + ) + p.processState( + ctx, + request.frameNumber, + request.peerId, + ) + } + } +} + +func (p *GlobalSyncProvider) processState( + ctx context.Context, + frameNumber uint64, + peerID []byte, +) { + err := p.syncWithPeer( + ctx, + frameNumber, + peerID, + ) + if err != nil { + p.engine.logger.Error("could not sync with peer", zap.Error(err)) + } } func (p *GlobalSyncProvider) Synchronize( @@ -80,14 +148,11 @@ func (p *GlobalSyncProvider) Synchronize( } if !hasFrame { - p.engine.logger.Info("initializing genesis") - genesis := p.engine.initializeGenesis() - dataCh <- &genesis - errCh <- nil + errCh <- errors.New("no frame") return } - err = p.syncWithMesh() + err = p.syncWithMesh(ctx) if err != nil { dataCh <- existing errCh <- err @@ -115,7 +180,7 @@ func (p *GlobalSyncProvider) Synchronize( return dataCh, errCh } -func (p *GlobalSyncProvider) syncWithMesh() error { +func (p *GlobalSyncProvider) syncWithMesh(ctx context.Context) error { p.engine.logger.Info("synchronizing with peers") latest, err := p.engine.globalTimeReel.GetHead() @@ -165,7 +230,7 @@ func (p *GlobalSyncProvider) syncWithMesh() error { latest = head } - latest, err = p.syncWithPeer(latest, []byte(peerID)) + err = p.syncWithPeer(ctx, latest.Header.FrameNumber, []byte(peerID)) if err != nil { p.engine.logger.Debug("error syncing frame", zap.Error(err)) } @@ -181,92 +246,162 @@ func (p *GlobalSyncProvider) syncWithMesh() error { } func (p *GlobalSyncProvider) syncWithPeer( - latest *protobufs.GlobalFrame, + ctx context.Context, + frameNumber uint64, peerId []byte, -) (*protobufs.GlobalFrame, error) { +) error { p.engine.logger.Info( "polling peer for new frames", zap.String("peer_id", peer.ID(peerId).String()), - zap.Uint64("current_frame", latest.Header.FrameNumber), + zap.Uint64("current_frame", frameNumber), ) - syncTimeout := p.engine.config.Engine.SyncTimeout - dialCtx, cancelDial := context.WithTimeout(p.engine.ctx, syncTimeout) - defer cancelDial() - cc, err := p.engine.pubsub.GetDirectChannel(dialCtx, peerId, "sync") - if err != nil { - p.engine.logger.Debug( - "could not establish direct channel", - zap.Error(err), + info := p.engine.peerInfoManager.GetPeerInfo(peerId) + if info == nil { + p.engine.logger.Info( + "no peer info known yet, skipping sync", + zap.String("peer", peer.ID(peerId).String()), ) - return latest, errors.Wrap(err, "sync") + return nil } - defer func() { - if err := cc.Close(); err != nil { - p.engine.logger.Error("error while closing connection", zap.Error(err)) - } - }() - client := protobufs.NewGlobalServiceClient(cc) - for { - getCtx, cancelGet := context.WithTimeout(p.engine.ctx, syncTimeout) - response, err := client.GetGlobalFrame( - getCtx, - 
&protobufs.GetGlobalFrameRequest{ - FrameNumber: latest.Header.FrameNumber + 1, - }, - // The message size limits are swapped because the server is the one - // sending the data. - grpc.MaxCallRecvMsgSize( - p.engine.config.Engine.SyncMessageLimits.MaxSendMsgSize, - ), - grpc.MaxCallSendMsgSize( - p.engine.config.Engine.SyncMessageLimits.MaxRecvMsgSize, - ), + if len(info.Reachability) == 0 { + p.engine.logger.Info( + "no reachability info known yet, skipping sync", + zap.String("peer", peer.ID(peerId).String()), + ) + return nil + } + + syncTimeout := p.engine.config.Engine.SyncTimeout + for _, s := range info.Reachability[0].StreamMultiaddrs { + creds, err := p2p.NewPeerAuthenticator( + p.engine.logger, + p.engine.config.P2P, + nil, + nil, + nil, + nil, + [][]byte{[]byte(peerId)}, + map[string]channel.AllowedPeerPolicyType{}, + map[string]channel.AllowedPeerPolicyType{}, + ).CreateClientTLSCredentials([]byte(peerId)) + if err != nil { + return errors.Wrap(err, "sync") + } + + ma, err := multiaddr.StringCast(s) + if err != nil { + return errors.Wrap(err, "sync") + } + + mga, err := mn.ToNetAddr(ma) + if err != nil { + return errors.Wrap(err, "sync") + } + + cc, err := grpc.NewClient( + mga.String(), + grpc.WithTransportCredentials(creds), ) - cancelGet() if err != nil { p.engine.logger.Debug( - "could not get frame", + "could not establish direct channel, trying next multiaddr", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), zap.Error(err), ) - return latest, errors.Wrap(err, "sync") + continue } + defer func() { + if err := cc.Close(); err != nil { + p.engine.logger.Error("error while closing connection", zap.Error(err)) + } + }() - if response == nil { - p.engine.logger.Debug("received no response from peer") - return latest, nil + client := protobufs.NewGlobalServiceClient(cc) + inner: + for { + getCtx, cancelGet := context.WithTimeout(ctx, syncTimeout) + response, err := client.GetGlobalProposal( + getCtx, + &protobufs.GetGlobalProposalRequest{ + FrameNumber: frameNumber, + }, + // The message size limits are swapped because the server is the one + // sending the data. 
+ grpc.MaxCallRecvMsgSize( + p.engine.config.Engine.SyncMessageLimits.MaxSendMsgSize, + ), + grpc.MaxCallSendMsgSize( + p.engine.config.Engine.SyncMessageLimits.MaxRecvMsgSize, + ), + ) + cancelGet() + if err != nil { + p.engine.logger.Debug( + "could not get frame, trying next multiaddr", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), + zap.Error(err), + ) + break inner + } + + if response == nil { + p.engine.logger.Debug( + "received no response from peer", + zap.String("peer", peer.ID(peerId).String()), + zap.String("multiaddr", ma.String()), + zap.Error(err), + ) + break inner + } + + if response.Proposal == nil || response.Proposal.State == nil || + response.Proposal.State.Header == nil || + response.Proposal.State.Header.FrameNumber != frameNumber { + p.engine.logger.Debug("received empty response from peer") + return nil + } + if err := response.Proposal.Validate(); err != nil { + p.engine.logger.Debug( + "received invalid response from peer", + zap.Error(err), + ) + return nil + } + + p.engine.logger.Info( + "received new leading frame", + zap.Uint64("frame_number", response.Proposal.State.Header.FrameNumber), + zap.Duration( + "frame_age", + frametime.GlobalFrameSince(response.Proposal.State), + ), + ) + + if _, err := p.engine.frameProver.VerifyGlobalFrameHeader( + response.Proposal.State.Header, + p.engine.blsConstructor, + ); err != nil { + return errors.Wrap(err, "sync") + } + + p.engine.globalProposalQueue <- response.Proposal + frameNumber = frameNumber + 1 } - - if response.Frame == nil || response.Frame.Header == nil || - response.Frame.Header.FrameNumber != latest.Header.FrameNumber+1 || - response.Frame.Header.Timestamp < latest.Header.Timestamp { - p.engine.logger.Debug("received invalid response from peer") - return latest, nil - } - p.engine.logger.Info( - "received new leading frame", - zap.Uint64("frame_number", response.Frame.Header.FrameNumber), - zap.Duration("frame_age", frametime.GlobalFrameSince(response.Frame)), - ) - - if _, err := p.engine.frameProver.VerifyGlobalFrameHeader( - response.Frame.Header, - p.engine.blsConstructor, - ); err != nil { - return latest, errors.Wrap(err, "sync") - } - - err = p.engine.globalTimeReel.Insert(p.engine.ctx, response.Frame) - if err != nil { - return latest, errors.Wrap(err, "sync") - } - - latest = response.Frame } + + p.engine.logger.Debug( + "failed to complete sync for all known multiaddrs", + zap.String("peer", peer.ID(peerId).String()), + ) + return nil } func (p *GlobalSyncProvider) hyperSyncWithProver( + ctx context.Context, prover []byte, shardKey tries.ShardKey, ) { @@ -278,7 +413,7 @@ func (p *GlobalSyncProvider) hyperSyncWithProver( peerId, err := peer.IDFromPublicKey(pubKey) if err == nil { ch, err := p.engine.pubsub.GetDirectChannel( - p.engine.ctx, + ctx, []byte(peerId), "sync", ) @@ -286,7 +421,7 @@ func (p *GlobalSyncProvider) hyperSyncWithProver( if err == nil { defer ch.Close() client := protobufs.NewHypergraphComparisonServiceClient(ch) - str, err := client.HyperStream(p.engine.ctx) + str, err := client.HyperStream(ctx) if err != nil { p.engine.logger.Error("error from sync", zap.Error(err)) } else { @@ -360,3 +495,30 @@ func (p *GlobalSyncProvider) hyperSyncHyperedgeRemoves( } str.CloseSend() } + +func (p *GlobalSyncProvider) AddState( + sourcePeerID []byte, + frameNumber uint64, +) { + // Drop if we're within the threshold + if frameNumber <= + (*p.engine.forks.FinalizedState().State).Header.FrameNumber { + p.engine.logger.Debug("dropping stale state for 
sync") + return + } + + // Enqueue if we can, otherwise drop it because we'll catch up + select { + case p.queuedStates <- syncRequest{ + frameNumber: frameNumber, + peerId: sourcePeerID, + }: + p.engine.logger.Debug( + "enqueued sync request", + zap.String("peer", peer.ID(sourcePeerID).String()), + zap.Uint64("enqueued_frame_number", frameNumber), + ) + default: + p.engine.logger.Debug("no queue capacity, dropping state for sync") + } +} diff --git a/node/consensus/global/consensus_transition_listener.go b/node/consensus/global/consensus_transition_listener.go deleted file mode 100644 index af0a3ee..0000000 --- a/node/consensus/global/consensus_transition_listener.go +++ /dev/null @@ -1,44 +0,0 @@ -package global - -import ( - "go.uber.org/zap" - "source.quilibrium.com/quilibrium/monorepo/consensus" -) - -type GlobalTracer struct { - logger *zap.Logger -} - -func (t *GlobalTracer) Trace(message string) { - t.logger.Debug(message) -} - -func (t *GlobalTracer) Error(message string, err error) { - t.logger.Error(message, zap.Error(err)) -} - -// GlobalTransitionListener handles state transitions -type GlobalTransitionListener struct { - engine *GlobalConsensusEngine - logger *zap.Logger -} - -func (l *GlobalTransitionListener) OnTransition( - from consensus.State, - to consensus.State, - event consensus.Event, -) { - // Update metrics based on state - switch to { - case consensus.StateLoading: - engineState.Set(2) // EngineStateLoading - case consensus.StateCollecting: - engineState.Set(3) // EngineStateCollecting - case consensus.StateProving: - engineState.Set(4) // EngineStateProving - case consensus.StatePublishing: - engineState.Set(5) // EngineStatePublishing - case consensus.StateVerifying: - engineState.Set(6) // EngineStateVerifying - } -} diff --git a/node/consensus/global/consensus_types.go b/node/consensus/global/consensus_types.go index 43523ae..6940d0a 100644 --- a/node/consensus/global/consensus_types.go +++ b/node/consensus/global/consensus_types.go @@ -5,7 +5,7 @@ import ( "encoding/hex" "slices" - "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) // Type aliases for consensus types @@ -13,15 +13,35 @@ type GlobalPeerID struct { ID []byte } -func (p GlobalPeerID) Identity() consensus.Identity { - return hex.EncodeToString(p.ID) +// GetRank implements models.Unique. +func (p GlobalPeerID) GetRank() uint64 { + return 0 +} + +// GetSignature implements models.Unique. +func (p GlobalPeerID) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (p GlobalPeerID) GetTimestamp() uint64 { + return 0 +} + +// Source implements models.Unique. +func (p GlobalPeerID) Source() models.Identity { + return "" +} + +func (p GlobalPeerID) Identity() models.Identity { + return models.Identity(p.ID) } func (p GlobalPeerID) Rank() uint64 { return 0 } -func (p GlobalPeerID) Clone() consensus.Unique { +func (p GlobalPeerID) Clone() models.Unique { return GlobalPeerID{ ID: slices.Clone(p.ID), } @@ -29,12 +49,33 @@ func (p GlobalPeerID) Clone() consensus.Unique { // GlobalCollectedCommitments represents collected commitments type GlobalCollectedCommitments struct { + rank uint64 frameNumber uint64 commitmentHash []byte prover []byte } -func (c GlobalCollectedCommitments) Identity() consensus.Identity { +// GetRank implements models.Unique. +func (c GlobalCollectedCommitments) GetRank() uint64 { + return c.rank +} + +// GetSignature implements models.Unique. 
+func (c GlobalCollectedCommitments) GetSignature() []byte { + return []byte{} +} + +// GetTimestamp implements models.Unique. +func (c GlobalCollectedCommitments) GetTimestamp() uint64 { + return 0 +} + +// Source implements models.Unique. +func (c GlobalCollectedCommitments) Source() models.Identity { + return models.Identity(c.prover) +} + +func (c GlobalCollectedCommitments) Identity() models.Identity { return hex.EncodeToString( slices.Concat( binary.BigEndian.AppendUint64(nil, c.frameNumber), @@ -48,7 +89,7 @@ func (c GlobalCollectedCommitments) Rank() uint64 { return c.frameNumber } -func (c GlobalCollectedCommitments) Clone() consensus.Unique { +func (c GlobalCollectedCommitments) Clone() models.Unique { return GlobalCollectedCommitments{ frameNumber: c.frameNumber, commitmentHash: slices.Clone(c.commitmentHash), diff --git a/node/consensus/global/consensus_voting_provider.go b/node/consensus/global/consensus_voting_provider.go index 92ed5c1..68b9650 100644 --- a/node/consensus/global/consensus_voting_provider.go +++ b/node/consensus/global/consensus_voting_provider.go @@ -1,509 +1,171 @@ package global import ( - "bytes" "context" - "encoding/hex" - "sync" "time" - "github.com/iden3/go-iden3-crypto/poseidon" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "google.golang.org/protobuf/proto" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) // GlobalVotingProvider implements VotingProvider type GlobalVotingProvider struct { - engine *GlobalConsensusEngine - proposalVotes map[consensus.Identity]map[consensus.Identity]**protobufs.FrameVote - mu sync.RWMutex + engine *GlobalConsensusEngine } -func (p *GlobalVotingProvider) SendProposal( - proposal **protobufs.GlobalFrame, +// FinalizeQuorumCertificate implements consensus.VotingProvider. 
+func (p *GlobalVotingProvider) FinalizeQuorumCertificate( ctx context.Context, -) error { - timer := prometheus.NewTimer(framePublishingDuration) - defer timer.ObserveDuration() - - if proposal == nil || (*proposal).Header == nil { - framePublishingTotal.WithLabelValues("error").Inc() - return errors.Wrap( - errors.New("invalid proposal"), - "send proposal", - ) - } - - // Store the frame - frameID := p.engine.globalTimeReel.ComputeFrameID(*proposal) - p.engine.frameStoreMu.Lock() - p.engine.frameStore[frameID] = (*proposal) - p.engine.frameStoreMu.Unlock() - - p.engine.logger.Info( - "sending global proposal", - zap.Uint64("frame_number", (*proposal).Header.FrameNumber), - ) - - // Serialize the frame using canonical bytes - frameData, err := (*proposal).ToCanonicalBytes() + state *models.State[*protobufs.GlobalFrame], + aggregatedSignature models.AggregatedSignature, +) (models.QuorumCertificate, error) { + cloned := (*state.State).Clone().(*protobufs.GlobalFrame) + cloned.Header.PublicKeySignatureBls48581 = + &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + } + frameBytes, err := cloned.ToCanonicalBytes() if err != nil { - p.engine.logger.Error("could not serialize frame", zap.Error(err)) - framePublishingTotal.WithLabelValues("error").Inc() - return errors.Wrap(err, "serialize global frame") + return nil, errors.Wrap(err, "finalize quorum certificate") } - // Publish to the global consensus bitmask - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_CONSENSUS_BITMASK, - frameData, - ); err != nil { - p.engine.logger.Error("could not publish frame", zap.Error(err)) - framePublishingTotal.WithLabelValues("error").Inc() - return errors.Wrap(err, "send proposal") - } + p.engine.pubsub.PublishToBitmask(GLOBAL_FRAME_BITMASK, frameBytes) - framePublishingTotal.WithLabelValues("success").Inc() - return nil + return &protobufs.QuorumCertificate{ + Rank: (*state.State).GetRank(), + FrameNumber: (*state.State).Header.FrameNumber, + Selector: []byte((*state.State).Identity()), + Timestamp: uint64(time.Now().UnixMilli()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + }, + }, nil } -func (p *GlobalVotingProvider) DecideAndSendVote( - proposals map[consensus.Identity]**protobufs.GlobalFrame, +// FinalizeTimeout implements consensus.VotingProvider. 
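FinalizeQuorumCertificate above carries the aggregated BLS signature together with a signer bitmask, one bit per position in the ordered active-prover set (the same byte-packed convention the removed FinalizeVotes code uses further down). A small sketch of building and reading such a bitmask, independent of the BLS types:

package main

import (
	"fmt"
	"math/bits"
)

// buildBitmask sets bit i for every index in signerIndexes over a committee
// of size n, using the layout from the patch: byte i/8, bit i%8.
func buildBitmask(n int, signerIndexes []int) []byte {
	mask := make([]byte, (n+7)/8)
	for _, i := range signerIndexes {
		mask[i/8] |= 1 << (i % 8)
	}
	return mask
}

// countSigners returns how many bits are set in the mask.
func countSigners(mask []byte) int {
	total := 0
	for _, b := range mask {
		total += bits.OnesCount8(b)
	}
	return total
}

func main() {
	// Committee of 10 provers; indexes 0, 3 and 9 signed.
	mask := buildBitmask(10, []int{0, 3, 9})
	fmt.Printf("bitmask: %08b (2 bytes for 10 provers)\n", mask)
	fmt.Println("signers:", countSigners(mask)) // 3
}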
+func (p *GlobalVotingProvider) FinalizeTimeout( ctx context.Context, -) (GlobalPeerID, **protobufs.FrameVote, error) { - var chosenProposal *protobufs.GlobalFrame - var chosenID consensus.Identity - parentFrame := p.engine.GetFrame() - - // Get parent selector for validating continuity - var parentSelector []byte - if parentFrame != nil && parentFrame.Header != nil { - parentSelectorBI, _ := poseidon.HashBytes(parentFrame.Header.Output) - parentSelector = parentSelectorBI.FillBytes(make([]byte, 32)) - } - - // Get ordered provers to prioritize proposals - provers, err := p.engine.proverRegistry.GetOrderedProvers( - [32]byte(parentSelector), - nil, // global consensus uses nil filter - ) - if err != nil { - p.engine.logger.Error("could not get prover list", zap.Error(err)) - return GlobalPeerID{}, nil, errors.Wrap(err, "decide and send vote") - } - - // Check proposals in prover order - for _, proverID := range provers { - prop := proposals[GlobalPeerID{ID: proverID}.Identity()] - if prop == nil { - continue - } - - // Validate the proposal - valid, err := p.engine.frameValidator.Validate((*prop)) - if err != nil { - p.engine.logger.Debug("proposal validation error", zap.Error(err)) - continue - } - - // Check parent continuity - if parentFrame != nil && parentFrame.Header != nil { - if !bytes.Equal((*prop).Header.ParentSelector, parentSelector) { - p.engine.logger.Debug( - "proposed frame out of sequence", - zap.String( - "proposed_parent_selector", - hex.EncodeToString((*prop).Header.ParentSelector), - ), - zap.String( - "target_parent_selector", - hex.EncodeToString(parentSelector), - ), - zap.Uint64("proposed_frame_number", (*prop).Header.FrameNumber), - zap.Uint64("target_frame_number", parentFrame.Header.FrameNumber+1), - ) - continue - } - } - - if valid { - chosenProposal = (*prop) - chosenID = GlobalPeerID{ID: proverID}.Identity() - break - } - } - - if chosenProposal == nil { - p.engine.logger.Error("proposal is nil") - return GlobalPeerID{}, nil, errors.Wrap( - errors.New("no valid proposals to vote on"), - "decide and send vote", - ) - } + rank uint64, + latestQuorumCertificate models.QuorumCertificate, + latestQuorumCertificateRanks []uint64, + aggregatedSignature models.AggregatedSignature, +) (models.TimeoutCertificate, error) { + return &protobufs.TimeoutCertificate{ + Rank: rank, + LatestRanks: latestQuorumCertificateRanks, + LatestQuorumCertificate: latestQuorumCertificate.(*protobufs.QuorumCertificate), + Timestamp: uint64(time.Now().UnixMilli()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: aggregatedSignature.GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: aggregatedSignature.GetPubKey(), + }, + Bitmask: aggregatedSignature.GetBitmask(), + }, + }, nil +} +// SignTimeoutVote implements consensus.VotingProvider. 
+func (p *GlobalVotingProvider) SignTimeoutVote( + ctx context.Context, + filter []byte, + currentRank uint64, + newestQuorumCertificateRank uint64, +) (**protobufs.ProposalVote, error) { // Get signing key signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) if publicKey == nil { p.engine.logger.Error("no proving key available") - return GlobalPeerID{}, nil, errors.Wrap( + return nil, errors.Wrap( errors.New("no proving key available for voting"), - "decide and send vote", + "sign vote", ) } // Create vote (signature) - signatureData, err := p.engine.frameProver.GetGlobalFrameSignaturePayload( - chosenProposal.Header, + signatureData := verification.MakeTimeoutMessage( + nil, + currentRank, + newestQuorumCertificateRank, ) - if err != nil { - p.engine.logger.Error("could not get signature payload", zap.Error(err)) - return GlobalPeerID{}, nil, errors.Wrap(err, "decide and send vote") - } - sig, err := signer.SignWithDomain(signatureData, []byte("global")) + sig, err := signer.SignWithDomain(signatureData, []byte("globaltimeout")) if err != nil { p.engine.logger.Error("could not sign vote", zap.Error(err)) - return GlobalPeerID{}, nil, errors.Wrap(err, "decide and send vote") + return nil, errors.Wrap(err, "sign vote") } voterAddress := p.engine.getAddressFromPublicKey(publicKey) - // Extract proposer ID from the chosen proposal - var proposerID []byte - for _, proverID := range provers { - if (GlobalPeerID{ID: proverID}).Identity() == chosenID { - proposerID = proverID - break - } - } - // Create vote message - vote := &protobufs.FrameVote{ - FrameNumber: chosenProposal.Header.FrameNumber, - Proposer: proposerID, - Approve: true, - Timestamp: time.Now().UnixMilli(), + vote := &protobufs.ProposalVote{ + FrameNumber: 0, + Rank: currentRank, + Selector: nil, + Timestamp: uint64(time.Now().UnixMilli()), PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ Address: voterAddress, Signature: sig, }, } - data, err := vote.ToCanonicalBytes() - if err != nil { - return GlobalPeerID{}, nil, errors.Wrap(err, "serialize vote") + return &vote, nil +} + +// SignVote implements consensus.VotingProvider. 
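SignTimeoutVote above and SignVote below sign structurally similar payloads, so they are separated by signing domain ("globaltimeout" versus "global"): a signature produced for a timeout can never be replayed as a proposal vote. A toy illustration of domain separation, using an HMAC as a deliberately simplified stand-in for the BLS48-581 SignWithDomain call (the real scheme is not an HMAC, and MakeVoteMessage/MakeTimeoutMessage encode more than shown):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// signWithDomain is a toy stand-in for the prover key's SignWithDomain: the
// domain tag is bound into the MAC so the same payload signed under
// different domains yields unrelated signatures.
func signWithDomain(key, payload, domain []byte) string {
	mac := hmac.New(sha256.New, key)
	mac.Write(domain)
	mac.Write(payload)
	return hex.EncodeToString(mac.Sum(nil))
}

// votePayload is a simplified analogue of a vote message: a rank plus an
// identifier, big-endian encoded.
func votePayload(rank uint64, id []byte) []byte {
	return append(binary.BigEndian.AppendUint64(nil, rank), id...)
}

func main() {
	key := []byte("example-prover-key")
	payload := votePayload(42, []byte("frame-selector"))

	proposalSig := signWithDomain(key, payload, []byte("global"))
	timeoutSig := signWithDomain(key, payload, []byte("globaltimeout"))

	fmt.Println("proposal vote sig:", proposalSig[:16], "...")
	fmt.Println("timeout vote sig: ", timeoutSig[:16], "...")
	// Identical payload, different domains, different signatures: a timeout
	// vote cannot be replayed as a proposal vote.
	fmt.Println("equal:", proposalSig == timeoutSig)
}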
+func (p *GlobalVotingProvider) SignVote( + ctx context.Context, + state *models.State[*protobufs.GlobalFrame], +) (**protobufs.ProposalVote, error) { + // Get signing key + signer, _, publicKey, _ := p.engine.GetProvingKey(p.engine.config.Engine) + if publicKey == nil { + p.engine.logger.Error("no proving key available") + return nil, errors.Wrap( + errors.New("no proving key available for voting"), + "sign vote", + ) } - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_CONSENSUS_BITMASK, - data, - ); err != nil { - p.engine.logger.Error("failed to publish vote", zap.Error(err)) - } - - // Store our vote - p.mu.Lock() - if _, ok := p.proposalVotes[chosenID]; !ok { - p.proposalVotes[chosenID] = map[consensus.Identity]**protobufs.FrameVote{} - } - p.proposalVotes[chosenID][p.engine.getPeerID().Identity()] = &vote - p.mu.Unlock() - - p.engine.logger.Info( - "decided and sent vote", - zap.Uint64("frame_number", chosenProposal.Header.FrameNumber), - zap.String("for_proposal", chosenID), + // Create vote (signature) + signatureData := verification.MakeVoteMessage( + nil, + state.Rank, + state.Identifier, ) - - return GlobalPeerID{ID: proposerID}, &vote, nil -} - -func (p *GlobalVotingProvider) SendVote( - vote **protobufs.FrameVote, - ctx context.Context, -) (GlobalPeerID, error) { - if vote == nil || *vote == nil { - return GlobalPeerID{}, errors.Wrap( - errors.New("no vote provided"), - "send vote", - ) - } - - bumpVote := &protobufs.FrameVote{ - FrameNumber: (*vote).FrameNumber, - Proposer: (*vote).Proposer, - Approve: true, - Timestamp: time.Now().UnixMilli(), - PublicKeySignatureBls48581: (*vote).PublicKeySignatureBls48581, - } - - data, err := (*bumpVote).ToCanonicalBytes() + sig, err := signer.SignWithDomain(signatureData, []byte("global")) if err != nil { - return GlobalPeerID{}, errors.Wrap(err, "serialize vote") + p.engine.logger.Error("could not sign vote", zap.Error(err)) + return nil, errors.Wrap(err, "sign vote") } - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_CONSENSUS_BITMASK, - data, - ); err != nil { - p.engine.logger.Error("failed to publish vote", zap.Error(err)) - } + voterAddress := p.engine.getAddressFromPublicKey(publicKey) - return GlobalPeerID{ID: (*vote).Proposer}, nil -} - -func (p *GlobalVotingProvider) IsQuorum( - proposalVotes map[consensus.Identity]**protobufs.FrameVote, - ctx context.Context, -) (bool, error) { - // Get active prover count for quorum calculation - activeProvers, err := p.engine.proverRegistry.GetActiveProvers(nil) - if err != nil { - return false, errors.Wrap(err, "is quorum") - } - - minVotes := len(activeProvers) * 2 / 3 // 2/3 majority - if minVotes < int(p.engine.minimumProvers()) { - minVotes = int(p.engine.minimumProvers()) - } - - totalVotes := len(proposalVotes) - - if totalVotes >= minVotes { - return true, nil - } - - return false, nil -} - -func (p *GlobalVotingProvider) FinalizeVotes( - proposals map[consensus.Identity]**protobufs.GlobalFrame, - proposalVotes map[consensus.Identity]**protobufs.FrameVote, - ctx context.Context, -) (**protobufs.GlobalFrame, GlobalPeerID, error) { - // Count approvals and collect signatures - var signatures [][]byte - var publicKeys [][]byte - var chosenProposal **protobufs.GlobalFrame - var chosenProposerID GlobalPeerID - winnerCount := 0 - parentFrame := p.engine.GetFrame() - voteCount := map[string]int{} - for _, vote := range proposalVotes { - count, ok := voteCount[string((*vote).Proposer)] - if !ok { - voteCount[string((*vote).Proposer)] = 1 - } else { - 
voteCount[string((*vote).Proposer)] = count + 1 - } - } - for _, proposal := range proposals { - if proposal == nil { - continue - } - - proposer := p.engine.getAddressFromPublicKey( - (*proposal).Header.PublicKeySignatureBls48581.PublicKey.KeyValue, - ) - count := voteCount[string(proposer)] - if count > winnerCount { - winnerCount = count - chosenProposal = proposal - chosenProposerID = GlobalPeerID{ID: proposer} - } - } - - if chosenProposal == nil && len(proposals) > 0 { - // No specific votes, just pick first proposal - for _, proposal := range proposals { - if proposal == nil { - continue - } - - chosenProposal = proposal - } - } - - if chosenProposal == nil { - return &parentFrame, GlobalPeerID{}, errors.Wrap( - errors.New("no proposals to finalize"), - "finalize votes", - ) - } - - proverSet, err := p.engine.proverRegistry.GetActiveProvers(nil) - if err != nil { - return &parentFrame, GlobalPeerID{}, errors.Wrap(err, "finalize votes") - } - - proverMap := map[string][]byte{} - for _, prover := range proverSet { - proverMap[string(prover.Address)] = prover.PublicKey - } - - voterMap := map[string]**protobufs.FrameVote{} - - // Collect all signatures for aggregation - for _, vote := range proposalVotes { - if vote == nil { - continue - } - - if (*vote).FrameNumber != (*chosenProposal).Header.FrameNumber || - !bytes.Equal((*vote).Proposer, chosenProposerID.ID) { - continue - } - - if (*vote).PublicKeySignatureBls48581.Signature != nil && - (*vote).PublicKeySignatureBls48581.Address != nil { - signatures = append( - signatures, - (*vote).PublicKeySignatureBls48581.Signature, - ) - - pub := proverMap[string((*vote).PublicKeySignatureBls48581.Address)] - publicKeys = append(publicKeys, pub) - voterMap[string((*vote).PublicKeySignatureBls48581.Address)] = vote - } - } - - if len(signatures) == 0 { - return &parentFrame, GlobalPeerID{}, errors.Wrap( - errors.New("no signatures to aggregate"), - "finalize votes", - ) - } - - // Aggregate signatures - aggregateOutput, err := p.engine.keyManager.Aggregate(publicKeys, signatures) - if err != nil { - return &parentFrame, GlobalPeerID{}, errors.Wrap(err, "finalize votes") - } - aggregatedSignature := aggregateOutput.GetAggregateSignature() - - // Create participant bitmap - provers, err := p.engine.proverRegistry.GetActiveProvers(nil) - if err != nil { - return &parentFrame, GlobalPeerID{}, errors.Wrap(err, "finalize votes") - } - - bitmask := make([]byte, (len(provers)+7)/8) - - for i := 0; i < len(provers); i++ { - activeProver := provers[i] - - // Check if this prover voted in our voterMap - if _, ok := voterMap[string(activeProver.Address)]; ok { - byteIndex := i / 8 - bitIndex := i % 8 - bitmask[byteIndex] |= (1 << bitIndex) - } - } - - // Update the frame with aggregated signature - finalizedFrame := &protobufs.GlobalFrame{ - Header: &protobufs.GlobalFrameHeader{ - FrameNumber: (*chosenProposal).Header.FrameNumber, - ParentSelector: (*chosenProposal).Header.ParentSelector, - Timestamp: (*chosenProposal).Header.Timestamp, - Difficulty: (*chosenProposal).Header.Difficulty, - GlobalCommitments: (*chosenProposal).Header.GlobalCommitments, - ProverTreeCommitment: (*chosenProposal).Header.ProverTreeCommitment, - Output: (*chosenProposal).Header.Output, - PublicKeySignatureBls48581: &protobufs.BLS48581AggregateSignature{ - Signature: aggregatedSignature, - PublicKey: &protobufs.BLS48581G2PublicKey{ - KeyValue: aggregateOutput.GetAggregatePublicKey(), - }, - Bitmask: bitmask, - }, + // Create vote message + vote := &protobufs.ProposalVote{ + 
FrameNumber: (*state.State).Header.FrameNumber, + Rank: (*state.State).Header.Rank, + Selector: []byte((*state.State).Identity()), + Timestamp: uint64(time.Now().UnixMilli()), + PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ + Address: voterAddress, + Signature: sig, }, - Requests: (*chosenProposal).Requests, } - p.engine.logger.Info( - "finalized votes", - zap.Uint64("frame_number", finalizedFrame.Header.FrameNumber), - zap.Int("signatures", len(signatures)), - ) - - return &finalizedFrame, chosenProposerID, nil + return &vote, nil } -func (p *GlobalVotingProvider) SendConfirmation( - finalized **protobufs.GlobalFrame, - ctx context.Context, -) error { - if finalized == nil || (*finalized).Header == nil { - return errors.Wrap( - errors.New("invalid finalized frame"), - "send confirmation", - ) - } - - copiedFinalized := proto.Clone(*finalized).(*protobufs.GlobalFrame) - - selectorBI, err := poseidon.HashBytes(copiedFinalized.Header.Output) - - if err != nil { - return errors.Wrap(err, "send confirmation") - } - // Create frame confirmation - confirmation := &protobufs.FrameConfirmation{ - FrameNumber: copiedFinalized.Header.FrameNumber, - Selector: selectorBI.FillBytes(make([]byte, 32)), - Timestamp: time.Now().UnixMilli(), - AggregateSignature: copiedFinalized.Header.PublicKeySignatureBls48581, - } - - // Serialize and send confirmation - data, err := confirmation.ToCanonicalBytes() - if err != nil { - return errors.Wrap(err, "send confirmation") - } - - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_CONSENSUS_BITMASK, - data, - ); err != nil { - return errors.Wrap(err, "send confirmation") - } - - // Serialize and send finalized frame over the global frame bitmask - frameData, err := copiedFinalized.ToCanonicalBytes() - if err != nil { - return errors.Wrap(err, "send confirmation") - } - if err := p.engine.pubsub.PublishToBitmask( - GLOBAL_FRAME_BITMASK, - frameData, - ); err != nil { - return errors.Wrap(err, "send confirmation") - } - - // Insert into time reel - if err := p.engine.globalTimeReel.Insert( - p.engine.ctx, - copiedFinalized, - ); err != nil { - p.engine.logger.Error("failed to add frame to time reel", zap.Error(err)) - // Clean up on error - frameIDBI, _ := poseidon.HashBytes(copiedFinalized.Header.Output) - frameID := frameIDBI.FillBytes(make([]byte, 32)) - p.engine.frameStoreMu.Lock() - delete(p.engine.frameStore, string(frameID)) - p.engine.frameStoreMu.Unlock() - return errors.Wrap(err, "send confirmation") - } - - p.engine.logger.Info( - "sent confirmation", - zap.Uint64("frame_number", copiedFinalized.Header.FrameNumber), - ) - - return nil -} +var _ consensus.VotingProvider[*protobufs.GlobalFrame, *protobufs.ProposalVote, GlobalPeerID] = (*GlobalVotingProvider)(nil) diff --git a/node/consensus/global/event_distributor.go b/node/consensus/global/event_distributor.go index ad35e09..3af314c 100644 --- a/node/consensus/global/event_distributor.go +++ b/node/consensus/global/event_distributor.go @@ -2,6 +2,7 @@ package global import ( "bytes" + "context" "encoding/hex" "fmt" "math/rand" @@ -14,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/consensus/provers" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" globalintrinsics "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" @@ -24,19 +26,15 @@ 
import ( "source.quilibrium.com/quilibrium/monorepo/types/schema" ) -func (e *GlobalConsensusEngine) eventDistributorLoop() { +func (e *GlobalConsensusEngine) eventDistributorLoop( + ctx lifecycle.SignalerContext, +) { defer func() { if r := recover(); r != nil { e.logger.Error("fatal error encountered", zap.Any("panic", r)) - if e.cancel != nil { - e.cancel() - } - go func() { - e.Stop(false) - }() + ctx.Throw(errors.Errorf("fatal unhandled error encountered: %v", r)) } }() - defer e.wg.Done() // Subscribe to events from the event distributor eventCh := e.eventDistributor.Subscribe("global") @@ -44,7 +42,7 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -94,7 +92,7 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() { allocated = allocated && w.Allocated } if !allocated { - e.evaluateForProposals(data) + e.evaluateForProposals(ctx, data) } } } @@ -204,18 +202,10 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() { if ok && data.Message != "" { e.logger.Error(data.Message) e.halt() - if e.stateMachine != nil { - if err := e.stateMachine.Stop(); err != nil { - e.logger.Error( - "error occurred while halting consensus", - zap.Error(err), - ) - } - } go func() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-time.After(10 * time.Second): e.logger.Error( @@ -234,18 +224,10 @@ func (e *GlobalConsensusEngine) eventDistributorLoop() { zap.Error(data.Error), ) e.halt() - if e.stateMachine != nil { - if err := e.stateMachine.Stop(); err != nil { - e.logger.Error( - "error occurred while halting consensus", - zap.Error(err), - ) - } - } go func() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-time.After(10 * time.Second): e.logger.Error( @@ -376,6 +358,7 @@ func (e *GlobalConsensusEngine) estimateSeniorityFromConfig() uint64 { } func (e *GlobalConsensusEngine) evaluateForProposals( + ctx context.Context, data *consensustime.GlobalEvent, ) { self, err := e.proverRegistry.GetProverInfo(e.getProverAddress()) @@ -412,7 +395,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals( } idx := rand.Int63n(int64(len(ps))) - e.syncProvider.hyperSyncWithProver(ps[idx].Address, key) + e.syncProvider.hyperSyncWithProver(ctx, ps[idx].Address, key) for _, shard := range shards { path := []int{} @@ -461,7 +444,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals( size := e.hypergraph.GetSize(&key, path) resp, err := e.hypergraph.GetChildrenForPath( - e.ctx, + ctx, &protobufs.GetChildrenForPathRequest{ ShardKey: slices.Concat(key.L1[:], key.L2[:]), Path: shard.Path, @@ -676,6 +659,7 @@ func (e *GlobalConsensusEngine) publishKeyRegistry() { return } registry := &protobufs.KeyRegistry{ + LastUpdated: uint64(time.Now().UnixMilli()), IdentityKey: &protobufs.Ed448PublicKey{ KeyValue: e.pubsub.GetPublicKey(), }, diff --git a/node/consensus/global/factory.go b/node/consensus/global/factory.go index 132c3fd..1533695 100644 --- a/node/consensus/global/factory.go +++ b/node/consensus/global/factory.go @@ -4,12 +4,14 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/consensus" "source.quilibrium.com/quilibrium/monorepo/node/consensus/events" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat" + "source.quilibrium.com/quilibrium/monorepo/protobufs" 
"source.quilibrium.com/quilibrium/monorepo/types/channel" "source.quilibrium.com/quilibrium/monorepo/types/compiler" - "source.quilibrium.com/quilibrium/monorepo/types/consensus" + tconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus" "source.quilibrium.com/quilibrium/monorepo/types/crypto" "source.quilibrium.com/quilibrium/monorepo/types/hypergraph" "source.quilibrium.com/quilibrium/monorepo/types/keys" @@ -28,18 +30,19 @@ type ConsensusEngineFactory struct { keyStore store.KeyStore frameProver crypto.FrameProver inclusionProver crypto.InclusionProver - signerRegistry consensus.SignerRegistry - proverRegistry consensus.ProverRegistry - dynamicFeeManager consensus.DynamicFeeManager - appFrameValidator consensus.AppFrameValidator - frameValidator consensus.GlobalFrameValidator - difficultyAdjuster consensus.DifficultyAdjuster - rewardIssuance consensus.RewardIssuance + signerRegistry tconsensus.SignerRegistry + proverRegistry tconsensus.ProverRegistry + dynamicFeeManager tconsensus.DynamicFeeManager + appFrameValidator tconsensus.AppFrameValidator + frameValidator tconsensus.GlobalFrameValidator + difficultyAdjuster tconsensus.DifficultyAdjuster + rewardIssuance tconsensus.RewardIssuance clockStore store.ClockStore inboxStore store.InboxStore hypergraphStore store.HypergraphStore shardsStore store.ShardsStore workerStore store.WorkerStore + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote] encryptedChannel channel.EncryptedChannel bulletproofProver crypto.BulletproofProver verEnc crypto.VerifiableEncryptor @@ -59,18 +62,19 @@ func NewConsensusEngineFactory( keyStore store.KeyStore, frameProver crypto.FrameProver, inclusionProver crypto.InclusionProver, - signerRegistry consensus.SignerRegistry, - proverRegistry consensus.ProverRegistry, - dynamicFeeManager consensus.DynamicFeeManager, - appFrameValidator consensus.AppFrameValidator, - frameValidator consensus.GlobalFrameValidator, - difficultyAdjuster consensus.DifficultyAdjuster, - rewardIssuance consensus.RewardIssuance, + signerRegistry tconsensus.SignerRegistry, + proverRegistry tconsensus.ProverRegistry, + dynamicFeeManager tconsensus.DynamicFeeManager, + appFrameValidator tconsensus.AppFrameValidator, + frameValidator tconsensus.GlobalFrameValidator, + difficultyAdjuster tconsensus.DifficultyAdjuster, + rewardIssuance tconsensus.RewardIssuance, clockStore store.ClockStore, inboxStore store.InboxStore, hypergraphStore store.HypergraphStore, shardsStore store.ShardsStore, workerStore store.WorkerStore, + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote], encryptedChannel channel.EncryptedChannel, bulletproofProver crypto.BulletproofProver, verEnc crypto.VerifiableEncryptor, @@ -103,6 +107,7 @@ func NewConsensusEngineFactory( hypergraphStore: hypergraphStore, shardsStore: shardsStore, workerStore: workerStore, + consensusStore: consensusStore, encryptedChannel: encryptedChannel, bulletproofProver: bulletproofProver, verEnc: verEnc, @@ -158,6 +163,7 @@ func (f *ConsensusEngineFactory) CreateGlobalConsensusEngine( f.inboxStore, f.hypergraphStore, f.shardsStore, + f.consensusStore, f.workerStore, f.encryptedChannel, f.bulletproofProver, diff --git a/node/consensus/global/genesis.go b/node/consensus/global/genesis.go index 05514db..0dc797c 100644 --- a/node/consensus/global/genesis.go +++ b/node/consensus/global/genesis.go @@ -1,6 +1,7 @@ package global import ( + "bytes" _ "embed" "encoding/base64" "encoding/binary" @@ -15,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" 
"github.com/mr-tron/base58" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph" globalintrinsics "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat" @@ -54,7 +56,10 @@ func (e *GlobalConsensusEngine) getMainnetGenesisJSON() *GenesisJson { } // TODO[2.1.1+]: Refactor out direct hypergraph access -func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { +func (e *GlobalConsensusEngine) initializeGenesis() ( + *protobufs.GlobalFrame, + *protobufs.QuorumCertificate, +) { e.logger.Info("initializing genesis frame for global consensus") var genesisFrame *protobufs.GlobalFrame @@ -63,7 +68,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { if e.config.P2P.Network == 0 { genesisData := e.getMainnetGenesisJSON() if genesisData == nil { - return nil + return nil, nil } // Decode base64 encoded fields @@ -72,13 +77,13 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { ) if err != nil { e.logger.Error("failed to decode parent selector", zap.Error(err)) - return nil + return nil, nil } output, err := base64.StdEncoding.DecodeString(genesisData.Output) if err != nil { e.logger.Error("failed to decode output", zap.Error(err)) - return nil + return nil, nil } // Create genesis header with actual data @@ -124,7 +129,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { zap.String("value", base64Value), zap.Error(err), ) - return nil + return nil, nil } l1 := up2p.GetBloomFilterIndices(keyBytes, 256, 3) @@ -148,7 +153,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { zap.Error(err), ) txn.Abort() - return nil + return nil, nil } } } @@ -169,19 +174,19 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { err = e.establishMainnetGenesisProvers(state, genesisData) if err != nil { e.logger.Error("failed to establish provers", zap.Error(err)) - return nil + return nil, nil } err = state.Commit() if err != nil { e.logger.Error("failed to commit", zap.Error(err)) - return nil + return nil, nil } roots, err := e.hypergraph.Commit(0) if err != nil { e.logger.Error("could not commit", zap.Error(err)) - return nil + return nil, nil } proverRoots := roots[tries.ShardKey{ @@ -224,7 +229,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { "failed to place app shard", zap.Error(err), ) - return nil + return nil, nil } l1 := up2p.GetBloomFilterIndices(token.QUIL_TOKEN_ADDRESS, 256, 3) @@ -240,7 +245,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { zap.Error(err), ) txn.Abort() - return nil + return nil, nil } if err = txn.Commit(); err != nil { e.logger.Error( @@ -248,7 +253,7 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { zap.Error(err), ) txn.Abort() - return nil + return nil, nil } } @@ -260,18 +265,62 @@ func (e *GlobalConsensusEngine) initializeGenesis() *protobufs.GlobalFrame { e.frameStoreMu.Unlock() // Add to time reel - if err := e.globalTimeReel.Insert(e.ctx, genesisFrame); err != nil { - e.logger.Error("failed to add genesis frame to time reel", zap.Error(err)) - // Clean up on error - e.frameStoreMu.Lock() - delete(e.frameStore, string(frameID)) - e.frameStoreMu.Unlock() + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + panic(err) + } + 
if err := e.clockStore.PutGlobalClockFrame(genesisFrame, txn); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil, nil + } + genesisQC := &protobufs.QuorumCertificate{ + Rank: 0, + Filter: []byte{}, + FrameNumber: genesisFrame.Header.FrameNumber, + Selector: []byte(genesisFrame.Identity()), + Timestamp: 0, + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: make([]byte, 585), + }, + Signature: make([]byte, 74), + Bitmask: bytes.Repeat([]byte{0xff}, 32), + }, + } + if err := e.clockStore.PutQuorumCertificate(genesisQC, txn); err != nil { + txn.Abort() + e.logger.Error("could not add quorum certificate", zap.Error(err)) + return nil, nil + } + if err := txn.Commit(); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil, nil + } + if err = e.consensusStore.PutLivenessState( + &models.LivenessState{ + CurrentRank: 1, + LatestQuorumCertificate: genesisQC, + }, + ); err != nil { + e.logger.Error("could not add liveness state", zap.Error(err)) + return nil, nil + } + if err = e.consensusStore.PutConsensusState( + &models.ConsensusState[*protobufs.ProposalVote]{ + FinalizedRank: 0, + LatestAcknowledgedRank: 0, + }, + ); err != nil { + e.logger.Error("could not add consensus state", zap.Error(err)) + return nil, nil } e.proverRegistry.Refresh() e.logger.Info("initialized genesis frame for global consensus") - return genesisFrame + return genesisFrame, genesisQC } // createStubGenesis creates a stub genesis frame for non-mainnet networks @@ -397,7 +446,7 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame { state = hgstate.NewHypergraphState(e.hypergraph) for _, pubkey := range proverPubKeys { - err = e.addGenesisProver(rdfMultiprover, state, pubkey, 0, 0) + err = e.addGenesisProver(rdfMultiprover, state, pubkey, 1000, 0) if err != nil { e.logger.Error("error adding prover", zap.Error(err)) return nil @@ -468,12 +517,19 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame { e.frameStoreMu.Unlock() // Add to time reel - if err := e.globalTimeReel.Insert(e.ctx, genesisFrame); err != nil { - e.logger.Error("failed to add genesis frame to time reel", zap.Error(err)) - // Clean up on error - e.frameStoreMu.Lock() - delete(e.frameStore, string(frameID)) - e.frameStoreMu.Unlock() + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + panic(err) + } + if err := e.clockStore.PutGlobalClockFrame(genesisFrame, txn); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil + } + if err := txn.Commit(); err != nil { + txn.Abort() + e.logger.Error("could not add frame", zap.Error(err)) + return nil } return genesisFrame diff --git a/node/consensus/global/global_consensus_engine.go b/node/consensus/global/global_consensus_engine.go index a1fae47..86e085a 100644 --- a/node/consensus/global/global_consensus_engine.go +++ b/node/consensus/global/global_consensus_engine.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" + "math/rand" "net" "net/netip" "slices" @@ -27,10 +28,20 @@ import ( "google.golang.org/grpc" "source.quilibrium.com/quilibrium/monorepo/config" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/forks" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub" + 
"source.quilibrium.com/quilibrium/monorepo/consensus/participant" + "source.quilibrium.com/quilibrium/monorepo/consensus/validator" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/aggregator" "source.quilibrium.com/quilibrium/monorepo/node/consensus/provers" "source.quilibrium.com/quilibrium/monorepo/node/consensus/reward" consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/tracing" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/voting" "source.quilibrium.com/quilibrium/monorepo/node/dispatch" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat" @@ -47,7 +58,6 @@ import ( typesconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus" "source.quilibrium.com/quilibrium/monorepo/types/crypto" typesdispatch "source.quilibrium.com/quilibrium/monorepo/types/dispatch" - "source.quilibrium.com/quilibrium/monorepo/types/execution" "source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics" "source.quilibrium.com/quilibrium/monorepo/types/execution/state" "source.quilibrium.com/quilibrium/monorepo/types/hypergraph" @@ -83,6 +93,7 @@ type LockedTransaction struct { // GlobalConsensusEngine uses the generic state machine for consensus type GlobalConsensusEngine struct { + *lifecycle.ComponentManager protobufs.GlobalServiceServer logger *zap.Logger @@ -93,6 +104,7 @@ type GlobalConsensusEngine struct { keyStore store.KeyStore clockStore store.ClockStore shardsStore store.ShardsStore + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote] frameProver crypto.FrameProver inclusionProver crypto.InclusionProver signerRegistry typesconsensus.SignerRegistry @@ -105,16 +117,20 @@ type GlobalConsensusEngine struct { eventDistributor typesconsensus.EventDistributor dispatchService typesdispatch.DispatchService globalTimeReel *consensustime.GlobalTimeReel - blsConstructor crypto.BlsConstructor - executors map[string]execution.ShardExecutionEngine - executorsMu sync.RWMutex - executionManager *manager.ExecutionEngineManager - mixnet typesconsensus.Mixnet - peerInfoManager tp2p.PeerInfoManager - workerManager worker.WorkerManager - proposer *provers.Manager - alertPublicKey []byte - hasSentKeyBundle bool + forks consensus.Forks[*protobufs.GlobalFrame] + notifier consensus.Consumer[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ] + blsConstructor crypto.BlsConstructor + executionManager *manager.ExecutionEngineManager + mixnet typesconsensus.Mixnet + peerInfoManager tp2p.PeerInfoManager + workerManager worker.WorkerManager + proposer *provers.Manager + currentRank uint64 + alertPublicKey []byte + hasSentKeyBundle bool // Message queues globalConsensusMessageQueue chan *pb.Message @@ -124,43 +140,51 @@ type GlobalConsensusEngine struct { globalAlertMessageQueue chan *pb.Message appFramesMessageQueue chan *pb.Message shardConsensusMessageQueue chan *pb.Message + globalProposalQueue chan *protobufs.GlobalProposal // Emergency halt haltCtx context.Context halt context.CancelFunc // Internal state - ctx context.Context - cancel context.CancelFunc - quit chan struct{} - wg sync.WaitGroup - minimumProvers func() uint64 - blacklistMap map[string]bool - blacklistMu sync.RWMutex 
- pendingMessages [][]byte - pendingMessagesMu sync.RWMutex - currentDifficulty uint32 - currentDifficultyMu sync.RWMutex - lastProvenFrameTime time.Time - lastProvenFrameTimeMu sync.RWMutex - frameStore map[string]*protobufs.GlobalFrame - frameStoreMu sync.RWMutex - appFrameStore map[string]*protobufs.AppShardFrame - appFrameStoreMu sync.RWMutex - lowCoverageStreak map[string]*coverageStreak + quit chan struct{} + wg sync.WaitGroup + minimumProvers func() uint64 + blacklistMap map[string]bool + blacklistMu sync.RWMutex + pendingMessages [][]byte + pendingMessagesMu sync.RWMutex + currentDifficulty uint32 + currentDifficultyMu sync.RWMutex + lastProvenFrameTime time.Time + lastProvenFrameTimeMu sync.RWMutex + frameStore map[string]*protobufs.GlobalFrame + frameStoreMu sync.RWMutex + proposalCache map[uint64]*protobufs.GlobalProposal + proposalCacheMu sync.RWMutex + pendingCertifiedParents map[uint64]*protobufs.GlobalProposal + pendingCertifiedParentsMu sync.RWMutex + appFrameStore map[string]*protobufs.AppShardFrame + appFrameStoreMu sync.RWMutex + lowCoverageStreak map[string]*coverageStreak // Transaction cross-shard lock tracking txLockMap map[uint64]map[string]map[string]*LockedTransaction txLockMu sync.RWMutex - // Generic state machine - stateMachine *consensus.StateMachine[ + // Consensus participant instance + consensusParticipant consensus.EventLoop[ *protobufs.GlobalFrame, - *protobufs.FrameVote, - GlobalPeerID, - GlobalCollectedCommitments, + *protobufs.ProposalVote, ] + // Consensus plugins + signatureAggregator consensus.SignatureAggregator + voteCollectorDistributor *pubsub.VoteCollectorDistributor[*protobufs.ProposalVote] + timeoutCollectorDistributor *pubsub.TimeoutCollectorDistributor[*protobufs.ProposalVote] + voteAggregator consensus.VoteAggregator[*protobufs.GlobalFrame, *protobufs.ProposalVote] + timeoutAggregator consensus.TimeoutAggregator[*protobufs.ProposalVote] + // Provider implementations syncProvider *GlobalSyncProvider votingProvider *GlobalVotingProvider @@ -194,7 +218,7 @@ func NewGlobalConsensusEngine( logger *zap.Logger, config *config.Config, frameTimeMillis int64, - pubsub tp2p.PubSub, + ps tp2p.PubSub, hypergraph hypergraph.Hypergraph, keyManager typeskeys.KeyManager, keyStore store.KeyStore, @@ -213,6 +237,7 @@ func NewGlobalConsensusEngine( inboxStore store.InboxStore, hypergraphStore store.HypergraphStore, shardsStore store.ShardsStore, + consensusStore consensus.ConsensusStore[*protobufs.ProposalVote], workerStore store.WorkerStore, encryptedChannel channel.EncryptedChannel, bulletproofProver crypto.BulletproofProver, @@ -225,12 +250,13 @@ func NewGlobalConsensusEngine( engine := &GlobalConsensusEngine{ logger: logger, config: config, - pubsub: pubsub, + pubsub: ps, hypergraph: hypergraph, keyManager: keyManager, keyStore: keyStore, clockStore: clockStore, shardsStore: shardsStore, + consensusStore: consensusStore, frameProver: frameProver, inclusionProver: inclusionProver, signerRegistry: signerRegistry, @@ -244,9 +270,10 @@ func NewGlobalConsensusEngine( eventDistributor: eventDistributor, globalTimeReel: globalTimeReel, peerInfoManager: peerInfoManager, - executors: make(map[string]execution.ShardExecutionEngine), frameStore: make(map[string]*protobufs.GlobalFrame), appFrameStore: make(map[string]*protobufs.AppShardFrame), + proposalCache: make(map[uint64]*protobufs.GlobalProposal), + pendingCertifiedParents: make(map[uint64]*protobufs.GlobalProposal), globalConsensusMessageQueue: make(chan *pb.Message, 1000), globalFrameMessageQueue: make(chan 
*pb.Message, 100), globalProverMessageQueue: make(chan *pb.Message, 1000), @@ -254,6 +281,7 @@ func NewGlobalConsensusEngine( globalPeerInfoMessageQueue: make(chan *pb.Message, 1000), globalAlertMessageQueue: make(chan *pb.Message, 100), shardConsensusMessageQueue: make(chan *pb.Message, 10000), + globalProposalQueue: make(chan *protobufs.GlobalProposal, 1000), currentDifficulty: config.Engine.Difficulty, lastProvenFrameTime: time.Now(), blacklistMap: make(map[string]bool), @@ -301,6 +329,24 @@ func NewGlobalConsensusEngine( } } + // Create provider implementations + engine.syncProvider = NewGlobalSyncProvider(engine) + engine.votingProvider = &GlobalVotingProvider{engine: engine} + engine.leaderProvider = &GlobalLeaderProvider{engine: engine} + engine.livenessProvider = &GlobalLivenessProvider{engine: engine} + engine.signatureAggregator = aggregator.WrapSignatureAggregator( + engine.blsConstructor, + engine.proverRegistry, + nil, + ) + voteAggregationDistributor := voting.NewGlobalVoteAggregationDistributor() + engine.voteCollectorDistributor = + voteAggregationDistributor.VoteCollectorDistributor + timeoutAggregationDistributor := + voting.NewGlobalTimeoutAggregationDistributor() + engine.timeoutCollectorDistributor = + timeoutAggregationDistributor.TimeoutCollectorDistributor + // Create the worker manager engine.workerManager = mgr.NewWorkerManager( workerStore, @@ -358,23 +404,12 @@ func NewGlobalConsensusEngine( // Establish alert halt context engine.haltCtx, engine.halt = context.WithCancel(context.Background()) - // Create provider implementations - engine.syncProvider = &GlobalSyncProvider{engine: engine} - engine.votingProvider = &GlobalVotingProvider{ - engine: engine, - proposalVotes: make( - map[consensus.Identity]map[consensus.Identity]**protobufs.FrameVote, - ), - } - engine.leaderProvider = &GlobalLeaderProvider{engine: engine} - engine.livenessProvider = &GlobalLivenessProvider{engine: engine} - // Create dispatch service engine.dispatchService = dispatch.NewDispatchService( inboxStore, logger, keyManager, - pubsub, + ps, ) // Create execution engine manager @@ -401,17 +436,6 @@ func NewGlobalConsensusEngine( } engine.executionManager = executionManager - // Initialize execution engines - if err := engine.executionManager.InitializeEngines(); err != nil { - return nil, errors.Wrap(err, "new global consensus engine") - } - - // Register all execution engines with the consensus engine - err = engine.executionManager.RegisterAllEngines(engine.RegisterExecutor) - if err != nil { - return nil, errors.Wrap(err, "new global consensus engine") - } - // Initialize metrics engineState.Set(0) // EngineStateStopped currentDifficulty.Set(float64(config.Engine.Difficulty)) @@ -422,7 +446,7 @@ func NewGlobalConsensusEngine( engine.hyperSync = hypergraph engine.onionService = onion.NewGRPCTransport( logger, - pubsub.GetPeerID(), + ps.GetPeerID(), peerInfoManager, signerRegistry, ) @@ -444,211 +468,324 @@ func NewGlobalConsensusEngine( // Set up gRPC server with TLS credentials if err := engine.setupGRPCServer(); err != nil { - panic(errors.Wrap(err, "failed to setup gRPC server")) + return nil, errors.Wrap(err, "failed to setup gRPC server") } - return engine, nil -} + componentBuilder := lifecycle.NewComponentManagerBuilder() -func (e *GlobalConsensusEngine) Start(quit chan struct{}) <-chan error { - errChan := make(chan error, 1) - - e.quit = quit - e.ctx, e.cancel = context.WithCancel(context.Background()) - - // Start worker manager background process (if applicable) - if 
!e.config.Engine.ArchiveMode { - if err := e.workerManager.Start(e.ctx); err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan - } - } - - // Start execution engines - if err := e.executionManager.StartAll(e.quit); err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan - } - - // Start the event distributor - if err := e.eventDistributor.Start(e.ctx); err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan - } - - err := e.globalTimeReel.Start() - if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan - } - - frame, err := e.clockStore.GetLatestGlobalClockFrame() - if err != nil { - e.logger.Warn( - "invalid frame retrieved, will resync", - zap.Error(err), - ) - } - - var initialState **protobufs.GlobalFrame = nil - if frame != nil { - initialState = &frame - } - - if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode { - // Create the generic state machine - e.stateMachine = consensus.NewStateMachine( - e.getPeerID(), - initialState, - true, - e.minimumProvers, - e.syncProvider, - e.votingProvider, - e.leaderProvider, - e.livenessProvider, - &GlobalTracer{ - logger: e.logger.Named("state_machine"), - }, - ) - - // Add transition listener - e.stateMachine.AddListener(&GlobalTransitionListener{ - engine: e, - logger: e.logger.Named("transitions"), + // Add worker manager background process (if applicable) + if !engine.config.Engine.ArchiveMode { + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + if err := engine.workerManager.Start(ctx); err != nil { + engine.logger.Error("could not start worker manager", zap.Error(err)) + ctx.Throw(err) + return + } + ready() + <-ctx.Done() }) } - // Confirm initial state - if !e.config.Engine.ArchiveMode { - latest, err := e.clockStore.GetLatestGlobalClockFrame() - if err != nil || latest == nil { - e.logger.Info("initializing genesis") - e.initializeGenesis() + // Add sync provider + componentBuilder.AddWorker(engine.syncProvider.Start) + + // Add execution engines + componentBuilder.AddWorker(engine.executionManager.Start) + componentBuilder.AddWorker(engine.eventDistributor.Start) + componentBuilder.AddWorker(engine.globalTimeReel.Start) + + latest, err := engine.consensusStore.GetConsensusState(nil) + var state *models.CertifiedState[*protobufs.GlobalFrame] + var pending []*models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ] + if err != nil { + frame, qc := engine.initializeGenesis() + state = &models.CertifiedState[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: 0, + Identifier: frame.Identity(), + State: &frame, + }, + CertifyingQuorumCertificate: qc, } + pending = []*models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ]{} + } else { + qc, err := engine.clockStore.GetQuorumCertificate(nil, latest.FinalizedRank) + if err != nil { + panic(err) + } + frame, err := engine.clockStore.GetGlobalClockFrame( + qc.GetFrameNumber(), + ) + if err != nil { + panic(err) + } + parentFrame, err := engine.clockStore.GetGlobalClockFrame( + qc.GetFrameNumber() - 1, + ) + if err != nil { + panic(err) + } + parentQC, err := engine.clockStore.GetQuorumCertificate( + nil, + parentFrame.GetRank(), + ) + if err != nil { + panic(err) + } + state = &models.CertifiedState[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: frame.GetRank(), + Identifier: frame.Identity(), + 
ProposerID: frame.Source(), + ParentQuorumCertificate: parentQC, + Timestamp: frame.GetTimestamp(), + State: &frame, + }, + CertifyingQuorumCertificate: qc, + } + pending = engine.getPendingProposals(frame.Header.FrameNumber) } - // Subscribe to global consensus if participating - err = e.subscribeToGlobalConsensus() + liveness, err := engine.consensusStore.GetLivenessState(nil) + if err == nil { + engine.currentRank = liveness.CurrentRank + } + + engine.voteAggregator, err = voting.NewGlobalVoteAggregator[GlobalPeerID]( + tracing.NewZapTracer(logger), + engine, + voteAggregationDistributor, + engine.signatureAggregator, + engine.votingProvider, + func(qc models.QuorumCertificate) { + select { + case <-engine.haltCtx.Done(): + return + default: + } + engine.consensusParticipant.OnQuorumCertificateConstructedFromVotes(qc) + }, + state.Rank()+1, + ) if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, err + } + engine.timeoutAggregator, err = voting.NewGlobalTimeoutAggregator[GlobalPeerID]( + tracing.NewZapTracer(logger), + engine, + engine, + engine.signatureAggregator, + timeoutAggregationDistributor, + engine.votingProvider, + state.Rank()+1, + ) + + notifier := pubsub.NewDistributor[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ]() + notifier.AddConsumer(engine) + engine.notifier = notifier + + forks, err := forks.NewForks(state, engine, notifier) + if err != nil { + return nil, err + } + + engine.forks = forks + + if engine.config.P2P.Network == 99 || engine.config.Engine.ArchiveMode { + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + if err := engine.startConsensus(state, pending, ctx, ready); err != nil { + engine.logger.Error("could not start consensus", zap.Error(err)) + ctx.Throw(err) + return + } + + <-ctx.Done() + <-lifecycle.AllDone(engine.voteAggregator, engine.timeoutAggregator) + }) + } + + componentBuilder.AddWorker(engine.peerInfoManager.Start) + + // Subscribe to global consensus if participating + err = engine.subscribeToGlobalConsensus() + if err != nil { + return nil, err } // Subscribe to shard consensus messages to broker lock agreement - err = e.subscribeToShardConsensusMessages() + err = engine.subscribeToShardConsensusMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, errors.Wrap(err, "start") } // Subscribe to frames - err = e.subscribeToFrameMessages() + err = engine.subscribeToFrameMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, errors.Wrap(err, "start") } // Subscribe to prover messages - err = e.subscribeToProverMessages() + err = engine.subscribeToProverMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, errors.Wrap(err, "start") } // Subscribe to peer info messages - err = e.subscribeToPeerInfoMessages() + err = engine.subscribeToPeerInfoMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, errors.Wrap(err, "start") } // Subscribe to alert messages - err = e.subscribeToAlertMessages() + err = engine.subscribeToAlertMessages() if err != nil { - errChan <- errors.Wrap(err, "start") - close(errChan) - return errChan + return nil, errors.Wrap(err, "start") } - e.peerInfoManager.Start() - // Start consensus message queue processor - e.wg.Add(1) - go e.processGlobalConsensusMessageQueue() + 
componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processGlobalConsensusMessageQueue(ctx) + }) // Start shard consensus message queue processor - e.wg.Add(1) - go e.processShardConsensusMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processShardConsensusMessageQueue(ctx) + }) // Start frame message queue processor - e.wg.Add(1) - go e.processFrameMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processFrameMessageQueue(ctx) + }) // Start prover message queue processor - e.wg.Add(1) - go e.processProverMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processProverMessageQueue(ctx) + }) // Start peer info message queue processor - e.wg.Add(1) - go e.processPeerInfoMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processPeerInfoMessageQueue(ctx) + }) // Start alert message queue processor - e.wg.Add(1) - go e.processAlertMessageQueue() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processAlertMessageQueue(ctx) + }) + + // Start global proposal queue processor + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.processGlobalProposalQueue(ctx) + }) // Start periodic peer info reporting - e.wg.Add(1) - go e.reportPeerInfoPeriodically() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.reportPeerInfoPeriodically(ctx) + }) // Start event distributor event loop - e.wg.Add(1) - go e.eventDistributorLoop() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.eventDistributorLoop(ctx) + }) // Start periodic metrics update - e.wg.Add(1) - go e.updateMetrics() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.updateMetrics(ctx) + }) // Start periodic tx lock pruning - e.wg.Add(1) - go e.pruneTxLocksPeriodically() + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.pruneTxLocksPeriodically(ctx) + }) - if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode { - // Start the state machine - if err := e.stateMachine.Start(); err != nil { - errChan <- errors.Wrap(err, "start state machine") - close(errChan) - return errChan - } - } - - if e.grpcServer != nil { + if engine.grpcServer != nil { // Register all services with the gRPC server - e.RegisterServices(e.grpcServer) + engine.RegisterServices(engine.grpcServer) // Start serving the gRPC server - go func() { - if err := e.grpcServer.Serve(e.grpcListener); err != nil { - e.logger.Error("gRPC server error", zap.Error(err)) + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + go func() { + if err := engine.grpcServer.Serve(engine.grpcListener); err != nil { + engine.logger.Error("gRPC server error", zap.Error(err)) + ctx.Throw(err) + } + }() + ready() + engine.logger.Info("started gRPC server", + zap.String("address", engine.grpcListener.Addr().String())) + 
<-ctx.Done() + engine.logger.Info("stopping gRPC server") + engine.grpcServer.GracefulStop() + if engine.grpcListener != nil { + engine.grpcListener.Close() } - }() - - e.logger.Info("started gRPC server", - zap.String("address", e.grpcListener.Addr().String())) + }) } - e.logger.Info("global consensus engine started") + engine.ComponentManager = componentBuilder.Build() - close(errChan) - return errChan + return engine, nil } func (e *GlobalConsensusEngine) setupGRPCServer() error { @@ -752,47 +889,6 @@ func (e *GlobalConsensusEngine) getAddressFromPublicKey( func (e *GlobalConsensusEngine) Stop(force bool) <-chan error { errChan := make(chan error, 1) - // Stop worker manager background process (if applicable) - if !e.config.Engine.ArchiveMode { - if err := e.workerManager.Stop(); err != nil { - errChan <- errors.Wrap(err, "stop") - close(errChan) - return errChan - } - } - - if e.grpcServer != nil { - e.logger.Info("stopping gRPC server") - e.grpcServer.GracefulStop() - if e.grpcListener != nil { - e.grpcListener.Close() - } - } - - if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode { - // Stop the state machine - if err := e.stateMachine.Stop(); err != nil && !force { - errChan <- errors.Wrap(err, "stop") - } - } - - // Stop execution engines - if e.executionManager != nil { - if err := e.executionManager.StopAll(force); err != nil && !force { - errChan <- errors.Wrap(err, "stop") - } - } - - // Cancel context - if e.cancel != nil { - e.cancel() - } - - // Stop event distributor - if err := e.eventDistributor.Stop(); err != nil && !force { - errChan <- errors.Wrap(err, "stop") - } - // Unsubscribe from pubsub if e.config.Engine.ArchiveMode || e.config.P2P.Network == 99 { e.pubsub.Unsubscribe(GLOBAL_CONSENSUS_BITMASK, false) @@ -818,17 +914,8 @@ func (e *GlobalConsensusEngine) Stop(force bool) <-chan error { e.pubsub.Unsubscribe(GLOBAL_ALERT_BITMASK, false) e.pubsub.UnregisterValidator(GLOBAL_ALERT_BITMASK) - e.peerInfoManager.Stop() - - // Wait for goroutines to finish - done := make(chan struct{}) - go func() { - e.wg.Wait() - close(done) - }() - select { - case <-done: + case <-e.Done(): // Clean shutdown case <-time.After(30 * time.Second): if !force { @@ -836,11 +923,6 @@ func (e *GlobalConsensusEngine) Stop(force bool) <-chan error { } } - if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode { - // Close the state machine - e.stateMachine.Close() - } - close(errChan) return errChan } @@ -868,105 +950,16 @@ func (e *GlobalConsensusEngine) GetState() typesconsensus.EngineState { } // Map the generic state machine state to engine state - if e.stateMachine == nil { - return typesconsensus.EngineStateStopped - } - smState := e.stateMachine.GetState() - switch smState { - case consensus.StateStopped: - return typesconsensus.EngineStateStopped - case consensus.StateStarting: - return typesconsensus.EngineStateStarting - case consensus.StateLoading: - return typesconsensus.EngineStateLoading - case consensus.StateCollecting: - return typesconsensus.EngineStateCollecting - case consensus.StateLivenessCheck: - return typesconsensus.EngineStateLivenessCheck - case consensus.StateProving: + select { + case <-e.consensusParticipant.Ready(): return typesconsensus.EngineStateProving - case consensus.StatePublishing: - return typesconsensus.EngineStatePublishing - case consensus.StateVoting: - return typesconsensus.EngineStateVoting - case consensus.StateFinalizing: - return typesconsensus.EngineStateFinalizing - default: + case <-e.consensusParticipant.Done(): return 
typesconsensus.EngineStateStopped + default: + return typesconsensus.EngineStateStarting } } -func (e *GlobalConsensusEngine) RegisterExecutor( - exec execution.ShardExecutionEngine, - frame uint64, -) <-chan error { - errChan := make(chan error, 1) - - e.executorsMu.Lock() - defer e.executorsMu.Unlock() - - name := exec.GetName() - if _, exists := e.executors[name]; exists { - errChan <- errors.New("executor already registered") - close(errChan) - return errChan - } - - e.executors[name] = exec - - // Update metrics - executorRegistrationTotal.WithLabelValues("register").Inc() - executorsRegistered.Set(float64(len(e.executors))) - - close(errChan) - return errChan -} - -func (e *GlobalConsensusEngine) UnregisterExecutor( - name string, - frame uint64, - force bool, -) <-chan error { - errChan := make(chan error, 1) - - e.executorsMu.Lock() - defer e.executorsMu.Unlock() - - if _, exists := e.executors[name]; !exists { - errChan <- errors.New("executor not registered") - close(errChan) - return errChan - } - - // Stop the executor - if exec, ok := e.executors[name]; ok { - stopErrChan := exec.Stop(force) - select { - case err := <-stopErrChan: - if err != nil && !force { - errChan <- errors.Wrap(err, "stop executor") - close(errChan) - return errChan - } - case <-time.After(5 * time.Second): - if !force { - errChan <- errors.New("timeout stopping executor") - close(errChan) - return errChan - } - } - } - - delete(e.executors, name) - - // Update metrics - executorRegistrationTotal.WithLabelValues("unregister").Inc() - executorsRegistered.Set(float64(len(e.executors))) - - close(errChan) - return errChan -} - func (e *GlobalConsensusEngine) GetProvingKey( engineConfig *config.EngineConfig, ) (crypto.Signer, crypto.KeyType, []byte, []byte) { @@ -1360,24 +1353,22 @@ func (e *GlobalConsensusEngine) getProverAddress() []byte { return addressBI.FillBytes(make([]byte, 32)) } -func (e *GlobalConsensusEngine) updateMetrics() { +func (e *GlobalConsensusEngine) updateMetrics( + ctx lifecycle.SignalerContext, +) { defer func() { if r := recover(); r != nil { e.logger.Error("fatal error encountered", zap.Any("panic", r)) - if e.cancel != nil { - e.cancel() - } - e.quit <- struct{}{} + ctx.Throw(errors.Errorf("fatal unhandled error encountered: %v", r)) } }() - defer e.wg.Done() ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-e.quit: return @@ -1388,12 +1379,6 @@ func (e *GlobalConsensusEngine) updateMetrics() { e.lastProvenFrameTimeMu.RUnlock() timeSinceLastProvenFrame.Set(timeSince) - // Update executor count - e.executorsMu.RLock() - execCount := len(e.executors) - e.executorsMu.RUnlock() - executorsRegistered.Set(float64(execCount)) - // Update current frame number if frame := e.GetFrame(); frame != nil && frame.Header != nil { currentFrameNumber.Set(float64(frame.Header.FrameNumber)) @@ -1863,16 +1848,16 @@ func (e *GlobalConsensusEngine) signPeerInfo( // reportPeerInfoPeriodically sends peer info over the peer info bitmask every // 5 minutes -func (e *GlobalConsensusEngine) reportPeerInfoPeriodically() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) reportPeerInfoPeriodically( + ctx lifecycle.SignalerContext, +) { e.logger.Info("starting periodic peer info reporting") ticker := time.NewTicker(5 * time.Minute) defer ticker.Stop() for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): e.logger.Info("stopping periodic peer info reporting") return case <-ticker.C: @@ -1902,9 +1887,9 @@ func (e 
*GlobalConsensusEngine) reportPeerInfoPeriodically() { } } -func (e *GlobalConsensusEngine) pruneTxLocksPeriodically() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) pruneTxLocksPeriodically( + ctx lifecycle.SignalerContext, +) { ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() @@ -1912,7 +1897,7 @@ func (e *GlobalConsensusEngine) pruneTxLocksPeriodically() { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case <-ticker.C: e.pruneTxLocks() @@ -2253,7 +2238,7 @@ func (e *GlobalConsensusEngine) ProposeWorkerJoin( wg.Go(func() error { client := protobufs.NewDataIPCServiceClient(svc) resp, err := client.CreateJoinProof( - e.ctx, + context.TODO(), &protobufs.CreateJoinProofRequest{ Challenge: challenge[:], Difficulty: frame.Header.Difficulty, @@ -2432,3 +2417,900 @@ func (e *GlobalConsensusEngine) DecideWorkerJoins( return nil } + +func (e *GlobalConsensusEngine) startConsensus( + trustedRoot *models.CertifiedState[*protobufs.GlobalFrame], + pending []*models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ], + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) error { + var err error + e.consensusParticipant, err = participant.NewParticipant[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + GlobalPeerID, + GlobalCollectedCommitments, + ]( + tracing.NewZapTracer(e.logger), // logger + e, // committee + verification.NewSigner[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + GlobalPeerID, + ](e.votingProvider), // signer + e.leaderProvider, // prover + e.votingProvider, // voter + e.notifier, // notifier + e.consensusStore, // consensusStore + e.signatureAggregator, // signatureAggregator + e, // consensusVerifier + e.voteCollectorDistributor, // voteCollectorDistributor + e.timeoutCollectorDistributor, // timeoutCollectorDistributor + e.forks, // forks + validator.NewValidator[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ](e, e), // validator + e.voteAggregator, // voteAggregator + e.timeoutAggregator, // timeoutAggregator + e, // finalizer + nil, // filter + trustedRoot, + pending, + ) + if err != nil { + return err + } + + ready() + e.voteAggregator.Start(ctx) + e.timeoutAggregator.Start(ctx) + <-lifecycle.AllReady(e.voteAggregator, e.timeoutAggregator) + e.consensusParticipant.Start(ctx) + return nil +} + +// MakeFinal implements consensus.Finalizer. +func (e *GlobalConsensusEngine) MakeFinal(stateID models.Identity) error { + // In a standard BFT-only approach, this would be how frames are finalized on + // the time reel. But we're PoMW, so we don't rely on BFT for anything outside + // of basic coordination. If the protocol were ever to move to something like + // PoS, this would be one of the touch points to revisit. + return nil +} + +// OnCurrentRankDetails implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnCurrentRankDetails( + currentRank uint64, + finalizedRank uint64, + currentLeader models.Identity, +) { + e.logger.Info( + "entered new rank", + zap.Uint64("current_rank", currentRank), + zap.String("current_leader", hex.EncodeToString([]byte(currentLeader))), + ) +} + +// OnDoubleProposeDetected implements consensus.Consumer. 
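+// When two conflicting proposals are observed for the same rank, it publishes
+// a global equivocation control event carrying both states to the event
+// distributor, unless the engine has already halted.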
+func (e *GlobalConsensusEngine) OnDoubleProposeDetected( + proposal1 *models.State[*protobufs.GlobalFrame], + proposal2 *models.State[*protobufs.GlobalFrame], +) { + select { + case <-e.haltCtx.Done(): + return + default: + } + e.eventDistributor.Publish(typesconsensus.ControlEvent{ + Type: typesconsensus.ControlEventGlobalEquivocation, + Data: &consensustime.GlobalEvent{ + Type: consensustime.TimeReelEventEquivocationDetected, + Frame: *proposal2.State, + OldHead: *proposal1.State, + Message: fmt.Sprintf( + "equivocation at rank %d", + proposal1.Rank, + ), + }, + }) +} + +// OnEventProcessed implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnEventProcessed() {} + +// OnFinalizedState implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnFinalizedState( + state *models.State[*protobufs.GlobalFrame], +) { +} + +// OnInvalidStateDetected implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnInvalidStateDetected( + err *models.InvalidProposalError[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ], +) { +} // Presently a no-op, up for reconsideration + +// OnLocalTimeout implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnLocalTimeout(currentRank uint64) {} + +// OnOwnProposal implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnOwnProposal( + proposal *models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ], + targetPublicationTime time.Time, +) { + go func() { + select { + case <-time.After(time.Until(targetPublicationTime)): + case <-e.ShutdownSignal(): + return + } + var priorTC *protobufs.TimeoutCertificate = nil + if proposal.PreviousRankTimeoutCertificate != nil { + priorTC = + proposal.PreviousRankTimeoutCertificate.(*protobufs.TimeoutCertificate) + } + + // Manually override the signature as the vdf prover's signature is invalid + (*proposal.State.State).Header.PublicKeySignatureBls48581.Signature = + (*proposal.Vote).PublicKeySignatureBls48581.Signature + + pbProposal := &protobufs.GlobalProposal{ + State: *proposal.State.State, + ParentQuorumCertificate: proposal.Proposal.State.ParentQuorumCertificate.(*protobufs.QuorumCertificate), + PriorRankTimeoutCertificate: priorTC, + Vote: *proposal.Vote, + } + data, err := pbProposal.ToCanonicalBytes() + if err != nil { + e.logger.Error("could not serialize proposal", zap.Error(err)) + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutProposalVote(txn, *proposal.Vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.voteAggregator.AddState(proposal) + e.consensusParticipant.SubmitProposal(proposal) + + if err := e.pubsub.PublishToBitmask( + GLOBAL_CONSENSUS_BITMASK, + data, + ); err != nil { + e.logger.Error("could not publish", zap.Error(err)) + } + }() +} + +// OnOwnTimeout implements consensus.Consumer. 
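+// It persists the local timeout vote to the clock store, hands it to the
+// timeout aggregator, and broadcasts it on the global consensus bitmask,
+// skipping all of this once the engine has halted.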
+func (e *GlobalConsensusEngine) OnOwnTimeout( + timeout *models.TimeoutState[*protobufs.ProposalVote], +) { + select { + case <-e.haltCtx.Done(): + return + default: + } + + var priorTC *protobufs.TimeoutCertificate + if timeout.PriorRankTimeoutCertificate != nil { + priorTC = + timeout.PriorRankTimeoutCertificate.(*protobufs.TimeoutCertificate) + } + + pbTimeout := &protobufs.TimeoutState{ + LatestQuorumCertificate: timeout.LatestQuorumCertificate.(*protobufs.QuorumCertificate), + PriorRankTimeoutCertificate: priorTC, + Vote: *timeout.Vote, + TimeoutTick: timeout.TimeoutTick, + Timestamp: uint64(time.Now().UnixMilli()), + } + data, err := pbTimeout.ToCanonicalBytes() + if err != nil { + e.logger.Error("could not serialize timeout", zap.Error(err)) + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutTimeoutVote(txn, pbTimeout); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.timeoutAggregator.AddTimeout(timeout) + + if err := e.pubsub.PublishToBitmask( + GLOBAL_CONSENSUS_BITMASK, + data, + ); err != nil { + e.logger.Error("could not publish", zap.Error(err)) + } +} + +// OnOwnVote implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnOwnVote( + vote **protobufs.ProposalVote, + recipientID models.Identity, +) { + select { + case <-e.haltCtx.Done(): + return + default: + } + + data, err := (*vote).ToCanonicalBytes() + if err != nil { + e.logger.Error("could not serialize vote", zap.Error(err)) + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutProposalVote(txn, *vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.voteAggregator.AddVote(vote) + + if err := e.pubsub.PublishToBitmask( + GLOBAL_CONSENSUS_BITMASK, + data, + ); err != nil { + e.logger.Error("could not publish", zap.Error(err)) + } +} + +// OnPartialTimeoutCertificate implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnPartialTimeoutCertificate( + currentRank uint64, + partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated, +) { +} + +// OnQuorumCertificateTriggeredRankChange implements consensus.Consumer.
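+// It persists the new quorum certificate, attaches its aggregate signature to
+// the cached frame, inserts that frame into the time reel, and records the
+// certified state; if the frame is missing or does not align with the time
+// reel head, it requests a sync from a prover instead.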
+func (e *GlobalConsensusEngine) OnQuorumCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + qc models.QuorumCertificate, +) { + e.logger.Debug("adding certified state", zap.Uint64("rank", newRank-1)) + + parentQC, err := e.clockStore.GetLatestQuorumCertificate(nil) + if err != nil { + e.logger.Error("no latest quorum certificate", zap.Error(err)) + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + aggregateSig := &protobufs.BLS48581AggregateSignature{ + Signature: qc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: qc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: qc.GetAggregatedSignature().GetBitmask(), + } + if err := e.clockStore.PutQuorumCertificate( + &protobufs.QuorumCertificate{ + Rank: qc.GetRank(), + FrameNumber: qc.GetFrameNumber(), + Selector: []byte(qc.Identity()), + AggregateSignature: aggregateSig, + }, + txn, + ); err != nil { + e.logger.Error("could not insert quorum certificate", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.frameStoreMu.RLock() + frame, ok := e.frameStore[qc.Identity()] + e.frameStoreMu.RUnlock() + + if !ok { + e.logger.Error( + "no frame for quorum certificate", + zap.Uint64("rank", newRank-1), + zap.Uint64("frame_number", qc.GetFrameNumber()), + ) + current, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Error("could not get time reel head", zap.Error(err)) + return + } + peer, err := e.getRandomProverPeerId() + if err != nil { + e.logger.Error("could not get random peer", zap.Error(err)) + return + } + e.syncProvider.AddState( + []byte(peer), + current.Header.FrameNumber, + ) + return + } + + frame.Header.PublicKeySignatureBls48581 = aggregateSig + + err = e.globalTimeReel.Insert(frame) + if err != nil { + e.logger.Error("could not insert frame into time reel", zap.Error(err)) + return + } + + current, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Error("could not get time reel head", zap.Error(err)) + return + } + + if !bytes.Equal(frame.Header.Output, current.Header.Output) { + e.logger.Error( + "frames not aligned, might need sync", + zap.Uint64("new_frame_number", frame.Header.FrameNumber), + zap.Uint64("reel_frame_number", current.Header.FrameNumber), + zap.Uint64("new_frame_rank", frame.Header.Rank), + zap.Uint64("reel_frame_rank", current.Header.Rank), + zap.String("new_frame_id", hex.EncodeToString([]byte(frame.Identity()))), + zap.String( + "reel_frame_id", + hex.EncodeToString([]byte(current.Identity())), + ), + ) + + peerID, err := e.getPeerIDOfProver(frame.Header.Prover) + if err != nil { + return + } + + e.syncProvider.AddState( + []byte(peerID), + current.Header.FrameNumber, + ) + return + } + + if !bytes.Equal(frame.Header.ParentSelector, parentQC.Selector) { + e.logger.Error( + "quorum certificate does not match frame parent", + zap.String( + "frame_parent_selector", + hex.EncodeToString(frame.Header.ParentSelector), + ), + zap.String( + "parent_qc_selector", + hex.EncodeToString(parentQC.Selector), + ), + zap.Uint64("parent_qc_rank", parentQC.Rank), + ) + return + } + + priorRankTC, err := e.clockStore.GetTimeoutCertificate(nil, qc.GetRank()-1) + if err != nil { + e.logger.Debug("no prior rank TC to include", zap.Uint64("rank", newRank-1)) + } + + vote, err := 
e.clockStore.GetProposalVote( + nil, + frame.GetRank(), + []byte(frame.Source()), + ) + if err != nil { + e.logger.Error( + "cannot find proposer's vote", + zap.Uint64("rank", newRank-1), + zap.String("proposer", hex.EncodeToString([]byte(frame.Source()))), + ) + return + } + + txn, err = e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutCertifiedGlobalState( + &protobufs.GlobalProposal{ + State: frame, + ParentQuorumCertificate: parentQC, + PriorRankTimeoutCertificate: priorRankTC, + Vote: vote, + }, + txn, + ); err != nil { + e.logger.Error("could not insert certified state", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + } +} + +// OnRankChange implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnRankChange(oldRank uint64, newRank uint64) { + e.currentRank = newRank +} + +// OnReceiveProposal implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ], +) { +} + +// OnReceiveQuorumCertificate implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnReceiveQuorumCertificate( + currentRank uint64, + qc models.QuorumCertificate, +) { +} + +// OnReceiveTimeoutCertificate implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnReceiveTimeoutCertificate( + currentRank uint64, + tc models.TimeoutCertificate, +) { +} + +// OnStart implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnStart(currentRank uint64) {} + +// OnStartingTimeout implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnStartingTimeout( + startTime time.Time, + endTime time.Time, +) { +} + +// OnStateIncorporated implements consensus.Consumer. +func (e *GlobalConsensusEngine) OnStateIncorporated( + state *models.State[*protobufs.GlobalFrame], +) { + e.frameStoreMu.Lock() + e.frameStore[state.Identifier] = *state.State + e.frameStoreMu.Unlock() +} + +// OnTimeoutCertificateTriggeredRankChange implements consensus.Consumer. 
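+// It persists the timeout certificate, together with its embedded latest
+// quorum certificate, to the clock store.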
+func (e *GlobalConsensusEngine) OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, +) { + e.logger.Debug( + "inserting timeout certificate", + zap.Uint64("rank", tc.GetRank()), + ) + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + qc := tc.GetLatestQuorumCert() + err = e.clockStore.PutTimeoutCertificate(&protobufs.TimeoutCertificate{ + Rank: tc.GetRank(), + LatestRanks: tc.GetLatestRanks(), + LatestQuorumCertificate: &protobufs.QuorumCertificate{ + Rank: qc.GetRank(), + FrameNumber: qc.GetFrameNumber(), + Selector: []byte(qc.Identity()), + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: qc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: qc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: qc.GetAggregatedSignature().GetBitmask(), + }, + }, + AggregateSignature: &protobufs.BLS48581AggregateSignature{ + Signature: tc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: tc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: tc.GetAggregatedSignature().GetBitmask(), + }, + }, txn) + if err != nil { + txn.Abort() + e.logger.Error("could not insert timeout certificate") + return + } + + if err := txn.Commit(); err != nil { + txn.Abort() + e.logger.Error("could not commit transaction", zap.Error(err)) + } +} + +// VerifyQuorumCertificate implements consensus.Verifier. +func (e *GlobalConsensusEngine) VerifyQuorumCertificate( + quorumCertificate models.QuorumCertificate, +) error { + qc, ok := quorumCertificate.(*protobufs.QuorumCertificate) + if !ok { + return errors.Wrap( + errors.New("invalid quorum certificate"), + "verify quorum certificate", + ) + } + + if err := qc.Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify quorum certificate"), + ) + } + + // genesis qc is special: + if quorumCertificate.GetRank() == 0 { + genqc, err := e.clockStore.GetQuorumCertificate(nil, 0) + if err != nil { + return errors.Wrap(err, "verify quorum certificate") + } + + if genqc.Equals(quorumCertificate) { + return nil + } + } + + provers, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return errors.Wrap(err, "verify quorum certificate") + } + + pubkeys := [][]byte{} + signatures := [][]byte{} + if ((len(provers) + 7) / 8) > len(qc.AggregateSignature.Bitmask) { + return models.ErrInvalidSignature + } + for i, prover := range provers { + if qc.AggregateSignature.Bitmask[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + pubkeys = append(pubkeys, prover.PublicKey) + signatures = append(signatures, qc.AggregateSignature.GetSignature()) + } + } + + aggregationCheck, err := e.blsConstructor.Aggregate(pubkeys, signatures) + if err != nil { + return models.ErrInvalidSignature + } + + if !bytes.Equal( + qc.AggregateSignature.GetPubKey(), + aggregationCheck.GetAggregatePublicKey(), + ) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + qc.AggregateSignature.GetPubKey(), + qc.AggregateSignature.GetSignature(), + verification.MakeVoteMessage(nil, qc.Rank, qc.Identity()), + []byte("global"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +// VerifyTimeoutCertificate implements consensus.Verifier. 
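Editorial note: VerifyQuorumCertificate above and VerifyTimeoutCertificate below both rely on the same signer-bitmask convention: the bitmask must carry at least ceil(len(provers)/8) bytes, and bit i (byte i/8, bit i%8, least-significant bit first) marks the i-th active prover as a participant whose public key is included in the aggregate. The snippet below is a minimal, self-contained sketch of that indexing, separate from the engine code and added only for clarity.

package main

import "fmt"

// bitSet reports whether participant i is marked in a signer bitmask,
// using the byte/bit layout the verifiers in this patch assume:
// byte index i/8, bit index i%8, least-significant bit first.
func bitSet(bitmask []byte, i int) bool {
	return bitmask[i/8]&(1<<(i%8)) != 0
}

func main() {
	// Example: 10 provers need ceil(10/8) = 2 bitmask bytes.
	provers := 10
	bitmask := make([]byte, (provers+7)/8)

	// Mark provers 0, 3, and 9 as signers.
	for _, i := range []int{0, 3, 9} {
		bitmask[i/8] |= 1 << (i % 8)
	}

	for i := 0; i < provers; i++ {
		fmt.Printf("prover %d signed: %v\n", i, bitSet(bitmask, i))
	}
}
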
+func (e *GlobalConsensusEngine) VerifyTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, +) error { + tc, ok := timeoutCertificate.(*protobufs.TimeoutCertificate) + if !ok { + return errors.Wrap( + errors.New("invalid timeout certificate"), + "verify timeout certificate", + ) + } + + if err := tc.Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify timeout certificate"), + ) + } + + provers, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return errors.Wrap(err, "verify timeout certificate") + } + + pubkeys := [][]byte{} + signatures := [][]byte{} + if ((len(provers) + 7) / 8) > len(tc.AggregateSignature.Bitmask) { + return models.ErrInvalidSignature + } + for i, prover := range provers { + if tc.AggregateSignature.Bitmask[i/8]&(1<<(i%8)) == (1 << (i % 8)) { + pubkeys = append(pubkeys, prover.PublicKey) + signatures = append(signatures, tc.AggregateSignature.GetSignature()) + } + } + + aggregationCheck, err := e.blsConstructor.Aggregate(pubkeys, signatures) + if err != nil { + return models.ErrInvalidSignature + } + + if !bytes.Equal( + tc.AggregateSignature.GetPubKey(), + aggregationCheck.GetAggregatePublicKey(), + ) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + tc.AggregateSignature.GetPubKey(), + tc.AggregateSignature.GetSignature(), + verification.MakeTimeoutMessage( + nil, + tc.Rank, + tc.LatestQuorumCertificate.Rank, + ), + []byte("globaltimeout"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +// VerifyVote implements consensus.Verifier. +func (e *GlobalConsensusEngine) VerifyVote( + vote **protobufs.ProposalVote, +) error { + if vote == nil || *vote == nil { + return errors.Wrap(errors.New("nil vote"), "verify vote") + } + + if err := (*vote).Validate(); err != nil { + return models.NewInvalidFormatError( + errors.Wrap(err, "verify vote"), + ) + } + + provers, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return errors.Wrap(err, "verify vote") + } + + var pubkey []byte + for _, p := range provers { + if bytes.Equal(p.Address, (*vote).PublicKeySignatureBls48581.Address) { + pubkey = p.PublicKey + break + } + } + + if bytes.Equal(pubkey, []byte{}) { + return models.ErrInvalidSignature + } + + if valid := e.blsConstructor.VerifySignatureRaw( + pubkey, + (*vote).PublicKeySignatureBls48581.Signature, + verification.MakeVoteMessage(nil, (*vote).Rank, (*vote).Source()), + []byte("global"), + ); !valid { + return models.ErrInvalidSignature + } + + return nil +} + +func (e *GlobalConsensusEngine) getPendingProposals( + frameNumber uint64, +) []*models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, +] { + pendingFrames, err := e.clockStore.RangeGlobalClockFrames( + frameNumber, + 0xfffffffffffffffe, + ) + if err != nil { + panic(err) + } + defer pendingFrames.Close() + + result := []*models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ]{} + + e.logger.Debug("getting pending proposals", zap.Uint64("start", frameNumber)) + pendingFrames.First() + if !pendingFrames.Valid() { + e.logger.Debug("no valid frame") + return result + } + value, err := pendingFrames.Value() + if err != nil || value == nil { + e.logger.Debug("value was invalid", zap.Error(err)) + return result + } + + previous := value + for pendingFrames.Next(); pendingFrames.Valid(); pendingFrames.Next() { + value, err := pendingFrames.Value() + if err != nil || value == nil { + e.logger.Debug("iter value was invalid 
or empty", zap.Error(err)) + break + } + + parent, err := e.clockStore.GetQuorumCertificate(nil, previous.GetRank()) + if err != nil { + panic(err) + } + + priorTC, _ := e.clockStore.GetTimeoutCertificate(nil, value.GetRank()-1) + var priorTCModel models.TimeoutCertificate = nil + if priorTC != nil { + priorTCModel = priorTC + } + + vote := &protobufs.ProposalVote{ + Rank: value.GetRank(), + FrameNumber: value.Header.FrameNumber, + Selector: []byte(value.Identity()), + PublicKeySignatureBls48581: &protobufs.BLS48581AddressedSignature{ + Signature: value.Header.PublicKeySignatureBls48581.Signature, + Address: []byte(value.Source()), + }, + } + result = append(result, &models.SignedProposal[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ]{ + Proposal: models.Proposal[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: value.GetRank(), + Identifier: value.Identity(), + ProposerID: value.Source(), + ParentQuorumCertificate: parent, + State: &value, + }, + PreviousRankTimeoutCertificate: priorTCModel, + }, + Vote: &vote, + }) + previous = value + } + return result +} + +func (e *GlobalConsensusEngine) getPeerIDOfProver( + prover []byte, +) (peer.ID, error) { + registry, err := e.signerRegistry.GetKeyRegistryByProver( + prover, + ) + if err != nil { + e.logger.Debug( + "could not get registry for prover", + zap.Error(err), + ) + return "", err + } + + if registry == nil || registry.IdentityKey == nil { + e.logger.Debug("registry for prover not found") + return "", err + } + + pk, err := pcrypto.UnmarshalEd448PublicKey(registry.IdentityKey.KeyValue) + if err != nil { + e.logger.Debug( + "could not parse pub key", + zap.Error(err), + ) + return "", err + } + + id, err := peer.IDFromPublicKey(pk) + if err != nil { + e.logger.Debug( + "could not derive peer id", + zap.Error(err), + ) + return "", err + } + + return id, nil +} + +func (e *GlobalConsensusEngine) getRandomProverPeerId() (peer.ID, error) { + provers, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + e.logger.Error( + "could not get active provers for sync", + zap.Error(err), + ) + } + if len(provers) == 0 { + return "", err + } + + otherProvers := []*typesconsensus.ProverInfo{} + for _, p := range provers { + if bytes.Equal(p.Address, e.getProverAddress()) { + continue + } + otherProvers = append(otherProvers, p) + } + + index := rand.Intn(len(otherProvers)) + return e.getPeerIDOfProver(otherProvers[index].Address) +} + +var _ consensus.DynamicCommittee = (*GlobalConsensusEngine)(nil) diff --git a/node/consensus/global/global_consensus_engine_integration_test.go b/node/consensus/global/global_consensus_engine_integration_test.go index aaecd98..bcf4cd9 100644 --- a/node/consensus/global/global_consensus_engine_integration_test.go +++ b/node/consensus/global/global_consensus_engine_integration_test.go @@ -33,9 +33,9 @@ import ( "source.quilibrium.com/quilibrium/monorepo/bulletproofs" "source.quilibrium.com/quilibrium/monorepo/channel" "source.quilibrium.com/quilibrium/monorepo/config" - "source.quilibrium.com/quilibrium/monorepo/consensus" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/compiler" "source.quilibrium.com/quilibrium/monorepo/node/consensus/difficulty" "source.quilibrium.com/quilibrium/monorepo/node/consensus/events" @@ -61,16 +61,6 @@ import ( 
"source.quilibrium.com/quilibrium/monorepo/verenc" ) -type testTransitionListener struct { - onTransition func(from, to consensus.State, event consensus.Event) -} - -func (l *testTransitionListener) OnTransition(from, to consensus.State, event consensus.Event) { - if l.onTransition != nil { - l.onTransition(from, to, event) - } -} - // mockIntegrationPubSub is a pubsub mock for integration testing type mockIntegrationPubSub struct { mock.Mock @@ -129,11 +119,11 @@ func (m *mockIntegrationPubSub) PublishToBitmask(bitmask []byte, data []byte) er typePrefix := binary.BigEndian.Uint32(data[:4]) // Check if it's a GlobalFrame - if typePrefix == protobufs.GlobalFrameType { - frame := &protobufs.GlobalFrame{} + if typePrefix == protobufs.GlobalProposalType { + frame := &protobufs.GlobalProposal{} if err := frame.FromCanonicalBytes(data); err == nil { m.mu.Lock() - m.frames = append(m.frames, frame) + m.frames = append(m.frames, frame.State) m.mu.Unlock() } } @@ -286,6 +276,9 @@ func registerProverInHypergraph(t *testing.T, hg thypergraph.Hypergraph, publicK t.Fatalf("Failed to insert status: %v", err) } + err = tree.Insert([]byte{3 << 2}, []byte{0, 0, 0, 0, 0, 0, 3, 232}, nil, big.NewInt(0)) // seniority = 1000 + require.NoError(t, err) + // Type Index: typeBI, _ := poseidon.HashBytes( slices.Concat(bytes.Repeat([]byte{0xff}, 32), []byte("prover:Prover")), @@ -548,13 +541,14 @@ func createIntegrationTestGlobalConsensusEngineWithHypergraphAndKey( nil, // inboxStore nil, // hypergraphStore store.NewPebbleShardsStore(pebbleDB, logger), + store.NewPebbleConsensusStore(pebbleDB, logger), store.NewPebbleWorkerStore(pebbleDB, logger), channel.NewDoubleRatchetEncryptedChannel(), // encryptedChannel &bulletproofs.Decaf448BulletproofProver{}, // bulletproofProver &verenc.MPCitHVerifiableEncryptor{}, // verEnc &bulletproofs.Decaf448KeyConstructor{}, // decafConstructor compiler.NewBedlamCompiler(), - nil, + bc, qp2p.NewInMemoryPeerInfoManager(logger), ) require.NoError(t, err) @@ -583,11 +577,11 @@ func TestGlobalConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { typePrefix := binary.BigEndian.Uint32(data[:4]) // Check if it's a GlobalFrame - if typePrefix == protobufs.GlobalFrameType { - frame := &protobufs.GlobalFrame{} + if typePrefix == protobufs.GlobalProposalType { + frame := &protobufs.GlobalProposal{} if err := frame.FromCanonicalBytes(data); err == nil { mu.Lock() - publishedFrames = append(publishedFrames, frame) + publishedFrames = append(publishedFrames, frame.State) mu.Unlock() } } @@ -595,8 +589,9 @@ func TestGlobalConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { } // Start the engine - quit := make(chan struct{}) - errChan := engine.Start(quit) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err := engine.Start(ctx) + require.NoError(t, err) // Check for startup errors select { @@ -607,7 +602,7 @@ func TestGlobalConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { } // Wait for state transitions - time.Sleep(2 * time.Second) + time.Sleep(20 * time.Second) // Verify engine is in an active state state := engine.GetState() @@ -626,55 +621,7 @@ func TestGlobalConsensusEngine_Integration_BasicFrameProgression(t *testing.T) { t.Logf("Published %d frames", frameCount) // Stop the engine - close(quit) - <-engine.Stop(false) -} - -func TestGlobalConsensusEngine_Integration_StateTransitions(t *testing.T) { - // Generate hosts for testing - _, m, cleanupHosts := tests.GenerateSimnetHosts(t, 1, []libp2p.Option{}) - defer 
cleanupHosts() - - engine, _, _, _ := createIntegrationTestGlobalConsensusEngine(t, []byte(m.Nodes[0].ID()), 99, m.Nodes[0], m.Keys[0], m.Nodes) - - // Track state transitions - transitions := make([]string, 0) - var mu sync.Mutex - - listener := &testTransitionListener{ - onTransition: func(from, to consensus.State, event consensus.Event) { - mu.Lock() - transitions = append(transitions, fmt.Sprintf("%s->%s", from, to)) - mu.Unlock() - t.Logf("State transition: %s -> %s (event: %s)", from, to, event) - }, - } - - // Start the engine - quit := make(chan struct{}) - errChan := engine.Start(quit) - engine.stateMachine.AddListener(listener) - - // Check for startup errors - select { - case err := <-errChan: - require.NoError(t, err) - case <-time.After(100 * time.Millisecond): - // No error is good - } - - // Wait for state transitions - time.Sleep(10 * time.Second) - - // Verify we had some state transitions - mu.Lock() - transitionCount := len(transitions) - mu.Unlock() - - assert.Greater(t, transitionCount, 0, "Expected at least one state transition") - - // Stop the engine - close(quit) + cancel() <-engine.Stop(false) } @@ -768,13 +715,13 @@ func TestGlobalConsensusEngine_Integration_MultiNodeConsensus(t *testing.T) { typePrefix := binary.BigEndian.Uint32(data[:4]) // Check if it's a GlobalFrame - if typePrefix == protobufs.GlobalFrameType { - frame := &protobufs.GlobalFrame{} + if typePrefix == protobufs.GlobalProposalType { + frame := &protobufs.GlobalProposal{} if err := frame.FromCanonicalBytes(data); err == nil { mu.Lock() - allFrames[nodeIdx] = append(allFrames[nodeIdx], frame) + allFrames[nodeIdx] = append(allFrames[nodeIdx], frame.State) mu.Unlock() - t.Logf("Node %d published frame %d", nodeIdx+1, frame.Header.FrameNumber) + t.Logf("Node %d published frame %d", nodeIdx+1, frame.State.Header.FrameNumber) } } } @@ -790,9 +737,9 @@ func TestGlobalConsensusEngine_Integration_MultiNodeConsensus(t *testing.T) { mu.Lock() defer mu.Unlock() switch typePrefix { - case protobufs.GlobalFrameType: + case protobufs.GlobalProposalType: proposalCount[nodeIdx]++ - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: voteCount[nodeIdx]++ case protobufs.ProverLivenessCheckType: livenessCount[nodeIdx]++ @@ -802,11 +749,13 @@ func TestGlobalConsensusEngine_Integration_MultiNodeConsensus(t *testing.T) { }) } + cancels := []func(){} // Start all engines - quits := make([]chan struct{}, 6) for i := 0; i < 6; i++ { - quits[i] = make(chan struct{}) - errChan := engines[i].Start(quits[i]) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels = append(cancels, cancel) // Check for startup errors select { @@ -888,7 +837,7 @@ loop: // Stop all engines for i := 0; i < 6; i++ { - close(quits[i]) + cancels[i]() <-engines[i].Stop(false) } } @@ -947,10 +896,12 @@ func TestGlobalConsensusEngine_Integration_ShardCoverage(t *testing.T) { engine.eventDistributor = eventDistributor // Start the event distributor - engine.Start(make(chan struct{})) + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + err := engine.Start(ctx) + require.NoError(t, err) // Run shard coverage check - err := engine.checkShardCoverage(1) + err = engine.checkShardCoverage(1) require.NoError(t, err) // Wait for event processing and possible new app shard head @@ -972,7 +923,8 @@ func TestGlobalConsensusEngine_Integration_ShardCoverage(t *testing.T) { require.False(t, newHeadAfter) // Stop the event distributor - 
eventDistributor.Stop() + cancel() + <-engine.Stop(false) } // TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying tests that engines @@ -992,7 +944,7 @@ func TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying(t *testing. engines := make([]*GlobalConsensusEngine, numNodes) pubsubs := make([]*mockIntegrationPubSub, numNodes) - quits := make([]chan struct{}, numNodes) + cancels := make([]func(), numNodes) // Create shared hypergraph with NO provers registered inclusionProver := bls48581.NewKZGInclusionProver(logger) @@ -1094,19 +1046,19 @@ func TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying(t *testing. nil, // inboxStore nil, // hypergraphStore store.NewPebbleShardsStore(pebbleDB, logger), + store.NewPebbleConsensusStore(pebbleDB, logger), store.NewPebbleWorkerStore(pebbleDB, logger), channel.NewDoubleRatchetEncryptedChannel(), &bulletproofs.Decaf448BulletproofProver{}, // bulletproofProver &verenc.MPCitHVerifiableEncryptor{}, // verEnc &bulletproofs.Decaf448KeyConstructor{}, // decafConstructor compiler.NewBedlamCompiler(), - nil, // blsConstructor + bc, // blsConstructor qp2p.NewInMemoryPeerInfoManager(logger), ) require.NoError(t, err) engines[i] = engine - quits[i] = make(chan struct{}) } // Wire up all pubsubs to each other @@ -1120,7 +1072,11 @@ func TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying(t *testing. // Start all engines for i := 0; i < numNodes; i++ { - errChan := engines[i].Start(quits[i]) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err := engines[i].Start(ctx) + require.NoError(t, err) + cancels[i] = cancel + select { case err := <-errChan: require.NoError(t, err) @@ -1145,7 +1101,7 @@ func TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying(t *testing. 
// Stop all engines for i := 0; i < numNodes; i++ { - close(quits[i]) + cancels[i]() <-engines[i].Stop(false) } @@ -1174,14 +1130,14 @@ func TestGlobalConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { typePrefix := binary.BigEndian.Uint32(data[:4]) // Check if it's a GlobalFrame - if typePrefix == protobufs.GlobalFrameType { - frame := &protobufs.GlobalFrame{} + if typePrefix == protobufs.GlobalProposalType { + frame := &protobufs.GlobalProposal{} if err := frame.FromCanonicalBytes(data); err == nil { mu.Lock() if afterAlert { - afterAlertFrames = append(afterAlertFrames, frame) + afterAlertFrames = append(afterAlertFrames, frame.State) } else { - publishedFrames = append(publishedFrames, frame) + publishedFrames = append(publishedFrames, frame.State) } mu.Unlock() } @@ -1190,8 +1146,9 @@ func TestGlobalConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { } // Start the engine - quit := make(chan struct{}) - errChan := engine.Start(quit) + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + err := engine.Start(ctx) + require.NoError(t, err) // Check for startup errors select { @@ -1250,7 +1207,7 @@ func TestGlobalConsensusEngine_Integration_AlertStopsProgression(t *testing.T) { require.Equal(t, 0, afterAlertCount) // Stop the engine - close(quit) + cancel() <-engine.Stop(false) } @@ -1276,6 +1233,9 @@ func registerProverInHypergraphWithFilter(t *testing.T, hg thypergraph.Hypergrap t.Fatalf("Failed to insert status: %v", err) } + err = tree.Insert([]byte{3 << 2}, []byte{0, 0, 0, 0, 0, 0, 3, 232}, nil, big.NewInt(0)) // seniority = 1000 + require.NoError(t, err) + // Type Index: typeBI, _ := poseidon.HashBytes( slices.Concat(bytes.Repeat([]byte{0xff}, 32), []byte("prover:Prover")), diff --git a/node/consensus/global/mainnet_genesis.json b/node/consensus/global/mainnet_genesis.json index 665ed5f..0050ae2 100644 --- a/node/consensus/global/mainnet_genesis.json +++ b/node/consensus/global/mainnet_genesis.json @@ -13,6 +13,7 @@ "QmestbFp8PddwRk6ysBRrmWZEiHun5aRidHkqFxgeFaWVK": "030d8e130fd666160ff5df6dbf935adb69b4906e6ac074f675a268b59b470b9393f45273143924d6a8d647fbce93d4bbfaad10bc13c72770b68070ad0898fd98f2f9882d614c00eb909a07dcccc8d7d7472b13617832ebd1943fd66d8a05cda88171990b15e8a99514a7817619f7fb2577ee82ca496ca36aaa52f31052c2e76039b10f0fcb0b44c2a0dd830e4fdf40fb5647e903c3e44689df8cf38830b9edc6a1746c38672b255b956a434d6538c8807d9be066e9a7121da436a6ac215cafe10a8b44192b252bd5683c3e0c7805155e8f7ad8dfc3b01467f2f029f91100caf4907600d8562b3442952ba8dc086661aa5cafaf09acbf01ec55b950032b3d358b2fa6ee282fd72a4cb28022052bd48656e5aac56a9c50eae3e187eda525fc4552010cb1419507b271c7ff3bd7aabb9ee9a1eb1dc4ad88a76f42808e716490c02efddc3c0a5e2386efcdb0b83dccdc7c543d1571e2d9c927ff433ca8aa03b091e0138549c79fe2c1a90e669cef03540353d6ea1ca5ca71b43dc27caa64447b3c49bd4a66dd12f457468b1d32515efc471e2189bc2bb4ce43b5a526c1e6aa7d60e1e1b6e6f795232a823c37ea6ab2a8af2834a3980a758bec050507b416dad50a3645cf757b48b7f43e1b786d1d9b00c270bcd650ae9e1e325765a751c566776d6e5558a6fcd054a2b4947d5b4adf4c528dac71bde90fcca346505869180f4ed740085197cd2a40c6f8ebbeb90d8c5bf359e21870626fbb2151c9f5d507f5903f7abfaeaea88ca14af4cb51b45d9624c8ebecec5f98b9b41182c539d13ae3fe21c2cafd80abdfbba14b75", "QmaMcbX3NH5d6BXy87C1n2xT68XzCwtkbvMRqdLYphh19b": 
"02108cc721d540c80e378b84b06cb67ecaf9f4bd091a52c1d85b3ef26d536e8482b72cf6651b445bcd0943b40d98bf03c66208170c04689d510cc2317c7ab5c26bb00bb8a1186aee4cc80f6041aca5676b575e65801c88f9e039845338f7ec42fa825b7a7d79138b8ea3427675398b9b6cafd90e5acb33445059946298e675917fdd40fb5810b2dc9de51d7798f271d6eb6cee0353ffb24982dd2e5ac9e482e0fdac819ded3115fe2ce5a28b581a22e74c79aba7895f4ec758c7f57d85c481b4393a0bea7a6b37a0e64a7a4674818f59c9acdb40538103fba190a89f0f05c46a96b24b93c643279c929f6f81f43d17ce0b5ec29e23bf6bcc22efafc6bd8aba77fabf128742fcf5c5f77266c5690118f2b331d4fc6283b9a56f905f94421b9fe9b8789d4b5e07b1b4ae71cb2c064309303fa348a0e58c009f2e34087ac5fcf89c02e6a20b70e7840483bdbbd98f79d4586c4478e869711f70a8bea4baeb789ec63575d42975eb148c79f7f1a02e8d1199214724596cab28dede4c585feab18fa25d9640b117872d8e8af563e1c5dc8a63e7b971ba3fbb79a744c2672dcec227d78583461e66f08ff2cdd6772dd310c9b008a85b1d3a3b010ff224faf8b4863056812a6c0f038a1f29af7c1d23d88f06e722c50f12f59543e550d22155cb7696325bba91055fd1f136dbdf3183c4b39eed32350dc8d11d2a8aaea57d5a77370de21169b96cb673a97aefc2eaa222a8f7964f10618bb25f61f3eb5bcea94e130fcb1a33bd36d3ac612d8e72de81eff2ceefca9c85efcd3218f61737c11c070b4f14790808e591170d2533", "QmS3xJKbAmQxDiry9HpXV6bJyRvyd47pbufpZwEmgY1cy6": "0202e6667d72cf6e25691421b84774f38399aa0580e6807ec71053cb7c67571b2d36e992bf65862c81568860c9a649c413d63785f431f8484f3372d2f61de79485ee31f3670f6dc0340d0142b89c4b17972a0ee8a90e923c237e49dd9d0a658f93a008cb962f5ccb5fbe774c21ecf3bf228944114b8dbbd98128bf40299d52a30f8447db3c755e94a942b68f950e50e26c94d7126265216e69e995337443ec72baf1a5c61c72195e16923b7d04d52802cbd1a27d0b92bce34b6b755fdc7021427a6678d9cbf209874884993effb96181e6caa04dfa43586f72f262bc0a327d6b05f8754c4ffcd2e0a94745917a544fcb808043745d24fc816d4c5a84b03358b0ab24f26f92f409fad55206142aab29952d27f094394ee8b00b5f418a89d1caf95455dd6551067b0ac9540624097b283eeb59ca2b2f8c4e82bb06d6952be97a6e61ed55878aec3a13496a2d9e1015c7a456525552f8c0e9ee8cc8c5c989bc1feb57b8d630d24a05ccf824ee66031a0060729318061c6b933ca1e9659e44f3a11c3f65e3f8d2c2bc7944124290192355913ead6be3ca047a01d2b7a66f48aeed19b96b9209fab73922a1424d4006c42270f8814bf5c544080db0b783402eefcc7a5b41b52d8f6c287dc1c6806994d74a77566fb0cfb946a08329478d0b255d9afbdfa860051060e73b04bb817d86888115bb1b74078a479e9dda2a957e14780ca5100ac7fea80f497bac01b6b9f44e6137de16616961501dcb28b0e766cf3c1fdc87c5ab701510560041857ff32f629fba9077ef7d1473ecd69d0e39ee9c899d2d2afcd2013929670d25", - "QmZKERVN8UkwLp9mPCZw4aaRx9N8Ewnkv7VQh1zyZwBSir": 
"0301d31c4a06e16789184aa15898d96df20e4286569cfde2f26ae44407705a3ab2969876a146e360f33516422bec027809143183b07c3d84c578dbfa87a690a50e35f450c281be0433c70e9e5d2b0aa1967719d06af2c9c2e3e257624567e4c8f9882328ed2011d327ece7cdc1a23ec370ede0ad28a00cf476156c0d7b0968e16b21e01bae11993d988415f18173bcb99887e00137202680a818549aa6944360ac03f234e9aaaa3b333ee96a9f19f693cac97ec5c736b216d210550311507766b72779021b4023d354bd35fc0f2834014911a4ea8fddff19a7a8f69e030cb119d64190fb81d3635721014b05695566d0cb890f5d86ad0d007ea2a8b3008717d89ff9775950083439969873cfacd258be04d05128de5ae60bfb704174592f6565c5539d8e6804a2e899e19acb512eeba676a5b0c64b868937b578f3741a671938aedba2329c17d21a4d910d2b2b886b5efa502c1de3f05495eef88e2247d4d751983a81a928f9b957eabdfb7f7e510ec5dedf9bcdaff92126aff162773299ab920f390fdb1b3bd9e6ae46eb3b16a07ffd69fb38c916c77ed6deb721b0355c21cb9d9cb4b22e8a41756a40c2d48a4764f6781c865a700614126c1008d910a7bbd758261bee38914b753d15259c094b57f301acff008fbad5161aff0204a96290f395206535feaedcedb0cb6121fdf31c28ab9c7d85c7dec473f531347b4f76c12a0c5eb7f0c3c0077697373a409dba2a0813be122807ae6df88e1aa5d086e265e9ea394e5b98a0d96527cf69bc794ea17c54cfa68fd5c75856a6c6d3ff9e7a9df0f22853e20ac9b6442d" + "QmZKERVN8UkwLp9mPCZw4aaRx9N8Ewnkv7VQh1zyZwBSir": "0301d31c4a06e16789184aa15898d96df20e4286569cfde2f26ae44407705a3ab2969876a146e360f33516422bec027809143183b07c3d84c578dbfa87a690a50e35f450c281be0433c70e9e5d2b0aa1967719d06af2c9c2e3e257624567e4c8f9882328ed2011d327ece7cdc1a23ec370ede0ad28a00cf476156c0d7b0968e16b21e01bae11993d988415f18173bcb99887e00137202680a818549aa6944360ac03f234e9aaaa3b333ee96a9f19f693cac97ec5c736b216d210550311507766b72779021b4023d354bd35fc0f2834014911a4ea8fddff19a7a8f69e030cb119d64190fb81d3635721014b05695566d0cb890f5d86ad0d007ea2a8b3008717d89ff9775950083439969873cfacd258be04d05128de5ae60bfb704174592f6565c5539d8e6804a2e899e19acb512eeba676a5b0c64b868937b578f3741a671938aedba2329c17d21a4d910d2b2b886b5efa502c1de3f05495eef88e2247d4d751983a81a928f9b957eabdfb7f7e510ec5dedf9bcdaff92126aff162773299ab920f390fdb1b3bd9e6ae46eb3b16a07ffd69fb38c916c77ed6deb721b0355c21cb9d9cb4b22e8a41756a40c2d48a4764f6781c865a700614126c1008d910a7bbd758261bee38914b753d15259c094b57f301acff008fbad5161aff0204a96290f395206535feaedcedb0cb6121fdf31c28ab9c7d85c7dec473f531347b4f76c12a0c5eb7f0c3c0077697373a409dba2a0813be122807ae6df88e1aa5d086e265e9ea394e5b98a0d96527cf69bc794ea17c54cfa68fd5c75856a6c6d3ff9e7a9df0f22853e20ac9b6442d", + "QmcuXdV3mdgwmhUv9kzRnZmjJyBwE7erNWVo8Q2ikrcjzX": 
"030a9616776ff90a2027c4a9df7234e12bbc6747ab47767502f3b0c2853e93c2be31c5f6abe2be2b5e331d389a1ecead12dbddc8edf0ee9ba5a8ecf7732b0fc3c967322b2aedbb5064b80a8e3434181d75be04d8033923a2beed4f1f9a28f3ce3b9734fddd7520ae1513bba4843200ffe8e7b43bb3ef7bf532505bb70baa86b68dfd4d419ac63ce13f7310fcbebb7e944aa0640331a9c9ce0f8feede477d47ceb78c988c46e3c87ae561b5bf4d59b187693a2bf47182c503d2b0e937ea87187bfc0e7526775a5d1355c71a3200a1eb5256ef965b57a5429305a6fa8d092b104f4038b09c59f0720d2fc25ccd418e51e59f30417b9855d7e1bcd36b9799c0a6e1d2dbf7997650a9159515c72a8b37e2817c27088d1c92e59821c58e909cac8551ba2998dc7d00c903bdb96234d57f9db4e70320257aa55f8f21ce0cb59231e0517b361ed4e03d6445da94bb55fb536f7ccebea6e048efb8e9d7c77755c9c16fb749209a77f99e9d603437e45aab3711aceba7ac7a2840f3665d2ee315a7f034205af39f277f7fbd9bbabe7e4a0dd3cdf0a820d44c93573738b084952453483ce110e0a977e8ac2d460baffd55ba9d10304c0def3d8a962a07aa879d931a9a6cb39458ea7a49bb33a30059de8c1946d88ab73faf2ff471108cf917206f4ee82b674b1d438f6016f7ca2d0a6108d7618e4bda97f9808b05115e8673b934ca3a31e30e323e8680847f76804e505b35d43ecf377ea123318b3b778c64e32cba8816cef307692d4875e0af4d5c6cdf1c3eebc042365fe2dd667a1a50003d41bfd04b754a94d388dfff35a1cd" } } \ No newline at end of file diff --git a/node/consensus/global/message_processors.go b/node/consensus/global/message_processors.go index e1a66d3..e67f156 100644 --- a/node/consensus/global/message_processors.go +++ b/node/consensus/global/message_processors.go @@ -2,10 +2,10 @@ package global import ( "bytes" + "context" "encoding/binary" "encoding/hex" "fmt" - "math/bits" "slices" "github.com/iden3/go-iden3-crypto/poseidon" @@ -13,7 +13,10 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/crypto" "source.quilibrium.com/quilibrium/monorepo/types/tries" @@ -21,10 +24,11 @@ import ( var keyRegistryDomain = []byte("KEY_REGISTRY") -func (e *GlobalConsensusEngine) processGlobalConsensusMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processGlobalConsensusMessageQueue( + ctx lifecycle.SignalerContext, +) { if e.config.P2P.Network != 99 && !e.config.Engine.ArchiveMode { + <-ctx.Done() return } @@ -32,7 +36,7 @@ func (e *GlobalConsensusEngine) processGlobalConsensusMessageQueue() { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.globalConsensusMessageQueue: e.handleGlobalConsensusMessage(message) @@ -42,14 +46,14 @@ func (e *GlobalConsensusEngine) processGlobalConsensusMessageQueue() { } } -func (e *GlobalConsensusEngine) processShardConsensusMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processShardConsensusMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.shardConsensusMessageQueue: e.handleShardConsensusMessage(message) @@ -57,9 +61,9 @@ func (e *GlobalConsensusEngine) processShardConsensusMessageQueue() { } } -func (e *GlobalConsensusEngine) processProverMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processProverMessageQueue( + ctx lifecycle.SignalerContext, +) { 
if e.config.P2P.Network != 99 && !e.config.Engine.ArchiveMode { return } @@ -68,7 +72,7 @@ func (e *GlobalConsensusEngine) processProverMessageQueue() { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.globalProverMessageQueue: e.handleProverMessage(message) @@ -76,29 +80,29 @@ func (e *GlobalConsensusEngine) processProverMessageQueue() { } } -func (e *GlobalConsensusEngine) processFrameMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processFrameMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.globalFrameMessageQueue: - e.handleFrameMessage(message) + e.handleFrameMessage(ctx, message) } } } -func (e *GlobalConsensusEngine) processPeerInfoMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processPeerInfoMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { case <-e.haltCtx.Done(): return - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.globalPeerInfoMessageQueue: e.handlePeerInfoMessage(message) @@ -106,12 +110,12 @@ func (e *GlobalConsensusEngine) processPeerInfoMessageQueue() { } } -func (e *GlobalConsensusEngine) processAlertMessageQueue() { - defer e.wg.Done() - +func (e *GlobalConsensusEngine) processAlertMessageQueue( + ctx lifecycle.SignalerContext, +) { for { select { - case <-e.ctx.Done(): + case <-ctx.Done(): return case message := <-e.globalAlertMessageQueue: e.handleAlertMessage(message) @@ -119,6 +123,19 @@ func (e *GlobalConsensusEngine) processAlertMessageQueue() { } } +func (e *GlobalConsensusEngine) processGlobalProposalQueue( + ctx lifecycle.SignalerContext, +) { + for { + select { + case <-ctx.Done(): + return + case proposal := <-e.globalProposalQueue: + e.handleGlobalProposal(proposal) + } + } +} + func (e *GlobalConsensusEngine) handleGlobalConsensusMessage( message *pb.Message, ) { @@ -135,17 +152,14 @@ func (e *GlobalConsensusEngine) handleGlobalConsensusMessage( typePrefix := e.peekMessageType(message) switch typePrefix { - case protobufs.GlobalFrameType: + case protobufs.GlobalProposalType: e.handleProposal(message) - case protobufs.ProverLivenessCheckType: - e.handleLivenessCheck(message) - - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: e.handleVote(message) - case protobufs.FrameConfirmationType: - e.handleConfirmation(message) + case protobufs.TimeoutStateType: + e.handleTimeoutState(message) case protobufs.MessageBundleType: e.handleMessageBundle(message) @@ -174,17 +188,17 @@ func (e *GlobalConsensusEngine) handleShardConsensusMessage( typePrefix := e.peekMessageType(message) switch typePrefix { - case protobufs.GlobalFrameType: + case protobufs.AppShardFrameType: e.handleShardProposal(message) + case protobufs.ProposalVoteType: + e.handleShardVote(message) + case protobufs.ProverLivenessCheckType: e.handleShardLivenessCheck(message) - case protobufs.FrameVoteType: - e.handleShardVote(message) - - case protobufs.FrameConfirmationType: - e.handleShardConfirmation(message) + case protobufs.TimeoutStateType: + // e.handleShardTimeout(message) } } @@ -222,7 +236,10 @@ func (e *GlobalConsensusEngine) handleProverMessage(message *pb.Message) { } } -func (e *GlobalConsensusEngine) handleFrameMessage(message *pb.Message) { +func (e *GlobalConsensusEngine) handleFrameMessage( + ctx context.Context, + message *pb.Message, +) { defer func() { if r := recover(); r != nil { e.logger.Error( @@ 
-254,7 +271,7 @@ func (e *GlobalConsensusEngine) handleFrameMessage(message *pb.Message) { clone := frame.Clone().(*protobufs.GlobalFrame) e.frameStoreMu.Unlock() - if err := e.globalTimeReel.Insert(e.ctx, clone); err != nil { + if err := e.globalTimeReel.Insert(clone); err != nil { // Success metric recorded at the end of processing framesProcessedTotal.WithLabelValues("error").Inc() return @@ -540,7 +557,7 @@ func (e *GlobalConsensusEngine) validateKeyRegistry( if err != nil { return nil, fmt.Errorf("failed to derive identity peer id: %w", err) } - identityPeerID := []byte(peerID.String()) + identityPeerID := []byte(peerID) if keyRegistry.ProverKey == nil || len(keyRegistry.ProverKey.KeyValue) == 0 { @@ -803,162 +820,504 @@ func (e *GlobalConsensusEngine) handleAlertMessage(message *pb.Message) { } } +func (e *GlobalConsensusEngine) handleGlobalProposal( + proposal *protobufs.GlobalProposal, +) { + defer func() { + if r := recover(); r != nil { + e.logger.Error( + "panic recovered from proposal", + zap.Any("panic", r), + zap.Stack("stacktrace"), + ) + } + }() + + e.logger.Debug( + "handling global proposal", + zap.String("id", hex.EncodeToString([]byte(proposal.State.Identity()))), + ) + + // Small gotcha: the proposal structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + pqc := proposal.ParentQuorumCertificate + prtc := proposal.PriorRankTimeoutCertificate + vote := proposal.Vote + signedProposal := &models.SignedProposal[*protobufs.GlobalFrame, *protobufs.ProposalVote]{ + Proposal: models.Proposal[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: proposal.State.GetRank(), + Identifier: proposal.State.Identity(), + ProposerID: proposal.Vote.Identity(), + Timestamp: proposal.State.GetTimestamp(), + State: &proposal.State, + }, + }, + Vote: &vote, + } + + if pqc != nil { + signedProposal.Proposal.State.ParentQuorumCertificate = pqc + } + + if prtc != nil { + signedProposal.PreviousRankTimeoutCertificate = prtc + } + + finalized := e.forks.FinalizedState() + finalizedRank := finalized.Rank + finalizedFrameNumber := (*finalized.State).Header.FrameNumber + + // drop proposals if we already processed them + if proposal.State.Header.FrameNumber <= finalizedFrameNumber || + proposal.State.Header.Rank <= finalizedRank { + e.logger.Debug("dropping stale proposal") + return + } + + _, err := e.clockStore.GetGlobalClockFrame(proposal.State.Header.FrameNumber) + if err == nil { + e.logger.Debug("dropping stale proposal") + return + } + + // if we have a parent, cache and move on + if proposal.State.Header.FrameNumber != 0 { + // also check with persistence layer + parent, err := e.clockStore.GetGlobalClockFrame( + proposal.State.Header.FrameNumber - 1, + ) + if err != nil || !bytes.Equal( + []byte(parent.Identity()), + proposal.State.Header.ParentSelector, + ) { + e.logger.Debug( + "parent frame not stored, requesting sync", + zap.Uint64("frame_number", proposal.State.Header.FrameNumber-1), + ) + e.cacheProposal(proposal) + + peerID, err := e.getPeerIDOfProver(proposal.State.Header.Prover) + if err != nil { + peerID, err = e.getRandomProverPeerId() + if err != nil { + return + } + } + + head, err := e.globalTimeReel.GetHead() + if err != nil { + return + } + + e.syncProvider.AddState( + []byte(peerID), + head.Header.FrameNumber, + ) + return + } + } + + frameNumber := 
proposal.State.Header.FrameNumber + expectedFrame, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Error("could not obtain time reel head", zap.Error(err)) + return + } + + expectedFrameNumber := expectedFrame.Header.FrameNumber + 1 + + if frameNumber < expectedFrameNumber { + e.logger.Debug( + "dropping proposal behind expected frame", + zap.Uint64("frame_number", frameNumber), + zap.Uint64("expected_frame_number", expectedFrameNumber), + ) + return + } + + if frameNumber == expectedFrameNumber { + e.deleteCachedProposal(frameNumber) + if e.processProposal(proposal) { + e.drainProposalCache(frameNumber + 1) + return + } + + e.logger.Debug("failed to process expected proposal, caching") + e.cacheProposal(proposal) + return + } + + e.cacheProposal(proposal) + e.drainProposalCache(expectedFrameNumber) +} + +func (e *GlobalConsensusEngine) processProposal( + proposal *protobufs.GlobalProposal, +) bool { + e.logger.Debug( + "processing proposal", + zap.String("id", hex.EncodeToString([]byte(proposal.State.Identity()))), + ) + + err := e.VerifyQuorumCertificate(proposal.ParentQuorumCertificate) + if err != nil { + e.logger.Debug("proposal has invalid qc", zap.Error(err)) + return false + } + + if proposal.PriorRankTimeoutCertificate != nil { + err := e.VerifyTimeoutCertificate(proposal.PriorRankTimeoutCertificate) + if err != nil { + e.logger.Debug("proposal has invalid tc", zap.Error(err)) + return false + } + } + + err = e.VerifyVote(&proposal.Vote) + if err != nil { + e.logger.Debug("proposal has invalid vote", zap.Error(err)) + return false + } + + err = proposal.State.Validate() + if err != nil { + e.logger.Debug("proposal is not valid", zap.Error(err)) + return false + } + + valid, err := e.frameValidator.Validate(proposal.State) + if !valid || err != nil { + e.logger.Debug("invalid frame in proposal", zap.Error(err)) + return false + } + + // Small gotcha: the proposal structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + pqc := proposal.ParentQuorumCertificate + prtc := proposal.PriorRankTimeoutCertificate + vote := proposal.Vote + signedProposal := &models.SignedProposal[*protobufs.GlobalFrame, *protobufs.ProposalVote]{ + Proposal: models.Proposal[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: proposal.State.GetRank(), + Identifier: proposal.State.Identity(), + ProposerID: vote.Identity(), + Timestamp: proposal.State.GetTimestamp(), + State: &proposal.State, + }, + }, + Vote: &vote, + } + + if pqc != nil { + signedProposal.Proposal.State.ParentQuorumCertificate = pqc + } + + if prtc != nil { + signedProposal.PreviousRankTimeoutCertificate = prtc + } + e.voteAggregator.AddState(signedProposal) + e.consensusParticipant.SubmitProposal(signedProposal) + + e.trySealParentWithChild(proposal) + e.registerPendingCertifiedParent(proposal) + + return true +} + +func (e *GlobalConsensusEngine) cacheProposal( + proposal *protobufs.GlobalProposal, +) { + frameNumber := proposal.State.Header.FrameNumber + e.proposalCacheMu.Lock() + e.proposalCache[frameNumber] = proposal + e.proposalCacheMu.Unlock() + + e.logger.Debug( + "cached out-of-order proposal", + zap.Uint64("frame_number", frameNumber), + zap.String("id", hex.EncodeToString([]byte(proposal.State.Identity()))), + ) +} + +func (e *GlobalConsensusEngine) deleteCachedProposal(frameNumber uint64) { + e.proposalCacheMu.Lock() + 
delete(e.proposalCache, frameNumber) + e.proposalCacheMu.Unlock() +} + +func (e *GlobalConsensusEngine) popCachedProposal( + frameNumber uint64, +) *protobufs.GlobalProposal { + e.proposalCacheMu.Lock() + defer e.proposalCacheMu.Unlock() + + proposal, ok := e.proposalCache[frameNumber] + if ok { + delete(e.proposalCache, frameNumber) + } + + return proposal +} + +func (e *GlobalConsensusEngine) drainProposalCache(startFrame uint64) { + next := startFrame + for { + prop := e.popCachedProposal(next) + if prop == nil { + return + } + + if !e.processProposal(prop) { + e.logger.Debug( + "cached proposal failed processing, retaining for retry", + zap.Uint64("frame_number", next), + ) + e.cacheProposal(prop) + return + } + + next++ + } +} + +func (e *GlobalConsensusEngine) registerPendingCertifiedParent( + proposal *protobufs.GlobalProposal, +) { + if proposal == nil || proposal.State == nil || proposal.State.Header == nil { + return + } + + frameNumber := proposal.State.Header.FrameNumber + e.pendingCertifiedParentsMu.Lock() + e.pendingCertifiedParents[frameNumber] = proposal + e.pendingCertifiedParentsMu.Unlock() +} + +func (e *GlobalConsensusEngine) trySealParentWithChild( + child *protobufs.GlobalProposal, +) { + if child == nil || child.State == nil || child.State.Header == nil { + return + } + + header := child.State.Header + if header.FrameNumber == 0 { + return + } + + parentFrame := header.FrameNumber - 1 + + e.pendingCertifiedParentsMu.RLock() + parent, ok := e.pendingCertifiedParents[parentFrame] + e.pendingCertifiedParentsMu.RUnlock() + if !ok || parent == nil || parent.State == nil || parent.State.Header == nil { + return + } + + if !bytes.Equal( + header.ParentSelector, + []byte(parent.State.Identity()), + ) { + e.logger.Debug( + "pending parent selector mismatch, dropping entry", + zap.Uint64("parent_frame", parent.State.Header.FrameNumber), + zap.Uint64("child_frame", header.FrameNumber), + ) + e.pendingCertifiedParentsMu.Lock() + delete(e.pendingCertifiedParents, parentFrame) + e.pendingCertifiedParentsMu.Unlock() + return + } + + e.logger.Debug( + "sealing parent with descendant proposal", + zap.Uint64("parent_frame", parent.State.Header.FrameNumber), + zap.Uint64("child_frame", header.FrameNumber), + ) + + head, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Error("error fetching time reel head", zap.Error(err)) + return + } + + if head.Header.FrameNumber+1 == parent.State.Header.FrameNumber { + e.addCertifiedState(parent, child) + } + + e.pendingCertifiedParentsMu.Lock() + delete(e.pendingCertifiedParents, parentFrame) + e.pendingCertifiedParentsMu.Unlock() +} + +func (e *GlobalConsensusEngine) addCertifiedState( + parent, child *protobufs.GlobalProposal, +) { + if parent == nil || parent.State == nil || parent.State.Header == nil || + child == nil || child.State == nil || child.State.Header == nil { + e.logger.Error("cannot seal certified state: missing parent or child data") + return + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + qc := child.ParentQuorumCertificate + if qc == nil { + e.logger.Error( + "child missing parent quorum certificate", + zap.Uint64("child_frame_number", child.State.Header.FrameNumber), + ) + return + } + aggregateSig := &protobufs.BLS48581AggregateSignature{ + Signature: qc.GetAggregatedSignature().GetSignature(), + PublicKey: &protobufs.BLS48581G2PublicKey{ + KeyValue: qc.GetAggregatedSignature().GetPubKey(), + }, + Bitmask: 
qc.GetAggregatedSignature().GetBitmask(), + } + if err := e.clockStore.PutQuorumCertificate( + &protobufs.QuorumCertificate{ + Rank: qc.GetRank(), + FrameNumber: qc.GetFrameNumber(), + Selector: []byte(qc.Identity()), + AggregateSignature: aggregateSig, + }, + txn, + ); err != nil { + e.logger.Error("could not insert quorum certificate", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + parent.State.Header.PublicKeySignatureBls48581 = aggregateSig + + err = e.globalTimeReel.Insert(parent.State) + if err != nil { + e.logger.Error("could not insert frame into time reel", zap.Error(err)) + return + } + + current, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Error("could not get time reel head", zap.Error(err)) + return + } + + if !bytes.Equal(parent.State.Header.Output, current.Header.Output) { + e.logger.Error( + "frames not aligned", + zap.Uint64("parent_frame_number", parent.State.Header.FrameNumber), + zap.Uint64("new_frame_number", child.State.Header.FrameNumber), + zap.Uint64("reel_frame_number", current.Header.FrameNumber), + zap.Uint64("new_frame_rank", child.State.Header.Rank), + zap.Uint64("reel_frame_rank", current.Header.Rank), + zap.String( + "new_frame_id", + hex.EncodeToString([]byte(child.State.Identity())), + ), + zap.String( + "reel_frame_id", + hex.EncodeToString([]byte(current.Identity())), + ), + ) + return + } + + txn, err = e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutCertifiedGlobalState( + parent, + txn, + ); err != nil { + e.logger.Error("could not insert certified state", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } +} + func (e *GlobalConsensusEngine) handleProposal(message *pb.Message) { + // Skip our own messages + if bytes.Equal(message.From, e.pubsub.GetPeerID()) { + return + } + timer := prometheus.NewTimer(proposalProcessingDuration) defer timer.ObserveDuration() - frame := &protobufs.GlobalFrame{} - if err := frame.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal frame", zap.Error(err)) + proposal := &protobufs.GlobalProposal{} + if err := proposal.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal proposal", zap.Error(err)) proposalProcessedTotal.WithLabelValues("error").Inc() return } - frameIDBI, _ := poseidon.HashBytes(frame.Header.Output) + frameIDBI, _ := poseidon.HashBytes(proposal.State.Header.Output) frameID := frameIDBI.FillBytes(make([]byte, 32)) e.frameStoreMu.Lock() - e.frameStore[string(frameID)] = frame + e.frameStore[string(frameID)] = proposal.State e.frameStoreMu.Unlock() - // For proposals, we need to identify the proposer differently - // The proposer's address should be determinable from the frame header - proposerAddress := e.getAddressFromPublicKey( - frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue, - ) - if len(proposerAddress) > 0 { - clonedFrame := frame.Clone().(*protobufs.GlobalFrame) - if err := e.stateMachine.ReceiveProposal( - GlobalPeerID{ - ID: proposerAddress, - }, - &clonedFrame, - ); err != nil { - e.logger.Error("could not receive proposal", zap.Error(err)) - proposalProcessedTotal.WithLabelValues("error").Inc() - return - } + txn, err := 
e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return } + if err := e.clockStore.PutProposalVote(txn, proposal.Vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.globalProposalQueue <- proposal + // Success metric recorded at the end of processing proposalProcessedTotal.WithLabelValues("success").Inc() } -func (e *GlobalConsensusEngine) handleLivenessCheck(message *pb.Message) { - timer := prometheus.NewTimer(livenessCheckProcessingDuration) - defer timer.ObserveDuration() - - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Validate the liveness check structure - if err := livenessCheck.Validate(); err != nil { - e.logger.Debug("invalid liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - proverSet, err := e.proverRegistry.GetActiveProvers(nil) - if err != nil { - e.logger.Error("could not receive liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - var found []byte = nil - for _, prover := range proverSet { - if bytes.Equal( - prover.Address, - livenessCheck.PublicKeySignatureBls48581.Address, - ) { - lcBytes, err := livenessCheck.ConstructSignaturePayload() - if err != nil { - e.logger.Error( - "could not construct signature message for liveness check", - zap.Error(err), - ) - break - } - valid, err := e.keyManager.ValidateSignature( - crypto.KeyTypeBLS48581G1, - prover.PublicKey, - lcBytes, - livenessCheck.PublicKeySignatureBls48581.Signature, - livenessCheck.GetSignatureDomain(), - ) - if err != nil || !valid { - e.logger.Error( - "could not validate signature for liveness check", - zap.Error(err), - ) - break - } - found = prover.PublicKey - - break - } - } - - if found == nil { - e.logger.Warn( - "invalid liveness check", - zap.String( - "prover", - hex.EncodeToString( - livenessCheck.PublicKeySignatureBls48581.Address, - ), - ), - ) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - signatureData, err := livenessCheck.ConstructSignaturePayload() - if err != nil { - e.logger.Error("invalid signature payload", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - valid, err := e.keyManager.ValidateSignature( - crypto.KeyTypeBLS48581G1, - found, - signatureData, - livenessCheck.PublicKeySignatureBls48581.Signature, - livenessCheck.GetSignatureDomain(), - ) - - if err != nil || !valid { - e.logger.Error("invalid liveness check signature", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - commitment := GlobalCollectedCommitments{ - frameNumber: livenessCheck.FrameNumber, - commitmentHash: livenessCheck.CommitmentHash, - prover: livenessCheck.PublicKeySignatureBls48581.Address, - } - if err := e.stateMachine.ReceiveLivenessCheck( - GlobalPeerID{ID: livenessCheck.PublicKeySignatureBls48581.Address}, - commitment, - ); err != nil { - e.logger.Error("could not receive liveness check", zap.Error(err)) - livenessCheckProcessedTotal.WithLabelValues("error").Inc() - return - } - - 
livenessCheckProcessedTotal.WithLabelValues("success").Inc() -} - func (e *GlobalConsensusEngine) handleVote(message *pb.Message) { + // Skip our own messages + if bytes.Equal(message.From, e.pubsub.GetPeerID()) { + return + } + timer := prometheus.NewTimer(voteProcessingDuration) defer timer.ObserveDuration() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) voteProcessedTotal.WithLabelValues("error").Inc() @@ -977,7 +1336,6 @@ func (e *GlobalConsensusEngine) handleVote(message *pb.Message) { voteProcessedTotal.WithLabelValues("error").Inc() } - // Validate the voter's signature proverSet, err := e.proverRegistry.GetActiveProvers(nil) if err != nil { e.logger.Error("could not get active provers", zap.Error(err)) @@ -1011,70 +1369,89 @@ func (e *GlobalConsensusEngine) handleVote(message *pb.Message) { return } - // Find the proposal frame for this vote - e.frameStoreMu.RLock() - var proposalFrame *protobufs.GlobalFrame = nil - for _, frame := range e.frameStore { - if frame.Header != nil && - frame.Header.FrameNumber == vote.FrameNumber && - bytes.Equal( - e.getAddressFromPublicKey( - frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue, - ), - vote.Proposer, - ) { - proposalFrame = frame - break - } - } - e.frameStoreMu.RUnlock() - - if proposalFrame == nil { - e.logger.Warn( - "vote for unknown proposal", - zap.Uint64("frame_number", vote.FrameNumber), - zap.String("proposer", hex.EncodeToString(vote.Proposer)), - ) - voteProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Get the signature payload for the proposal - signatureData, err := e.frameProver.GetGlobalFrameSignaturePayload( - proposalFrame.Header, - ) + txn, err := e.clockStore.NewTransaction(false) if err != nil { - e.logger.Error("could not get signature payload", zap.Error(err)) + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + + if err := e.clockStore.PutProposalVote(txn, vote); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.voteAggregator.AddVote(&vote) + voteProcessedTotal.WithLabelValues("success").Inc() +} + +func (e *GlobalConsensusEngine) handleTimeoutState(message *pb.Message) { + // Skip our own messages + if bytes.Equal(message.From, e.pubsub.GetPeerID()) { + return + } + + timer := prometheus.NewTimer(voteProcessingDuration) + defer timer.ObserveDuration() + + timeoutState := &protobufs.TimeoutState{} + if err := timeoutState.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal timeout", zap.Error(err)) voteProcessedTotal.WithLabelValues("error").Inc() return } - // Validate the vote signature - valid, err := e.keyManager.ValidateSignature( - crypto.KeyTypeBLS48581G1, - voterPublicKey, - signatureData, - vote.PublicKeySignatureBls48581.Signature, - []byte("global"), - ) - - if err != nil || !valid { - e.logger.Error("invalid vote signature", zap.Error(err)) + // Validate the vote structure + if err := timeoutState.Validate(); err != nil { + e.logger.Debug("invalid timeout", zap.Error(err)) voteProcessedTotal.WithLabelValues("error").Inc() return } - // Signature is valid, process the vote - if err := e.stateMachine.ReceiveVote( - GlobalPeerID{ID: vote.Proposer}, - GlobalPeerID{ID: 
vote.PublicKeySignatureBls48581.Address}, - &vote, - ); err != nil { - e.logger.Error("could not receive vote", zap.Error(err)) - voteProcessedTotal.WithLabelValues("error").Inc() + // Small gotcha: the timeout structure uses interfaces, so we can't assign + // directly, otherwise the nil values for the structs will fail the nil + // check on the interfaces (and would incur costly reflection if we wanted + // to check it directly) + lqc := timeoutState.LatestQuorumCertificate + prtc := timeoutState.PriorRankTimeoutCertificate + timeout := &models.TimeoutState[*protobufs.ProposalVote]{ + Rank: timeoutState.Vote.Rank, + Vote: &timeoutState.Vote, + TimeoutTick: timeoutState.TimeoutTick, + } + if lqc != nil { + timeout.LatestQuorumCertificate = lqc + } + if prtc != nil { + timeout.PriorRankTimeoutCertificate = prtc + } + + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) return } + if err := e.clockStore.PutTimeoutVote(txn, timeoutState); err != nil { + e.logger.Error("could not put vote", zap.Error(err)) + txn.Abort() + return + } + + if err := txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + + e.timeoutAggregator.AddTimeout(timeout) + voteProcessedTotal.WithLabelValues("success").Inc() } @@ -1088,130 +1465,6 @@ func (e *GlobalConsensusEngine) handleMessageBundle(message *pb.Message) { e.logger.Debug("collected global request for execution") } -func (e *GlobalConsensusEngine) handleConfirmation(message *pb.Message) { - timer := prometheus.NewTimer(confirmationProcessingDuration) - defer timer.ObserveDuration() - - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Validate the confirmation structure - if err := confirmation.Validate(); err != nil { - e.logger.Debug("invalid confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Find the frame with matching selector - e.frameStoreMu.RLock() - var matchingFrame *protobufs.GlobalFrame - for frameID, frame := range e.frameStore { - if frame.Header != nil && - frame.Header.FrameNumber == confirmation.FrameNumber && - frameID == string(confirmation.Selector) { - matchingFrame = frame - break - } - } - - if matchingFrame == nil { - e.frameStoreMu.RUnlock() - return - } - - e.frameStoreMu.RUnlock() - e.frameStoreMu.Lock() - defer e.frameStoreMu.Unlock() - matchingFrame.Header.PublicKeySignatureBls48581 = - confirmation.AggregateSignature - valid, err := e.frameValidator.Validate(matchingFrame) - if !valid || err != nil { - e.logger.Error("received invalid confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Check if we already have a confirmation stowed - exceeds := false - set := 0 - for _, b := range matchingFrame.Header.PublicKeySignatureBls48581.Bitmask { - set += bits.OnesCount8(b) - if set > 1 { - exceeds = true - break - } - } - if exceeds { - // Skip the remaining operations - return - } - - // Extract proposer address from the original frame - var proposerAddress []byte - frameSignature := matchingFrame.Header.PublicKeySignatureBls48581 - if frameSignature != nil && frameSignature.PublicKey != nil && - len(frameSignature.PublicKey.KeyValue) > 0 { - proposerAddress = 
e.getAddressFromPublicKey( - frameSignature.PublicKey.KeyValue, - ) - } else if frameSignature != nil && - frameSignature.Bitmask != nil { - // Extract from bitmask if no public key - provers, err := e.proverRegistry.GetActiveProvers(nil) - if err == nil { - for i := 0; i < len(provers); i++ { - byteIndex := i / 8 - bitIndex := i % 8 - if byteIndex < len(frameSignature.Bitmask) && - (frameSignature.Bitmask[byteIndex]&(1< 0 { - if err := e.stateMachine.ReceiveConfirmation( - GlobalPeerID{ID: proposerAddress}, - &matchingFrame, - ); err != nil { - e.logger.Error("could not receive confirmation", zap.Error(err)) - confirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - } - err = e.globalTimeReel.Insert(e.ctx, matchingFrame) - if err != nil { - e.logger.Error( - "could not insert into time reel", - zap.Error(err), - ) - confirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - confirmationProcessedTotal.WithLabelValues("success").Inc() -} - func (e *GlobalConsensusEngine) handleShardProposal(message *pb.Message) { timer := prometheus.NewTimer(shardProposalProcessingDuration) defer timer.ObserveDuration() @@ -1401,7 +1654,7 @@ func (e *GlobalConsensusEngine) handleShardVote(message *pb.Message) { timer := prometheus.NewTimer(shardVoteProcessingDuration) defer timer.ObserveDuration() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) shardVoteProcessedTotal.WithLabelValues("error").Inc() @@ -1456,7 +1709,7 @@ func (e *GlobalConsensusEngine) handleShardVote(message *pb.Message) { } e.appFrameStoreMu.Lock() - frameID := fmt.Sprintf("%x%d", vote.Proposer, vote.FrameNumber) + frameID := fmt.Sprintf("%x%d", vote.Identity(), vote.FrameNumber) proposalFrame := e.appFrameStore[frameID] e.appFrameStoreMu.Unlock() @@ -1467,14 +1720,11 @@ func (e *GlobalConsensusEngine) handleShardVote(message *pb.Message) { } // Get the signature payload for the proposal - signatureData, err := e.frameProver.GetFrameSignaturePayload( - proposalFrame.Header, + signatureData := verification.MakeVoteMessage( + proposalFrame.Header.Address, + proposalFrame.GetRank(), + proposalFrame.Source(), ) - if err != nil { - e.logger.Error("could not get signature payload", zap.Error(err)) - shardVoteProcessedTotal.WithLabelValues("error").Inc() - return - } // Validate the vote signature valid, err := e.keyManager.ValidateSignature( @@ -1482,7 +1732,7 @@ func (e *GlobalConsensusEngine) handleShardVote(message *pb.Message) { voterPublicKey, signatureData, vote.PublicKeySignatureBls48581.Signature, - []byte("global"), + []byte("appshard"), ) if err != nil || !valid { @@ -1494,83 +1744,6 @@ func (e *GlobalConsensusEngine) handleShardVote(message *pb.Message) { shardVoteProcessedTotal.WithLabelValues("success").Inc() } -func (e *GlobalConsensusEngine) handleShardConfirmation(message *pb.Message) { - timer := prometheus.NewTimer(shardConfirmationProcessingDuration) - defer timer.ObserveDuration() - - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - shardConfirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Validate the confirmation structure - if err := confirmation.Validate(); err != nil { - e.logger.Debug("invalid confirmation", zap.Error(err)) - 
shardConfirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - e.appFrameStoreMu.Lock() - matchingFrame := e.appFrameStore[string(confirmation.Selector)] - e.appFrameStoreMu.Unlock() - - if matchingFrame == nil { - e.logger.Error("could not find matching frame") - shardConfirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - matchingFrame.Header.PublicKeySignatureBls48581 = - confirmation.AggregateSignature - valid, err := e.appFrameValidator.Validate(matchingFrame) - if !valid || err != nil { - e.logger.Error("received invalid confirmation", zap.Error(err)) - shardConfirmationProcessedTotal.WithLabelValues("error").Inc() - return - } - - // Check if we already have a confirmation stowed - exceeds := false - set := 0 - for _, b := range matchingFrame.Header.PublicKeySignatureBls48581.Bitmask { - set += bits.OnesCount8(b) - if set > 1 { - exceeds = true - break - } - } - if exceeds { - // Skip the remaining operations - return - } - - e.txLockMu.Lock() - if _, ok := e.txLockMap[confirmation.FrameNumber]; !ok { - e.txLockMap[confirmation.FrameNumber] = make( - map[string]map[string]*LockedTransaction, - ) - } - _, ok := e.txLockMap[confirmation.FrameNumber][string(confirmation.Filter)] - if !ok { - e.txLockMap[confirmation.FrameNumber][string(confirmation.Filter)] = - make(map[string]*LockedTransaction) - } - txSet := e.txLockMap[confirmation.FrameNumber][string(confirmation.Filter)] - for _, l := range txSet { - for _, p := range slices.Collect(slices.Chunk(l.Prover, 32)) { - if bytes.Equal(p, matchingFrame.Header.Prover) { - l.Committed = true - l.Filled = true - } - } - } - e.txLockMu.Unlock() - - shardConfirmationProcessedTotal.WithLabelValues("success").Inc() -} - func (e *GlobalConsensusEngine) peekMessageType(message *pb.Message) uint32 { // Check if data is long enough to contain type prefix if len(message.Data) < 4 { diff --git a/node/consensus/global/message_subscription.go b/node/consensus/global/message_subscription.go index bd370f3..1391497 100644 --- a/node/consensus/global/message_subscription.go +++ b/node/consensus/global/message_subscription.go @@ -28,7 +28,7 @@ func (e *GlobalConsensusEngine) subscribeToGlobalConsensus() error { return nil case e.globalConsensusMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("global message queue full, dropping message") @@ -59,7 +59,7 @@ func (e *GlobalConsensusEngine) subscribeToGlobalConsensus() error { return nil case e.appFramesMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("app frames message queue full, dropping message") @@ -100,7 +100,7 @@ func (e *GlobalConsensusEngine) subscribeToShardConsensusMessages() error { return nil case e.shardConsensusMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("shard consensus queue full, dropping message") @@ -132,12 +132,17 @@ func (e *GlobalConsensusEngine) subscribeToFrameMessages() error { if err := e.pubsub.Subscribe( GLOBAL_FRAME_BITMASK, func(message *pb.Message) error { + // Don't subscribe if running in consensus, the time reel shouldn't have + // the frame ahead of time + if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode { + return nil + } select { case <-e.haltCtx.Done(): return nil case e.globalFrameMessageQueue <- message: return nil - 
case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("global frame queue full, dropping message") @@ -177,7 +182,7 @@ func (e *GlobalConsensusEngine) subscribeToProverMessages() error { case e.globalProverMessageQueue <- message: e.logger.Debug("received prover message") return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("global prover message queue full, dropping message") @@ -211,7 +216,7 @@ func (e *GlobalConsensusEngine) subscribeToPeerInfoMessages() error { return nil case e.globalPeerInfoMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("peer info message queue full, dropping message") @@ -243,7 +248,7 @@ func (e *GlobalConsensusEngine) subscribeToAlertMessages() error { select { case e.globalAlertMessageQueue <- message: return nil - case <-e.ctx.Done(): + case <-e.ShutdownSignal(): return errors.New("context cancelled") default: e.logger.Warn("alert message queue full, dropping message") diff --git a/node/consensus/global/message_validation.go b/node/consensus/global/message_validation.go index 5c6f719..0b8bfcd 100644 --- a/node/consensus/global/message_validation.go +++ b/node/consensus/global/message_validation.go @@ -30,33 +30,32 @@ func (e *GlobalConsensusEngine) validateGlobalConsensusMessage( typePrefix := binary.BigEndian.Uint32(message.Data[:4]) switch typePrefix { - case protobufs.GlobalFrameType: + case protobufs.GlobalProposalType: start := time.Now() defer func() { proposalValidationDuration.Observe(time.Since(start).Seconds()) }() - frame := &protobufs.GlobalFrame{} - if err := frame.FromCanonicalBytes(message.Data); err != nil { + proposal := &protobufs.GlobalProposal{} + if err := proposal.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal frame", zap.Error(err)) proposalValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - if frametime.GlobalFrameSince(frame) > 20*time.Second { + if err := proposal.Validate(); err != nil { + e.logger.Debug("invalid proposal", zap.Error(err)) proposalValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultIgnore } - if frame.Header.PublicKeySignatureBls48581 == nil || - frame.Header.PublicKeySignatureBls48581.PublicKey == nil || - frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue == nil { - e.logger.Debug("global frame validation missing signature") + if e.currentRank > proposal.GetRank() { + e.logger.Debug("proposal is stale") proposalValidationTotal.WithLabelValues("reject").Inc() - return tp2p.ValidationResultReject + return tp2p.ValidationResultIgnore } - valid, err := e.frameValidator.Validate(frame) + valid, err := e.frameValidator.Validate(proposal.State) if err != nil { e.logger.Debug("global frame validation error", zap.Error(err)) proposalValidationTotal.WithLabelValues("reject").Inc() @@ -71,49 +70,21 @@ func (e *GlobalConsensusEngine) validateGlobalConsensusMessage( proposalValidationTotal.WithLabelValues("accept").Inc() - case protobufs.ProverLivenessCheckType: - start := time.Now() - defer func() { - livenessCheckValidationDuration.Observe(time.Since(start).Seconds()) - }() - - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) - 
livenessCheckValidationTotal.WithLabelValues("reject").Inc() - return tp2p.ValidationResultReject - } - - now := time.Now().UnixMilli() - if livenessCheck.Timestamp > now+5000 || - livenessCheck.Timestamp < now-5000 { - return tp2p.ValidationResultIgnore - } - - // Validate the liveness check - if err := livenessCheck.Validate(); err != nil { - e.logger.Debug("invalid liveness check", zap.Error(err)) - livenessCheckValidationTotal.WithLabelValues("reject").Inc() - return tp2p.ValidationResultReject - } - - livenessCheckValidationTotal.WithLabelValues("accept").Inc() - - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: start := time.Now() defer func() { voteValidationDuration.Observe(time.Since(start).Seconds()) }() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) voteValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - now := time.Now().UnixMilli() - if vote.Timestamp > now+5000 || vote.Timestamp < now-5000 { + if e.currentRank > vote.Rank { + e.logger.Debug("vote is stale") return tp2p.ValidationResultIgnore } @@ -126,33 +97,32 @@ func (e *GlobalConsensusEngine) validateGlobalConsensusMessage( voteValidationTotal.WithLabelValues("accept").Inc() - case protobufs.FrameConfirmationType: + case protobufs.TimeoutStateType: start := time.Now() defer func() { - confirmationValidationDuration.Observe(time.Since(start).Seconds()) + timeoutStateValidationDuration.Observe(time.Since(start).Seconds()) }() - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - confirmationValidationTotal.WithLabelValues("reject").Inc() + timeoutState := &protobufs.TimeoutState{} + if err := timeoutState.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal timeoutState", zap.Error(err)) + timeoutStateValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - now := time.Now().UnixMilli() - if confirmation.Timestamp > now+5000 || - confirmation.Timestamp < now-5000 { + if e.currentRank > timeoutState.Vote.Rank { + e.logger.Debug("timeout is stale") return tp2p.ValidationResultIgnore } - // Validate the confirmation - if err := confirmation.Validate(); err != nil { - e.logger.Debug("invalid confirmation", zap.Error(err)) - confirmationValidationTotal.WithLabelValues("reject").Inc() + // Validate the timeoutState + if err := timeoutState.Validate(); err != nil { + e.logger.Debug("invalid timeoutState", zap.Error(err)) + timeoutStateValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - confirmationValidationTotal.WithLabelValues("accept").Inc() + timeoutStateValidationTotal.WithLabelValues("accept").Inc() default: e.logger.Debug("received unknown type", zap.Uint32("type", typePrefix)) @@ -224,48 +194,20 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage( shardProposalValidationTotal.WithLabelValues("accept").Inc() - case protobufs.ProverLivenessCheckType: - start := time.Now() - defer func() { - shardLivenessCheckValidationDuration.Observe(time.Since(start).Seconds()) - }() - - livenessCheck := &protobufs.ProverLivenessCheck{} - if err := livenessCheck.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) - 
shardLivenessCheckValidationTotal.WithLabelValues("reject").Inc() - return tp2p.ValidationResultReject - } - - now := time.Now().UnixMilli() - if livenessCheck.Timestamp > now+500 || - livenessCheck.Timestamp < now-1000 { - shardLivenessCheckValidationTotal.WithLabelValues("ignore").Inc() - return tp2p.ValidationResultIgnore - } - - if err := livenessCheck.Validate(); err != nil { - e.logger.Debug("failed to validate liveness check", zap.Error(err)) - shardLivenessCheckValidationTotal.WithLabelValues("reject").Inc() - return tp2p.ValidationResultReject - } - - shardLivenessCheckValidationTotal.WithLabelValues("accept").Inc() - - case protobufs.FrameVoteType: + case protobufs.ProposalVoteType: start := time.Now() defer func() { shardVoteValidationDuration.Observe(time.Since(start).Seconds()) }() - vote := &protobufs.FrameVote{} + vote := &protobufs.ProposalVote{} if err := vote.FromCanonicalBytes(message.Data); err != nil { e.logger.Debug("failed to unmarshal vote", zap.Error(err)) shardVoteValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - now := time.Now().UnixMilli() + now := uint64(time.Now().UnixMilli()) if vote.Timestamp > now+5000 || vote.Timestamp < now-5000 { shardVoteValidationTotal.WithLabelValues("ignore").Inc() return tp2p.ValidationResultIgnore @@ -279,32 +221,44 @@ func (e *GlobalConsensusEngine) validateShardConsensusMessage( shardVoteValidationTotal.WithLabelValues("accept").Inc() - case protobufs.FrameConfirmationType: + case protobufs.TimeoutStateType: start := time.Now() defer func() { - shardConfirmationValidationDuration.Observe(time.Since(start).Seconds()) + shardTimeoutStateValidationDuration.Observe(time.Since(start).Seconds()) }() - confirmation := &protobufs.FrameConfirmation{} - if err := confirmation.FromCanonicalBytes(message.Data); err != nil { - e.logger.Debug("failed to unmarshal confirmation", zap.Error(err)) - shardConfirmationValidationTotal.WithLabelValues("reject").Inc() + timeoutState := &protobufs.TimeoutState{} + if err := timeoutState.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal timeoutState", zap.Error(err)) + shardTimeoutStateValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - now := time.Now().UnixMilli() - if confirmation.Timestamp > now+5000 || confirmation.Timestamp < now-5000 { - shardConfirmationValidationTotal.WithLabelValues("ignore").Inc() + now := uint64(time.Now().UnixMilli()) + if timeoutState.Timestamp > now+5000 || timeoutState.Timestamp < now-5000 { + shardTimeoutStateValidationTotal.WithLabelValues("ignore").Inc() return tp2p.ValidationResultIgnore } - if err := confirmation.Validate(); err != nil { - e.logger.Debug("failed to validate confirmation", zap.Error(err)) - shardConfirmationValidationTotal.WithLabelValues("reject").Inc() + if err := timeoutState.Validate(); err != nil { + e.logger.Debug("failed to validate timeoutState", zap.Error(err)) + shardTimeoutStateValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } - shardConfirmationValidationTotal.WithLabelValues("accept").Inc() + shardTimeoutStateValidationTotal.WithLabelValues("accept").Inc() + + case protobufs.ProverLivenessCheckType: + check := &protobufs.ProverLivenessCheck{} + if err := check.FromCanonicalBytes(message.Data); err != nil { + e.logger.Debug("failed to unmarshal liveness check", zap.Error(err)) + return tp2p.ValidationResultReject + } + + if err := check.Validate(); err != nil { + e.logger.Debug("invalid liveness 
check", zap.Error(err)) + return tp2p.ValidationResultReject + } default: return tp2p.ValidationResultReject diff --git a/node/consensus/global/metrics.go b/node/consensus/global/metrics.go index b1f0a10..0cf4f11 100644 --- a/node/consensus/global/metrics.go +++ b/node/consensus/global/metrics.go @@ -95,48 +95,6 @@ var ( }, ) - // Global liveness check processing metrics - livenessCheckProcessedTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_processed_total", - Help: "Total number of global liveness checks processed by the global consensus engine", - }, - []string{"status"}, // status: "success", "error", "invalid" - ) - - livenessCheckProcessingDuration = promauto.NewHistogram( - prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_processing_duration_seconds", - Help: "Time taken to process a global liveness check", - Buckets: prometheus.DefBuckets, - }, - ) - - // Global liveness check validation metrics - livenessCheckValidationTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_validation_total", - Help: "Total number of global liveness check validations", - }, - []string{"result"}, // result: "accept", "reject", "ignore" - ) - - livenessCheckValidationDuration = promauto.NewHistogram( - prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "liveness_check_validation_duration_seconds", - Help: "Time taken to validate a global liveness check", - Buckets: prometheus.DefBuckets, - }, - ) - // Shard liveness check processing metrics shardLivenessCheckProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ @@ -158,27 +116,6 @@ var ( }, ) - // Shard liveness check validation metrics - shardLivenessCheckValidationTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "shard_liveness_check_validation_total", - Help: "Total number of shard liveness check validations", - }, - []string{"result"}, // result: "accept", "reject", "ignore" - ) - - shardLivenessCheckValidationDuration = promauto.NewHistogram( - prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "shard_liveness_check_validation_duration_seconds", - Help: "Time taken to validate a shard liveness check", - Buckets: prometheus.DefBuckets, - }, - ) - // Global vote processing metrics voteProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ @@ -263,86 +200,86 @@ var ( }, ) - // Global confirmation processing metrics - confirmationProcessedTotal = promauto.NewCounterVec( + // Global timeout state processing metrics + timeoutStateProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_processed_total", - Help: "Total number of global confirmations processed by the global consensus engine", + Name: "timeout_state_processed_total", + Help: "Total number of global timeouts processed by the global consensus engine", }, []string{"status"}, // status: "success", "error", "invalid" ) - confirmationProcessingDuration = promauto.NewHistogram( + timeoutStateProcessingDuration = promauto.NewHistogram( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_processing_duration_seconds", - Help: "Time taken to process a global confirmation", + Name: 
"timeout_state_processing_duration_seconds", + Help: "Time taken to process a global timeout", Buckets: prometheus.DefBuckets, }, ) - // Global confirmation validation metrics - confirmationValidationTotal = promauto.NewCounterVec( + // Global timeout state validation metrics + timeoutStateValidationTotal = promauto.NewCounterVec( prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_validation_total", - Help: "Total number of global confirmation validations", + Name: "timeout_state_validation_total", + Help: "Total number of global timeout state validations", }, []string{"result"}, // result: "accept", "reject", "ignore" ) - confirmationValidationDuration = promauto.NewHistogram( + timeoutStateValidationDuration = promauto.NewHistogram( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "confirmation_validation_duration_seconds", - Help: "Time taken to validate a global confirmation", + Name: "timeout_state_validation_duration_seconds", + Help: "Time taken to validate a global timeout", Buckets: prometheus.DefBuckets, }, ) - // Shard confirmation processing metrics - shardConfirmationProcessedTotal = promauto.NewCounterVec( + // Shard timeout state processing metrics + shardTimeoutStateProcessedTotal = promauto.NewCounterVec( prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "shard_confirmation_processed_total", - Help: "Total number of shard confirmations processed by the global consensus engine", + Name: "shard_timeout_state_processed_total", + Help: "Total number of shard timeouts processed by the global consensus engine", }, []string{"status"}, // status: "success", "error", "invalid" ) - shardConfirmationProcessingDuration = promauto.NewHistogram( + shardTimeoutStateProcessingDuration = promauto.NewHistogram( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "shard_confirmation_processing_duration_seconds", - Help: "Time taken to process a shard confirmation", + Name: "shard_timeout_state_processing_duration_seconds", + Help: "Time taken to process a shard timeout", Buckets: prometheus.DefBuckets, }, ) - // Shard confirmation validation metrics - shardConfirmationValidationTotal = promauto.NewCounterVec( + // Shard timeout state validation metrics + shardTimeoutStateValidationTotal = promauto.NewCounterVec( prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "shard_confirmation_validation_total", - Help: "Total number of shard confirmation validations", + Name: "shard_timeout_state_validation_total", + Help: "Total number of shard timeout state validations", }, []string{"result"}, // result: "accept", "reject", "ignore" ) - shardConfirmationValidationDuration = promauto.NewHistogram( + shardTimeoutStateValidationDuration = promauto.NewHistogram( prometheus.HistogramOpts{ Namespace: metricsNamespace, Subsystem: subsystem, - Name: "shard_confirmation_validation_duration_seconds", - Help: "Time taken to validate a shard confirmation", + Name: "shard_timeout_state_validation_duration_seconds", + Help: "Time taken to validate a shard timeout", Buckets: prometheus.DefBuckets, }, ) diff --git a/node/consensus/global/services.go b/node/consensus/global/services.go index dd92476..e8b2e1e 100644 --- a/node/consensus/global/services.go +++ b/node/consensus/global/services.go @@ -7,6 +7,7 @@ import ( "slices" "github.com/iden3/go-iden3-crypto/poseidon" + "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" 
"go.uber.org/zap" "google.golang.org/grpc" @@ -22,60 +23,9 @@ func (e *GlobalConsensusEngine) GetGlobalFrame( ctx context.Context, request *protobufs.GetGlobalFrameRequest, ) (*protobufs.GlobalFrameResponse, error) { - peerID, ok := qgrpc.PeerIDFromContext(ctx) - if !ok { - return nil, status.Error(codes.Internal, "remote peer ID not found") - } - - if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) { - registry, err := e.keyStore.GetKeyRegistry( - []byte(peerID), - ) - if err != nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer", - ) - } - - if registry.ProverKey == nil || registry.ProverKey.KeyValue == nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (no prover)", - ) - } - - addrBI, err := poseidon.HashBytes(registry.ProverKey.KeyValue) - if err != nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (invalid address)", - ) - } - - addr := addrBI.FillBytes(make([]byte, 32)) - info, err := e.proverRegistry.GetActiveProvers(nil) - if err != nil { - return nil, status.Error( - codes.PermissionDenied, - "could not identify peer (no prover registry)", - ) - } - - found := false - for _, prover := range info { - if bytes.Equal(prover.Address, addr) { - found = true - break - } - } - - if !found { - return nil, status.Error( - codes.PermissionDenied, - "invalid peer", - ) - } + peerID, err := e.authenticateProverFromContext(ctx) + if err != nil { + return nil, err } e.logger.Debug( @@ -84,7 +34,6 @@ func (e *GlobalConsensusEngine) GetGlobalFrame( zap.String("peer_id", peerID.String()), ) var frame *protobufs.GlobalFrame - var err error if request.FrameNumber == 0 { frame, err = e.globalTimeReel.GetHead() if frame.Header.FrameNumber == 0 { @@ -112,6 +61,90 @@ func (e *GlobalConsensusEngine) GetGlobalFrame( }, nil } +func (e *GlobalConsensusEngine) GetGlobalProposal( + ctx context.Context, + request *protobufs.GetGlobalProposalRequest, +) (*protobufs.GlobalProposalResponse, error) { + peerID, err := e.authenticateProverFromContext(ctx) + if err != nil { + return nil, err + } + + // Genesis does not have a parent cert, treat special: + if request.FrameNumber == 0 { + frame, err := e.clockStore.GetGlobalClockFrame(request.FrameNumber) + if err != nil { + e.logger.Debug( + "received error while fetching global frame", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get global proposal") + } + return &protobufs.GlobalProposalResponse{ + Proposal: &protobufs.GlobalProposal{ + State: frame, + }, + }, nil + } + + e.logger.Debug( + "received proposal request", + zap.Uint64("frame_number", request.FrameNumber), + zap.String("peer_id", peerID.String()), + ) + frame, err := e.clockStore.GetGlobalClockFrame(request.FrameNumber) + if err != nil { + return &protobufs.GlobalProposalResponse{}, nil + } + + vote, err := e.clockStore.GetProposalVote( + nil, + frame.GetRank(), + []byte(frame.Source()), + ) + if err != nil { + return &protobufs.GlobalProposalResponse{}, nil + } + + parent, err := e.clockStore.GetGlobalClockFrame(request.FrameNumber - 1) + if err != nil { + e.logger.Debug( + "received error while fetching global frame parent", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get global proposal") + } + + parentQC, err := e.clockStore.GetQuorumCertificate(nil, parent.GetRank()) + if err != nil { + 
e.logger.Debug( + "received error while fetching QC parent", + zap.String("peer_id", peerID.String()), + zap.Uint64("frame_number", request.FrameNumber), + zap.Error(err), + ) + return nil, errors.Wrap(err, "get global proposal") + } + + // no tc is fine, pass the nil along + priorRankTC, _ := e.clockStore.GetTimeoutCertificate(nil, frame.GetRank()-1) + + proposal := &protobufs.GlobalProposal{ + State: frame, + ParentQuorumCertificate: parentQC, + PriorRankTimeoutCertificate: priorRankTC, + Vote: vote, + } + + return &protobufs.GlobalProposalResponse{ + Proposal: proposal, + }, nil +} + func (e *GlobalConsensusEngine) GetAppShards( ctx context.Context, req *protobufs.GetAppShardsRequest, @@ -336,3 +369,65 @@ func (e *GlobalConsensusEngine) RegisterServices(server *grpc.Server) { protobufs.RegisterPubSubProxyServer(server, proxyServer) } } + +func (e *GlobalConsensusEngine) authenticateProverFromContext( + ctx context.Context, +) (peer.ID, error) { + peerID, ok := qgrpc.PeerIDFromContext(ctx) + if !ok { + return peerID, status.Error(codes.Internal, "remote peer ID not found") + } + + if !bytes.Equal(e.pubsub.GetPeerID(), []byte(peerID)) { + registry, err := e.keyStore.GetKeyRegistry( + []byte(peerID), + ) + if err != nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer", + ) + } + + if registry.ProverKey == nil || registry.ProverKey.KeyValue == nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (no prover)", + ) + } + + addrBI, err := poseidon.HashBytes(registry.ProverKey.KeyValue) + if err != nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (invalid address)", + ) + } + + addr := addrBI.FillBytes(make([]byte, 32)) + info, err := e.proverRegistry.GetActiveProvers(nil) + if err != nil { + return peerID, status.Error( + codes.PermissionDenied, + "could not identify peer (no prover registry)", + ) + } + + found := false + for _, prover := range info { + if bytes.Equal(prover.Address, addr) { + found = true + break + } + } + + if !found { + return peerID, status.Error( + codes.PermissionDenied, + "invalid peer", + ) + } + } + + return peerID, nil +} diff --git a/node/consensus/time/app_time_reel.go b/node/consensus/time/app_time_reel.go index 8c10112..9715bea 100644 --- a/node/consensus/time/app_time_reel.go +++ b/node/consensus/time/app_time_reel.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/consensus" "source.quilibrium.com/quilibrium/monorepo/types/store" @@ -105,9 +106,7 @@ type AppTimeReel struct { ) error // Control - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup + ctx context.Context // Archive mode: whether to hold historic frame data archiveMode bool @@ -126,8 +125,6 @@ func NewAppTimeReel( return nil, errors.Wrap(err, "failed to create LRU cache") } - ctx, cancel := context.WithCancel(context.Background()) - return &AppTimeReel{ logger: logger, address: address, @@ -153,8 +150,6 @@ func NewAppTimeReel( return nil }, store: clockStore, - ctx: ctx, - cancel: cancel, archiveMode: archiveMode, }, nil } @@ -180,35 +175,45 @@ func (a *AppTimeReel) SetRevertFunc( } // Start starts the app time reel -func (a *AppTimeReel) Start() error { +func (a *AppTimeReel) Start( + ctx lifecycle.SignalerContext, + ready 
lifecycle.ReadyFunc, +) { + a.ctx = ctx a.logger.Info( "starting app time reel", zap.String("address", fmt.Sprintf("%x", a.address)), ) if err := a.bootstrapFromStore(); err != nil { - return errors.Wrap(err, "start app time reel") + ctx.Throw(errors.Wrap(err, "start app time reel")) + return } - return nil -} + ready() + <-ctx.Done() -// Stop stops the app time reel -func (a *AppTimeReel) Stop() { a.logger.Info( "stopping app time reel", zap.String("address", fmt.Sprintf("%x", a.address)), ) - a.cancel() - a.wg.Wait() close(a.eventCh) close(a.eventDone) } // sendEvent sends an event with guaranteed delivery func (a *AppTimeReel) sendEvent(event AppEvent) { - // This blocks until the event is delivered, guaranteeing order + // prioritize halts select { + case <-a.ctx.Done(): + return + default: + } + + // This blocks until the event is delivered or halted, guaranteeing order + select { + case <-a.ctx.Done(): + return case a.eventCh <- event: a.logger.Debug( "sent event", @@ -216,14 +221,11 @@ func (a *AppTimeReel) sendEvent(event AppEvent) { zap.Uint64("frame_number", event.Frame.Header.FrameNumber), zap.String("id", a.ComputeFrameID(event.Frame)), ) - case <-a.ctx.Done(): - return } } // Insert inserts an app frame header into the tree structure func (a *AppTimeReel) Insert( - ctx context.Context, frame *protobufs.AppShardFrame, ) error { // Start timing diff --git a/node/consensus/time/app_time_reel_test.go b/node/consensus/time/app_time_reel_test.go index 5812f00..5c46450 100644 --- a/node/consensus/time/app_time_reel_test.go +++ b/node/consensus/time/app_time_reel_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/mocks" ) @@ -51,12 +52,10 @@ func TestAppTimeReel_BasicOperations(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() - + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Test address getter assert.Equal(t, address, atr.GetAddress()) @@ -73,7 +72,7 @@ func TestAppTimeReel_BasicOperations(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Check that genesis became head @@ -94,7 +93,7 @@ func TestAppTimeReel_BasicOperations(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Check new head @@ -135,11 +134,10 @@ func TestAppTimeReel_WrongAddress(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Try to insert frame with wrong address wrongFrame := &protobufs.AppShardFrame{ @@ -154,7 +152,7 @@ func TestAppTimeReel_WrongAddress(t *testing.T) { }, } - err = atr.Insert(ctx, wrongFrame) + err = atr.Insert(wrongFrame) assert.Error(t, err) assert.Contains(t, err.Error(), "frame address does not match reel address") } @@ -166,11 +164,10 @@ func 
TestAppTimeReel_Equivocation(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Subscribe to events eventCh := atr.GetEventCh() @@ -194,7 +191,7 @@ func TestAppTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Drain any events @@ -219,7 +216,7 @@ func TestAppTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Drain any events @@ -244,7 +241,7 @@ func TestAppTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation) + err = atr.Insert(frame1Equivocation) assert.NoError(t, err) // Give the goroutine time to send the event @@ -268,11 +265,10 @@ func TestAppTimeReel_Fork(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.AppShardFrame{ @@ -287,7 +283,7 @@ func TestAppTimeReel_Fork(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Insert valid frame 1 with BLS signature @@ -306,7 +302,7 @@ func TestAppTimeReel_Fork(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Try to insert forking frame 1 with non-overlapping bitmask (different signers) @@ -326,7 +322,7 @@ func TestAppTimeReel_Fork(t *testing.T) { } // This should succeed - it's a fork, not equivocation - err = atr.Insert(ctx, frame1Fork) + err = atr.Insert(frame1Fork) assert.NoError(t, err) // Head should still be the original frame1 @@ -342,11 +338,10 @@ func TestAppTimeReel_ParentValidation(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.AppShardFrame{ @@ -361,7 +356,7 @@ func TestAppTimeReel_ParentValidation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Insert valid frame 1 @@ -377,7 +372,7 @@ func TestAppTimeReel_ParentValidation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Try to insert frame with a completely invalid parent selector that doesn't match any existing frame @@ -394,7 +389,7 @@ func TestAppTimeReel_ParentValidation(t *testing.T) { } // This should succeed (goes to pending since parent not found) - err = atr.Insert(ctx, badFrame) + err = atr.Insert(badFrame) assert.NoError(t, err) // Check that it's in pending frames @@ -409,11 +404,10 @@ func TestAppTimeReel_ForkDetection(t *testing.T) { atr, err := NewAppTimeReel(logger, address, 
createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect events @@ -468,7 +462,7 @@ func TestAppTimeReel_ForkDetection(t *testing.T) { // Insert chain for _, frame := range frames { - err := atr.Insert(ctx, frame) + err := atr.Insert(frame) require.NoError(t, err) } @@ -492,11 +486,10 @@ func TestAppTimeReel_ForkChoice_MoreSignatures(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -518,7 +511,7 @@ func TestAppTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -543,7 +536,7 @@ func TestAppTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Weak) + err = atr.Insert(frame1Weak) require.NoError(t, err) // Verify weak frame is initially head @@ -574,7 +567,7 @@ func TestAppTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Strong) + err = atr.Insert(frame1Strong) require.NoError(t, err) // Give the goroutine time to send the event @@ -603,11 +596,10 @@ func TestAppTimeReel_ForkChoice_NoReplacement(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -629,7 +621,7 @@ func TestAppTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -654,7 +646,7 @@ func TestAppTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Strong) + err = atr.Insert(frame1Strong) require.NoError(t, err) // Verify strong frame is head @@ -685,7 +677,7 @@ func TestAppTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Weak) + err = atr.Insert(frame1Weak) require.NoError(t, err) // Give some time for any potential events @@ -714,11 +706,10 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { atr, err := NewAppTimeReel(logger, address, reg, s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -752,7 +743,7 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { []byte("prover8"), }, nil) - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain 
genesis event @@ -789,7 +780,7 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { []byte("prover8"), }, nil) - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) require.NoError(t, err) select { case <-eventCh: @@ -879,21 +870,21 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { }, nil) // Insert chain A frames in order: 2A, 3A, 4A - err = atr.Insert(ctx, frame2A) + err = atr.Insert(frame2A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame3A) + err = atr.Insert(frame3A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame4A) + err = atr.Insert(frame4A) require.NoError(t, err) select { case <-eventCh: @@ -980,7 +971,7 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { // This should work because the time reel should handle out-of-order insertion // Insert frame 4B first - err = atr.Insert(ctx, frame4B) + err = atr.Insert(frame4B) require.NoError(t, err, "inserting 4B should succeed even without its parents") select { case <-eventCh: @@ -994,7 +985,7 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { assert.Equal(t, []byte("frame4A_output"), head.Header.Output, "should still be chain A") // Insert frame 3B - err = atr.Insert(ctx, frame3B) + err = atr.Insert(frame3B) require.NoError(t, err, "inserting 3B should succeed") select { case <-eventCh: @@ -1008,7 +999,7 @@ func TestAppTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { assert.Equal(t, []byte("frame4A_output"), head.Header.Output, "should still be chain A") // Insert frame 2B - this completes the chain B lineage - err = atr.Insert(ctx, frame2B) + err = atr.Insert(frame2B) require.NoError(t, err, "inserting 2B should succeed and complete chain B") // Give time for reorganization @@ -1047,11 +1038,10 @@ func TestAppTimeReel_MultipleProvers(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Different provers create frames provers := [][]byte{ @@ -1073,7 +1063,7 @@ func TestAppTimeReel_MultipleProvers(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build chain with alternating provers @@ -1091,7 +1081,7 @@ func TestAppTimeReel_MultipleProvers(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) prevOutput = frame.Header.Output @@ -1175,11 +1165,10 @@ func TestAppTimeReel_ComplexForkWithOutOfOrderInsertion(t *testing.T) { atr, err := NewAppTimeReel(logger, address, proverRegistry, s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect all events @@ -1303,19 +1292,19 @@ func TestAppTimeReel_ComplexForkWithOutOfOrderInsertion(t *testing.T) { // Now insert in the specified order: 1, 3', 3, 2, 3'', 2' // Step 1: Insert genesis first (needed as base) - err = atr.Insert(ctx, genesis) + err = 
atr.Insert(genesis) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Step 2: Insert frame 1 t.Log("Inserting frame 1") - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Step 3: Insert frame 3' (should go to pending since 2' doesn't exist yet) t.Log("Inserting frame 3'") - err = atr.Insert(ctx, frame3Prime) + err = atr.Insert(frame3Prime) require.NoError(t, err) time.Sleep(50 * time.Millisecond) @@ -1325,13 +1314,13 @@ func TestAppTimeReel_ComplexForkWithOutOfOrderInsertion(t *testing.T) { // Step 4: Insert frame 3 (should also go to pending since 2 doesn't exist yet) t.Log("Inserting frame 3") - err = atr.Insert(ctx, frame3) + err = atr.Insert(frame3) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Step 5: Insert frame 2 (this should complete the 1->2->3 chain) t.Log("Inserting frame 2") - err = atr.Insert(ctx, frame2) + err = atr.Insert(frame2) require.NoError(t, err) time.Sleep(100 * time.Millisecond) // Give more time for processing @@ -1343,13 +1332,13 @@ func TestAppTimeReel_ComplexForkWithOutOfOrderInsertion(t *testing.T) { // Step 6: Insert frame 3'' (another competing frame on 2') t.Log("Inserting frame 3''") - err = atr.Insert(ctx, frame3DoublePrime) + err = atr.Insert(frame3DoublePrime) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Step 7: Insert frame 2' (this completes the 1->2'->3' and 1->2'->3'' chains) t.Log("Inserting frame 2'") - err = atr.Insert(ctx, frame2Prime) + err = atr.Insert(frame2Prime) require.NoError(t, err) time.Sleep(200 * time.Millisecond) // Give ample time for fork choice evaluation @@ -1392,11 +1381,10 @@ func TestAppTimeReel_TreePruning(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.AppShardFrame{ @@ -1411,7 +1399,7 @@ func TestAppTimeReel_TreePruning(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build a long chain that will trigger pruning (370 frames total) @@ -1429,7 +1417,7 @@ func TestAppTimeReel_TreePruning(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) prevOutput = frame.Header.Output @@ -1481,11 +1469,10 @@ func TestAppTimeReel_TreePruningWithForks(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.AppShardFrame{ @@ -1500,7 +1487,7 @@ func TestAppTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build main chain for 365 frames @@ -1519,7 +1506,7 @@ func TestAppTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) if i == 5 { @@ -1546,7 +1533,7 @@ func TestAppTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = 
atr.Insert(ctx, forkFrame) + err = atr.Insert(forkFrame) require.NoError(t, err) // Continue main chain for 375 more frames to trigger deep pruning @@ -1563,7 +1550,7 @@ func TestAppTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) prevOutput = frame.Header.Output @@ -1601,11 +1588,10 @@ func TestAppTimeReel_ForkChoiceInsertionOrder(t *testing.T) { atr, err := NewAppTimeReel(logger, address, reg, s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -1642,7 +1628,7 @@ drained: []byte("prover8"), }, nil) - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -1726,14 +1712,14 @@ drained: }, nil) // Insert weak branch first - err = atr.Insert(ctx, frame1A) + err = atr.Insert(frame1A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame2A) + err = atr.Insert(frame2A) require.NoError(t, err) select { case <-eventCh: @@ -1777,7 +1763,7 @@ drained: }, nil) // Insert stronger branch out of order: first 2B (goes to pending), then 1B - err = atr.Insert(ctx, frame2B) + err = atr.Insert(frame2B) require.NoError(t, err, "should accept frame 2B into pending") // Head should still be weak branch @@ -1786,7 +1772,7 @@ drained: assert.Equal(t, []byte("frame2A_output"), head.Header.Output, "head should still be weak branch") // Now insert 1B, which should complete the strong branch and trigger fork choice - err = atr.Insert(ctx, frame1B) + err = atr.Insert(frame1B) require.NoError(t, err) // Give time for fork choice to process @@ -1817,11 +1803,10 @@ func TestAppTimeReel_ForkEventsWithReplay(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect all events @@ -1886,7 +1871,7 @@ func TestAppTimeReel_ForkEventsWithReplay(t *testing.T) { // Insert initial chain for _, frame := range []*protobufs.AppShardFrame{genesis, frame1, frame2, frame3} { - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) time.Sleep(10 * time.Millisecond) // Allow events to be sent } @@ -1945,7 +1930,7 @@ func TestAppTimeReel_ForkEventsWithReplay(t *testing.T) { // Insert stronger fork - this should trigger a reorganization for _, frame := range []*protobufs.AppShardFrame{frame2Prime, frame3Prime, frame4Prime} { - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Allow events to propagate } @@ -2000,11 +1985,10 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { atr, err := NewAppTimeReel(logger, address, createTestProverRegistry(true), s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + 
time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect equivocation events @@ -2033,7 +2017,7 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Insert valid frame 1 @@ -2052,7 +2036,7 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Valid) + err = atr.Insert(frame1Valid) require.NoError(t, err) // Test Case 1: Complete overlap - same signers, different content @@ -2071,7 +2055,7 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation1) + err = atr.Insert(frame1Equivocation1) assert.NoError(t, err) // Test Case 2: Partial overlap - some same signers @@ -2090,7 +2074,7 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation2) + err = atr.Insert(frame1Equivocation2) assert.NoError(t, err) // Test Case 3: No overlap - should be allowed (fork) @@ -2109,7 +2093,7 @@ func TestAppTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Fork) + err = atr.Insert(frame1Fork) assert.NoError(t, err, "should allow fork with no overlapping signers") // Wait for events to be processed @@ -2162,11 +2146,10 @@ func TestAppTimeReel_ProverRegistryForkChoice(t *testing.T) { atr, err := NewAppTimeReel(logger, address, proverRegistry, s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Create genesis frame @@ -2180,7 +2163,7 @@ func TestAppTimeReel_ProverRegistryForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -2230,7 +2213,7 @@ func TestAppTimeReel_ProverRegistryForkChoice(t *testing.T) { } // Insert frame with wrong prover first - err = atr.Insert(ctx, frame1b) + err = atr.Insert(frame1b) require.NoError(t, err) // Should become head initially @@ -2243,7 +2226,7 @@ func TestAppTimeReel_ProverRegistryForkChoice(t *testing.T) { } // Insert frame with correct prover - err = atr.Insert(ctx, frame1a) + err = atr.Insert(frame1a) require.NoError(t, err) // Should trigger fork choice and frame1a should win @@ -2293,11 +2276,10 @@ func TestAppTimeReel_ProverRegistryWithOrderedProvers(t *testing.T) { atr, err := NewAppTimeReel(logger, address, proverRegistry, s, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Create genesis frame genesis := &protobufs.AppShardFrame{ @@ -2310,7 +2292,7 @@ func TestAppTimeReel_ProverRegistryWithOrderedProvers(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Create three competing frames with different provers from the ordered list @@ -2376,7 +2358,7 @@ func TestAppTimeReel_ProverRegistryWithOrderedProvers(t *testing.T) { // Insert in reverse order of preference t.Logf("Inserting frame1a with prover: %s", frame1a.Header.Prover) - err = atr.Insert(ctx, frame1a) + err = atr.Insert(frame1a) 
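The test updates in these files all switch from the old Start()/Stop() pair to the lifecycle-managed startup introduced by this patch. A minimal sketch of that contract is below; the reel type, its bootstrap method, and the drive function are hypothetical, and the lifecycle.SignalerContext / ReadyFunc / WithSignallerAndCancel semantics are assumed from how they are used in this diff rather than from the package's documentation.

package example

import (
	"context"

	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)

// reel is a placeholder component following the Start contract adopted above:
// report fatal startup errors via ctx.Throw, signal readiness, then block
// until shutdown and clean up afterwards (replacing a separate Stop method).
type reel struct{}

func (r *reel) Start(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) {
	if err := r.bootstrap(); err != nil {
		ctx.Throw(err) // supervisor handles the startup failure
		return
	}
	ready()      // component is up
	<-ctx.Done() // block until shutdown is requested
	// release resources here (the time reels close their event channels)
}

func (r *reel) bootstrap() error { return nil }

// drive mirrors how the updated tests run a component: derive a signaler
// context, start the component in a goroutine, and cancel to shut it down.
func drive(parent context.Context, r *reel) {
	ctx, cancel, _ := lifecycle.WithSignallerAndCancel(parent)
	defer cancel()
	go r.Start(ctx, func() {})
	// exercise the component here, as the tests above do
}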
require.NoError(t, err) // Drain events for frame1a @@ -2399,7 +2381,7 @@ func TestAppTimeReel_ProverRegistryWithOrderedProvers(t *testing.T) { t.Logf("Head after frame1a: %s", head1.Header.Output) t.Logf("Inserting frame1b with prover: %s", frame1b.Header.Prover) - err = atr.Insert(ctx, frame1b) + err = atr.Insert(frame1b) require.NoError(t, err) drainEvents("frame1b") @@ -2409,7 +2391,7 @@ func TestAppTimeReel_ProverRegistryWithOrderedProvers(t *testing.T) { t.Logf("Head after frame1b: %s", head2.Header.Output) t.Logf("Inserting frame1c with prover: %s", frame1c.Header.Prover) - err = atr.Insert(ctx, frame1c) + err = atr.Insert(frame1c) require.NoError(t, err) drainEvents("frame1c") diff --git a/node/consensus/time/global_time_reel.go b/node/consensus/time/global_time_reel.go index d0cf073..115f566 100644 --- a/node/consensus/time/global_time_reel.go +++ b/node/consensus/time/global_time_reel.go @@ -14,6 +14,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/consensus" "source.quilibrium.com/quilibrium/monorepo/types/store" @@ -110,8 +111,7 @@ type GlobalTimeReel struct { ) error // Control - ctx context.Context - cancel context.CancelFunc + ctx context.Context // Network-specific consensus toggles genesisFrameNumber uint64 @@ -135,8 +135,6 @@ func NewGlobalTimeReel( return nil, errors.Wrap(err, "new global time reel") } - ctx, cancel := context.WithCancel(context.Background()) - genesisFrameNumber := uint64(0) if network == 0 { @@ -169,8 +167,6 @@ func NewGlobalTimeReel( ) error { return nil }, - ctx: ctx, - cancel: cancel, genesisFrameNumber: genesisFrameNumber, archiveMode: archiveMode, }, nil @@ -199,29 +195,41 @@ func (g *GlobalTimeReel) SetRevertFunc( } // Start starts the global time reel -func (g *GlobalTimeReel) Start() error { +func (g *GlobalTimeReel) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + g.ctx = ctx g.logger.Info("starting global time reel") // Warm the in-memory tree/cache from store. 
if err := g.bootstrapFromStore(); err != nil { - return errors.Wrap(err, "start") + g.logger.Error("could not bootstrap from store", zap.Error(err)) + ctx.Throw(err) + return } - return nil -} + ready() + <-ctx.Done() -// Stop stops the global time reel -func (g *GlobalTimeReel) Stop() { g.logger.Info("stopping global time reel") - g.cancel() close(g.eventCh) close(g.eventDone) } // sendEvent sends an event with guaranteed delivery func (g *GlobalTimeReel) sendEvent(event GlobalEvent) { - // This blocks until the event is delivered, guaranteeing order + // prioritize halts select { + case <-g.ctx.Done(): + return + default: + } + + // This blocks until the event is delivered or halted, guaranteeing order + select { + case <-g.ctx.Done(): + return case g.eventCh <- event: g.logger.Debug( "sent event", @@ -229,14 +237,11 @@ func (g *GlobalTimeReel) sendEvent(event GlobalEvent) { zap.Uint64("frame_number", event.Frame.Header.FrameNumber), zap.String("id", g.ComputeFrameID(event.Frame)), ) - case <-g.ctx.Done(): - return } } // Insert inserts a global frame header into the tree structure (non-blocking) func (g *GlobalTimeReel) Insert( - ctx context.Context, frame *protobufs.GlobalFrame, ) error { // Start timing diff --git a/node/consensus/time/global_time_reel_equivocation_test.go b/node/consensus/time/global_time_reel_equivocation_test.go index 5061f73..e459162 100644 --- a/node/consensus/time/global_time_reel_equivocation_test.go +++ b/node/consensus/time/global_time_reel_equivocation_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -24,11 +25,10 @@ func TestGlobalTimeReel_MassiveEquivocationForkChoice(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect events @@ -66,7 +66,7 @@ func TestGlobalTimeReel_MassiveEquivocationForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build chain A: 200 frames with bitmask 0b11100011 (signers 0,1,5,6,7) @@ -85,7 +85,7 @@ func TestGlobalTimeReel_MassiveEquivocationForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, frameA) + err = atr.Insert(frameA) require.NoError(t, err) prevOutput = frameA.Header.Output } @@ -114,7 +114,7 @@ func TestGlobalTimeReel_MassiveEquivocationForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, frameB) + err = atr.Insert(frameB) // Should now succeed even with equivocation assert.NoError(t, err, "Should accept frame despite equivocation at frame %d", i) prevOutput = frameB.Header.Output @@ -152,11 +152,10 @@ func TestGlobalTimeReel_EquivocationWithForkChoice(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain initial events @@ -176,7 +175,7 @@ 
func TestGlobalTimeReel_EquivocationWithForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -199,7 +198,7 @@ func TestGlobalTimeReel_EquivocationWithForkChoice(t *testing.T) { }, } - err = atr.Insert(ctx, frame1A) + err = atr.Insert(frame1A) require.NoError(t, err) // Drain new head event @@ -223,7 +222,7 @@ func TestGlobalTimeReel_EquivocationWithForkChoice(t *testing.T) { } // This should succeed now, but generate an equivocation event - err = atr.Insert(ctx, frame1B) + err = atr.Insert(frame1B) assert.NoError(t, err) // Wait for equivocation event @@ -252,11 +251,10 @@ func TestGlobalTimeReel_NonOverlappingForks(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -269,7 +267,7 @@ func TestGlobalTimeReel_NonOverlappingForks(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build two non-overlapping chains @@ -291,7 +289,7 @@ func TestGlobalTimeReel_NonOverlappingForks(t *testing.T) { }, }, } - err = atr.Insert(ctx, frameA) + err = atr.Insert(frameA) require.NoError(t, err) prevOutputA = frameA.Header.Output } @@ -311,7 +309,7 @@ func TestGlobalTimeReel_NonOverlappingForks(t *testing.T) { }, }, } - err = atr.Insert(ctx, frameB) + err = atr.Insert(frameB) require.NoError(t, err, "non-overlapping fork should be allowed") prevOutputB = frameB.Header.Output } diff --git a/node/consensus/time/global_time_reel_test.go b/node/consensus/time/global_time_reel_test.go index 8b4402f..aecc4ff 100644 --- a/node/consensus/time/global_time_reel_test.go +++ b/node/consensus/time/global_time_reel_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/store" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -27,11 +28,10 @@ func TestGlobalTimeReel_BasicOperations(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Test inserting genesis frame genesis := &protobufs.GlobalFrame{ @@ -44,7 +44,7 @@ func TestGlobalTimeReel_BasicOperations(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Check that genesis became head @@ -66,7 +66,7 @@ func TestGlobalTimeReel_BasicOperations(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Check new head @@ -108,11 +108,10 @@ func TestGlobalTimeReel_Equivocation(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := 
lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Subscribe to events eventCh := atr.GetEventCh() @@ -134,7 +133,7 @@ func TestGlobalTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Drain any events @@ -157,7 +156,7 @@ func TestGlobalTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) // Drain any events @@ -183,7 +182,7 @@ func TestGlobalTimeReel_Equivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation) + err = atr.Insert(frame1Equivocation) assert.NoError(t, err) // Give the goroutine time to send the event @@ -208,11 +207,10 @@ func TestGlobalTimeReel_Fork(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -225,7 +223,7 @@ func TestGlobalTimeReel_Fork(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Insert valid frame 1 with BLS signature @@ -242,7 +240,7 @@ func TestGlobalTimeReel_Fork(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) assertLatestNumOutput(t, s, 1, frame1.Header.Output) assertStoreNumOutput(t, s, 1, frame1.Header.Output) @@ -262,7 +260,7 @@ func TestGlobalTimeReel_Fork(t *testing.T) { } // This should succeed - it's a fork, not equivocation - err = atr.Insert(ctx, frame1Fork) + err = atr.Insert(frame1Fork) assert.NoError(t, err) time.Sleep(50 * time.Millisecond) @@ -280,11 +278,10 @@ func TestGlobalTimeReel_ParentValidation(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -297,7 +294,7 @@ func TestGlobalTimeReel_ParentValidation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) assert.NoError(t, err) // Insert valid frame 1 @@ -311,7 +308,7 @@ func TestGlobalTimeReel_ParentValidation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) assert.NoError(t, err) assertLatestNumOutput(t, s, 1, frame1.Header.Output) @@ -327,7 +324,7 @@ func TestGlobalTimeReel_ParentValidation(t *testing.T) { } // This should succeed (goes to pending since parent not found) - err = atr.Insert(ctx, badFrame) + err = atr.Insert(badFrame) assert.NoError(t, err) assertNoGlobalAt(t, s, 2) @@ -342,11 +339,10 @@ func TestGlobalTimeReel_ForkDetection(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer 
cancel() eventCh := atr.GetEventCh() // Collect events @@ -397,7 +393,7 @@ func TestGlobalTimeReel_ForkDetection(t *testing.T) { // Insert chain for _, frame := range frames { - err := atr.Insert(ctx, frame) + err := atr.Insert(frame) require.NoError(t, err) } @@ -423,11 +419,10 @@ func TestGlobalTimeReel_ForkChoice_MoreSignatures(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -447,7 +442,7 @@ func TestGlobalTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -470,7 +465,7 @@ func TestGlobalTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Weak) + err = atr.Insert(frame1Weak) require.NoError(t, err) // Verify weak frame is initially head @@ -500,7 +495,7 @@ func TestGlobalTimeReel_ForkChoice_MoreSignatures(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Strong) + err = atr.Insert(frame1Strong) require.NoError(t, err) // Verify strong frame is now head @@ -536,11 +531,10 @@ func TestGlobalTimeReel_ForkChoice_NoReplacement(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -560,7 +554,7 @@ func TestGlobalTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -583,7 +577,7 @@ func TestGlobalTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Strong) + err = atr.Insert(frame1Strong) require.NoError(t, err) // Verify strong frame is head @@ -613,7 +607,7 @@ func TestGlobalTimeReel_ForkChoice_NoReplacement(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Weak) + err = atr.Insert(frame1Weak) require.NoError(t, err) // Give some time for any potential events @@ -641,11 +635,10 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -665,7 +658,7 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -688,7 +681,7 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { }, } - err = atr.Insert(ctx, frame1) + err = atr.Insert(frame1) require.NoError(t, err) select { case <-eventCh: @@ -736,21 +729,21 @@ func 
TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { } // Insert chain A frames in order: 2A, 3A, 4A - err = atr.Insert(ctx, frame2A) + err = atr.Insert(frame2A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame3A) + err = atr.Insert(frame3A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame4A) + err = atr.Insert(frame4A) require.NoError(t, err) select { case <-eventCh: @@ -808,7 +801,7 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { // This should work because the time reel should handle out-of-order insertion // Insert frame 4B first - err = atr.Insert(ctx, frame4B) + err = atr.Insert(frame4B) require.NoError(t, err, "inserting 4B should succeed even without its parents") select { case <-eventCh: @@ -822,7 +815,7 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { assert.Equal(t, []byte("frame4A_output"), head.Header.Output, "should still be chain A") // Insert frame 3B - err = atr.Insert(ctx, frame3B) + err = atr.Insert(frame3B) require.NoError(t, err, "inserting 3B should succeed") select { case <-eventCh: @@ -836,7 +829,7 @@ func TestGlobalTimeReel_DeepForkChoice_ReverseInsertion(t *testing.T) { assert.Equal(t, []byte("frame4A_output"), head.Header.Output, "should still be chain A") // Insert frame 2B - this completes the chain B lineage - err = atr.Insert(ctx, frame2B) + err = atr.Insert(frame2B) require.NoError(t, err, "inserting 2B should succeed and complete chain B") // Give time for reorganization @@ -872,11 +865,10 @@ func TestGlobalTimeReel_TreePruning(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -889,7 +881,7 @@ func TestGlobalTimeReel_TreePruning(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build a long chain that will trigger pruning (370 frames total) @@ -905,7 +897,7 @@ func TestGlobalTimeReel_TreePruning(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) prevOutput = frame.Header.Output @@ -956,11 +948,10 @@ func TestGlobalTimeReel_TreePruningWithForks(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -973,7 +964,7 @@ func TestGlobalTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Build main chain for 365 frames @@ -990,7 +981,7 @@ func TestGlobalTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) if i == 5 { @@ -1015,7 +1006,7 @@ func TestGlobalTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, 
forkFrame) + err = atr.Insert(forkFrame) require.NoError(t, err) // Continue main chain for 375 more frames to trigger deep pruning @@ -1030,7 +1021,7 @@ func TestGlobalTimeReel_TreePruningWithForks(t *testing.T) { }, } - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) prevOutput = frame.Header.Output @@ -1066,11 +1057,10 @@ func TestGlobalTimeReel_ForkChoiceInsertionOrder(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Drain any existing events @@ -1094,7 +1084,7 @@ loop: }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Drain genesis event @@ -1150,14 +1140,14 @@ loop: } // Insert weak branch first - err = atr.Insert(ctx, frame1A) + err = atr.Insert(frame1A) require.NoError(t, err) select { case <-eventCh: case <-time.After(50 * time.Millisecond): } - err = atr.Insert(ctx, frame2A) + err = atr.Insert(frame2A) require.NoError(t, err) select { case <-eventCh: @@ -1188,7 +1178,7 @@ loop: frame2B.Header.ParentSelector = computeGlobalPoseidonHash(frame1B.Header.Output) // Insert stronger branch out of order: first 2B (goes to pending), then 1B - err = atr.Insert(ctx, frame2B) + err = atr.Insert(frame2B) require.NoError(t, err, "should accept frame 2B into pending") // Head should still be weak branch @@ -1197,7 +1187,7 @@ loop: assert.Equal(t, []byte("frame2A_output"), head.Header.Output, "head should still be weak branch") // Now insert 1B, which should complete the strong branch and trigger fork choice - err = atr.Insert(ctx, frame1B) + err = atr.Insert(frame1B) require.NoError(t, err) // Give time for fork choice to process @@ -1227,11 +1217,10 @@ func TestGlobalTimeReel_ForkEventsWithReplay(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect all events @@ -1288,7 +1277,7 @@ func TestGlobalTimeReel_ForkEventsWithReplay(t *testing.T) { // Insert initial chain for _, frame := range []*protobufs.GlobalFrame{genesis, frame1, frame2, frame3} { - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) time.Sleep(10 * time.Millisecond) // Allow events to be sent } @@ -1341,7 +1330,7 @@ func TestGlobalTimeReel_ForkEventsWithReplay(t *testing.T) { // Insert stronger fork - this should trigger a reorganization for _, frame := range []*protobufs.GlobalFrame{frame2Prime, frame3Prime, frame4Prime} { - err = atr.Insert(ctx, frame) + err = atr.Insert(frame) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Allow events to propagate } @@ -1395,11 +1384,10 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { atr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = atr.Start() - require.NoError(t, err) - defer atr.Stop() - - ctx := context.Background() + ctx, cancel, _ := 
lifecycle.WithSignallerAndCancel(context.Background()) + go atr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() eventCh := atr.GetEventCh() // Collect equivocation events @@ -1426,7 +1414,7 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, genesis) + err = atr.Insert(genesis) require.NoError(t, err) // Insert valid frame 1 @@ -1443,7 +1431,7 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Valid) + err = atr.Insert(frame1Valid) require.NoError(t, err) // Test Case 1: Complete overlap - same signers, different content @@ -1460,7 +1448,7 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation1) + err = atr.Insert(frame1Equivocation1) assert.NoError(t, err) // Test Case 2: Partial overlap - some same signers @@ -1477,7 +1465,7 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Equivocation2) + err = atr.Insert(frame1Equivocation2) assert.NoError(t, err) // Test Case 3: No overlap - should be allowed (fork) @@ -1494,7 +1482,7 @@ func TestGlobalTimeReel_ComprehensiveEquivocation(t *testing.T) { }, } - err = atr.Insert(ctx, frame1Fork) + err = atr.Insert(frame1Fork) assert.NoError(t, err, "should allow fork with no overlapping signers") // Wait for events to be processed @@ -1529,8 +1517,10 @@ func TestGlobalTimeReel_NonArchive_BootstrapLoadsWindowOf360(t *testing.T) { // Start a new reel in non-archive mode; it should bootstrap only last 360. tr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, tr.Start()) - defer tr.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go tr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() head, err := tr.GetHead() require.NoError(t, err) @@ -1555,8 +1545,10 @@ func TestGlobalTimeReel_NonArchive_SnapForward_WhenGapExceeds360(t *testing.T) { tr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, tr.Start()) - defer tr.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go tr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() head, err := tr.GetHead() require.NoError(t, err) @@ -1573,7 +1565,7 @@ func TestGlobalTimeReel_NonArchive_SnapForward_WhenGapExceeds360(t *testing.T) { ParentSelector: []byte("unknown_parent"), }, } - require.NoError(t, tr.Insert(context.Background(), future)) + require.NoError(t, tr.Insert(future)) newHead, err := tr.GetHead() require.NoError(t, err) @@ -1586,15 +1578,17 @@ func TestGlobalTimeReel_NonArchive_PrunesStore_AsHeadAdvances(t *testing.T) { tr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, tr.Start()) - defer tr.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go tr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert a contiguous chain via Insert so persistCanonicalFrames runs and // prunes store. 
var prev *protobufs.GlobalFrame for n := uint64(1); n <= uint64(maxGlobalTreeDepth)+25; n++ { f := createGlobalFrame(n, prev, []byte(fmt.Sprintf("out%d", n))) - require.NoError(t, tr.Insert(context.Background(), f)) + require.NoError(t, tr.Insert(f)) prev = f } @@ -1622,15 +1616,15 @@ func TestGlobalTimeReel_NonArchive_PendingResolves_WhenParentArrives(t *testing. tr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, tr.Start()) - defer tr.Stop() - - ctx := context.Background() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go tr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() var prev *protobufs.GlobalFrame for n := uint64(90); n <= 99; n++ { f := createGlobalFrame(n, prev, []byte(fmt.Sprintf("base_%d", n))) - require.NoError(t, tr.Insert(ctx, f)) + require.NoError(t, tr.Insert(f)) prev = f } @@ -1646,7 +1640,7 @@ func TestGlobalTimeReel_NonArchive_PendingResolves_WhenParentArrives(t *testing. ParentSelector: computeGlobalPoseidonHash(out100), // points to future parent 100 }, } - require.NoError(t, tr.Insert(ctx, child101)) + require.NoError(t, tr.Insert(child101)) // Should appear in pending (under the selector for out100). pending := tr.GetPendingFrames() @@ -1661,7 +1655,7 @@ func TestGlobalTimeReel_NonArchive_PendingResolves_WhenParentArrives(t *testing. ParentSelector: computeGlobalPoseidonHash([]byte("base_99")), }, } - require.NoError(t, tr.Insert(ctx, parent100)) + require.NoError(t, tr.Insert(parent100)) // Give a beat for pending processing. time.Sleep(25 * time.Millisecond) @@ -1686,10 +1680,11 @@ func TestGlobalTimeReel_NonArchive_SnapThenAppend_NoSpuriousForks(t *testing.T) tr, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, tr.Start()) - defer tr.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go tr.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() - ctx := context.Background() eventCh := tr.GetEventCh() // Drain any startup/new head events. @@ -1716,7 +1711,7 @@ drain: ParentSelector: []byte("unknown"), }, } - require.NoError(t, tr.Insert(ctx, snapTip)) + require.NoError(t, tr.Insert(snapTip)) // We should get a fork select { @@ -1739,7 +1734,7 @@ drain: ParentSelector: computeGlobalPoseidonHash(prev.Header.Output), }, } - require.NoError(t, tr.Insert(ctx, f)) + require.NoError(t, tr.Insert(f)) prev = f // Expect exactly one new-head event per append, and zero fork events. 
@@ -1823,13 +1818,15 @@ func buildAndPersistChain(t *testing.T, s *store.PebbleClockStore, start, end ui // note: needs to be non-archive otherwise insert will only set as pending reel, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, false) require.NoError(t, err) - require.NoError(t, reel.Start()) - defer reel.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go reel.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() var prev *protobufs.GlobalFrame for n := start; n <= end; n++ { f := createGlobalFrame(n, prev, []byte(fmt.Sprintf("out%d", n))) - require.NoError(t, reel.Insert(context.Background(), f)) + require.NoError(t, reel.Insert(f)) prev = f } } diff --git a/node/consensus/time/simple_equivocation_test.go b/node/consensus/time/simple_equivocation_test.go index 833bbb6..8dc558c 100644 --- a/node/consensus/time/simple_equivocation_test.go +++ b/node/consensus/time/simple_equivocation_test.go @@ -3,10 +3,12 @@ package time import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -17,9 +19,10 @@ func TestGlobalTimeReel_SimpleEquivocation(t *testing.T) { globalReel, err := NewGlobalTimeReel(logger, createTestProverRegistry(true), s, 99, true) require.NoError(t, err) - err = globalReel.Start() - require.NoError(t, err) - defer globalReel.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + go globalReel.Start(ctx, func() {}) + time.Sleep(100 * time.Millisecond) + defer cancel() // Insert genesis genesis := &protobufs.GlobalFrame{ @@ -30,7 +33,7 @@ func TestGlobalTimeReel_SimpleEquivocation(t *testing.T) { }, } - err = globalReel.Insert(context.Background(), genesis) + err = globalReel.Insert(genesis) require.NoError(t, err) parentSelector := computeGlobalPoseidonHash(genesis.Header.Output) @@ -47,7 +50,7 @@ func TestGlobalTimeReel_SimpleEquivocation(t *testing.T) { }, } - err = globalReel.Insert(context.Background(), frame1A) + err = globalReel.Insert(frame1A) require.NoError(t, err) // Insert frame 1B with signers 2,3,4,5,6,7 (bitmask 0b11111100) @@ -63,7 +66,7 @@ func TestGlobalTimeReel_SimpleEquivocation(t *testing.T) { }, } - err = globalReel.Insert(context.Background(), frame1B) + err = globalReel.Insert(frame1B) require.NoError(t, err, "Should accept frame despite equivocation") // Check equivocators are tracked diff --git a/node/consensus/tracing/zap_tracer.go b/node/consensus/tracing/zap_tracer.go new file mode 100644 index 0000000..0c29585 --- /dev/null +++ b/node/consensus/tracing/zap_tracer.go @@ -0,0 +1,73 @@ +package tracing + +import ( + "slices" + "time" + + "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus" +) + +type ZapTracer struct { + logger *zap.Logger + params []consensus.LogParam +} + +// Error implements consensus.TraceLogger. +func (z *ZapTracer) Error( + message string, + err error, + params ...consensus.LogParam, +) { + combined := logParamsToZap(z.params) + combined = append(combined, logParamsToZap(params)...) + combined = append(combined, zap.Error(err)) + z.logger.WithOptions(zap.AddCallerSkip(1)).Error(message, combined...) +} + +// Trace implements consensus.TraceLogger. 
+func (z *ZapTracer) Trace(message string, params ...consensus.LogParam) { + combined := logParamsToZap(z.params) + combined = append(combined, logParamsToZap(params)...) + z.logger.WithOptions(zap.AddCallerSkip(1)).Info(message, combined...) +} + +// With implements consensus.TraceLogger. +func (z *ZapTracer) With(params ...consensus.LogParam) consensus.TraceLogger { + return &ZapTracer{ + logger: z.logger, + params: slices.Concat(z.params, params), + } +} + +func NewZapTracer(logger *zap.Logger) *ZapTracer { + return &ZapTracer{logger: logger} +} + +func logParamsToZap(params []consensus.LogParam) []zap.Field { + fs := []zap.Field{} + for _, p := range params { + fs = append(fs, logParamToZap(p)) + } + return fs +} + +func logParamToZap(p consensus.LogParam) zap.Field { + switch p.GetKind() { + case "uint64": + return zap.Uint64(p.GetKey(), p.GetValue().(uint64)) + case "uint32": + return zap.Uint32(p.GetKey(), p.GetValue().(uint32)) + case "int64": + return zap.Int64(p.GetKey(), p.GetValue().(int64)) + case "int32": + return zap.Int32(p.GetKey(), p.GetValue().(int32)) + case "string": + return zap.String(p.GetKey(), p.GetValue().(string)) + case "time": + return zap.Time(p.GetKey(), p.GetValue().(time.Time)) + } + return zap.Any(p.GetKey(), p.GetValue()) +} + +var _ consensus.TraceLogger = (*ZapTracer)(nil) diff --git a/node/consensus/voting/voting_aggregator.go b/node/consensus/voting/voting_aggregator.go new file mode 100644 index 0000000..a66b4d6 --- /dev/null +++ b/node/consensus/voting/voting_aggregator.go @@ -0,0 +1,271 @@ +package voting + +import ( + "github.com/gammazero/workerpool" + "github.com/pkg/errors" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub" + "source.quilibrium.com/quilibrium/monorepo/consensus/timeoutaggregator" + "source.quilibrium.com/quilibrium/monorepo/consensus/timeoutcollector" + "source.quilibrium.com/quilibrium/monorepo/consensus/validator" + "source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator" + "source.quilibrium.com/quilibrium/monorepo/consensus/votecollector" + "source.quilibrium.com/quilibrium/monorepo/protobufs" +) + +func NewAppShardVoteAggregationDistributor() *pubsub.VoteAggregationDistributor[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, +] { + return pubsub.NewVoteAggregationDistributor[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ]() +} + +func NewAppShardVoteAggregator[PeerIDT models.Unique]( + logger consensus.TraceLogger, + filter []byte, + committee consensus.DynamicCommittee, + voteAggregationDistributor *pubsub.VoteAggregationDistributor[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ], + signatureAggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerIDT, + ], + onQCCreated consensus.OnQuorumCertificateCreated, + currentRank uint64, +) ( + consensus.VoteAggregator[*protobufs.AppShardFrame, *protobufs.ProposalVote], + error, +) { + voteProcessorFactory := votecollector.NewVoteProcessorFactory[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerIDT, + ](committee, onQCCreated) + + createCollectorFactoryMethod := votecollector.NewStateMachineFactory( + logger, + filter, + voteAggregationDistributor, + votecollector.VerifyingVoteProcessorFactory[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerIDT, + ]( + voteProcessorFactory.Create, 
+ ), + []byte("appshard"), + signatureAggregator, + votingProvider, + ) + voteCollectors := voteaggregator.NewVoteCollectors( + logger, + currentRank, + workerpool.New(2), + createCollectorFactoryMethod, + ) + + // initialize the vote aggregator + voteAggregator, err := voteaggregator.NewVoteAggregator( + logger, + voteAggregationDistributor, + currentRank, + voteCollectors, + ) + + return voteAggregator, errors.Wrap(err, "new global vote aggregator") +} + +func NewAppShardTimeoutAggregationDistributor() *pubsub.TimeoutAggregationDistributor[*protobufs.ProposalVote] { + return pubsub.NewTimeoutAggregationDistributor[*protobufs.ProposalVote]() +} + +func NewAppShardTimeoutAggregator[PeerIDT models.Unique]( + logger consensus.TraceLogger, + filter []byte, + committee consensus.DynamicCommittee, + consensusVerifier consensus.Verifier[*protobufs.ProposalVote], + signatureAggregator consensus.SignatureAggregator, + timeoutAggregationDistributor *pubsub.TimeoutAggregationDistributor[*protobufs.ProposalVote], + votingProvider consensus.VotingProvider[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerIDT, + ], + currentRank uint64, +) (consensus.TimeoutAggregator[*protobufs.ProposalVote], error) { + // initialize the Validator + validator := validator.NewValidator[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + ](committee, consensusVerifier) + + timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory[ + *protobufs.AppShardFrame, + *protobufs.ProposalVote, + PeerIDT, + ]( + logger, + filter, + signatureAggregator, + timeoutAggregationDistributor, + committee, + validator, + votingProvider, + []byte("appshardtimeout"), + ) + + timeoutCollectorFactory := timeoutcollector.NewTimeoutCollectorFactory( + logger, + timeoutAggregationDistributor, + timeoutProcessorFactory, + ) + timeoutCollectors := timeoutaggregator.NewTimeoutCollectors( + logger, + currentRank, + timeoutCollectorFactory, + ) + + // initialize the timeout aggregator + timeoutAggregator, err := timeoutaggregator.NewTimeoutAggregator( + logger, + currentRank, + timeoutCollectors, + ) + + return timeoutAggregator, errors.Wrap(err, "new global timeout aggregator") +} + +func NewGlobalVoteAggregationDistributor() *pubsub.VoteAggregationDistributor[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, +] { + return pubsub.NewVoteAggregationDistributor[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ]() +} + +func NewGlobalVoteAggregator[PeerIDT models.Unique]( + logger consensus.TraceLogger, + committee consensus.DynamicCommittee, + voteAggregationDistributor *pubsub.VoteAggregationDistributor[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ], + signatureAggregator consensus.SignatureAggregator, + votingProvider consensus.VotingProvider[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + PeerIDT, + ], + onQCCreated consensus.OnQuorumCertificateCreated, + currentRank uint64, +) ( + consensus.VoteAggregator[*protobufs.GlobalFrame, *protobufs.ProposalVote], + error, +) { + voteProcessorFactory := votecollector.NewVoteProcessorFactory[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + PeerIDT, + ](committee, onQCCreated) + + createCollectorFactoryMethod := votecollector.NewStateMachineFactory( + logger, + nil, + voteAggregationDistributor, + votecollector.VerifyingVoteProcessorFactory[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + PeerIDT, + ]( + voteProcessorFactory.Create, + ), + []byte("global"), + signatureAggregator, + votingProvider, + ) + voteCollectors := 
voteaggregator.NewVoteCollectors( + logger, + currentRank, + workerpool.New(2), + createCollectorFactoryMethod, + ) + + // initialize the vote aggregator + voteAggregator, err := voteaggregator.NewVoteAggregator( + logger, + voteAggregationDistributor, + currentRank, + voteCollectors, + ) + + return voteAggregator, errors.Wrap(err, "new global vote aggregator") +} + +func NewGlobalTimeoutAggregationDistributor() *pubsub.TimeoutAggregationDistributor[*protobufs.ProposalVote] { + return pubsub.NewTimeoutAggregationDistributor[*protobufs.ProposalVote]() +} + +func NewGlobalTimeoutAggregator[PeerIDT models.Unique]( + logger consensus.TraceLogger, + committee consensus.DynamicCommittee, + consensusVerifier consensus.Verifier[*protobufs.ProposalVote], + signatureAggregator consensus.SignatureAggregator, + timeoutAggregationDistributor *pubsub.TimeoutAggregationDistributor[*protobufs.ProposalVote], + votingProvider consensus.VotingProvider[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + PeerIDT, + ], + currentRank uint64, +) (consensus.TimeoutAggregator[*protobufs.ProposalVote], error) { + // initialize the Validator + validator := validator.NewValidator[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + ](committee, consensusVerifier) + + timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory[ + *protobufs.GlobalFrame, + *protobufs.ProposalVote, + PeerIDT, + ]( + logger, + nil, + signatureAggregator, + timeoutAggregationDistributor, + committee, + validator, + votingProvider, + []byte("globaltimeout"), + ) + + timeoutCollectorFactory := timeoutcollector.NewTimeoutCollectorFactory( + logger, + timeoutAggregationDistributor, + timeoutProcessorFactory, + ) + timeoutCollectors := timeoutaggregator.NewTimeoutCollectors( + logger, + currentRank, + timeoutCollectorFactory, + ) + + // initialize the timeout aggregator + timeoutAggregator, err := timeoutaggregator.NewTimeoutAggregator( + logger, + currentRank, + timeoutCollectors, + ) + + return timeoutAggregator, errors.Wrap(err, "new global timeout aggregator") +} diff --git a/node/datarpc/data_worker_ipc_server.go b/node/datarpc/data_worker_ipc_server.go index 81e1f85..a51c3c9 100644 --- a/node/datarpc/data_worker_ipc_server.go +++ b/node/datarpc/data_worker_ipc_server.go @@ -12,6 +12,7 @@ import ( "google.golang.org/grpc" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/consensus/app" qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc" "source.quilibrium.com/quilibrium/monorepo/node/keys" @@ -26,6 +27,8 @@ import ( type DataWorkerIPCServer struct { protobufs.UnimplementedDataIPCServiceServer + ctx lifecycle.SignalerContext + cancel func() listenAddrGRPC string config *config.Config logger *zap.Logger @@ -130,6 +133,9 @@ func (r *DataWorkerIPCServer) RespawnServer(filter []byte) error { r.server = nil } if r.appConsensusEngine != nil { + if r.cancel != nil { + r.cancel() + } <-r.appConsensusEngine.Stop(false) r.appConsensusEngine = nil } @@ -206,6 +212,12 @@ func (r *DataWorkerIPCServer) RespawnServer(filter []byte) error { globalTimeReel, r.server, ) + r.ctx, r.cancel, _ = lifecycle.WithSignallerAndCancel(context.Background()) + go func() { + if err := r.appConsensusEngine.Start(r.ctx); err != nil { + r.logger.Error("error while running", zap.Error(err)) + } + }() } go func() { protobufs.RegisterDataIPCServiceServer(r.server, r) diff --git a/node/dbscan/main.go b/node/dbscan/main.go index 
cd46aba..7766a7c 100644 --- a/node/dbscan/main.go +++ b/node/dbscan/main.go @@ -8,9 +8,11 @@ import ( "encoding/json" "flag" "fmt" + "io" "log" "math/big" "path/filepath" + "slices" "sort" "strings" @@ -205,7 +207,11 @@ func main() { var status string switch { case entry.inFirst && entry.inSecond: - status = fmt.Sprintf("present in %s and %s", *configDirectory1, *configDirectory2) + status = fmt.Sprintf( + "present in %s and %s", + *configDirectory1, + *configDirectory2, + ) case entry.inFirst: status = fmt.Sprintf("only present in %s", *configDirectory1) case entry.inSecond: @@ -275,6 +281,11 @@ func decodeValue(key []byte, value []byte) string { return shortHex(value) } return decodeDataProofValue(key[0], key[1], value) + case store.CONSENSUS: + if len(key) < 2 { + return shortHex(value) + } + return decodeConsensusValue(key, value) case store.INBOX: if len(key) < 2 { return shortHex(value) @@ -282,6 +293,8 @@ func decodeValue(key []byte, value []byte) string { return decodeInboxValue(key[1], value) case store.HYPERGRAPH_SHARD: return decodeHypergraphValue(key, value) + case store.MIGRATION: + return decodeMigrationValue(value) default: return shortHex(value) } @@ -323,11 +336,98 @@ func decodeClockValue(key []byte, value []byte) string { return fmt.Sprintf("frame=%d", frame) } return shortHex(value) + case store.CLOCK_GLOBAL_CERTIFIED_STATE, + store.CLOCK_SHARD_CERTIFIED_STATE: + return decodeCertifiedStateValue(value) + case store.CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_EARLIEST, + store.CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_LATEST, + store.CLOCK_SHARD_CERTIFIED_STATE_INDEX_EARLIEST, + store.CLOCK_SHARD_CERTIFIED_STATE_INDEX_LATEST, + store.CLOCK_QUORUM_CERTIFICATE_INDEX_EARLIEST, + store.CLOCK_QUORUM_CERTIFICATE_INDEX_LATEST, + store.CLOCK_TIMEOUT_CERTIFICATE_INDEX_EARLIEST, + store.CLOCK_TIMEOUT_CERTIFICATE_INDEX_LATEST: + if len(value) == 8 { + rank := binary.BigEndian.Uint64(value) + return fmt.Sprintf("rank=%d", rank) + } + return shortHex(value) + case store.CLOCK_QUORUM_CERTIFICATE: + return decodeQuorumCertificateValue(value) + case store.CLOCK_TIMEOUT_CERTIFICATE: + return decodeTimeoutCertificateValue(value) default: return shortHex(value) } } +func decodeCertifiedStateValue(value []byte) string { + if len(value) != 24 { + return shortHex(value) + } + + frameNumber := binary.BigEndian.Uint64(value[:8]) + qcRank := binary.BigEndian.Uint64(value[8:16]) + tcRank := binary.BigEndian.Uint64(value[16:]) + return fmt.Sprintf( + "frame=%d quorum_rank=%d timeout_rank=%d", + frameNumber, + qcRank, + tcRank, + ) +} + +func decodeQuorumCertificateValue(value []byte) string { + qc := &protobufs.QuorumCertificate{} + if err := qc.FromCanonicalBytes(slices.Clone(value)); err != nil { + return fmt.Sprintf( + "quorum_certificate decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + if s, err := jsonMarshaler.Marshal(qc); err == nil { + return string(s) + } + + return shortHex(value) +} + +func decodeTimeoutCertificateValue(value []byte) string { + tc := &protobufs.TimeoutCertificate{} + if err := tc.FromCanonicalBytes(slices.Clone(value)); err != nil { + return fmt.Sprintf( + "timeout_certificate decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + if s, err := jsonMarshaler.Marshal(tc); err == nil { + return string(s) + } + + return shortHex(value) +} + +func decodeTimeoutStateValue(value []byte) string { + state := &protobufs.TimeoutState{} + if err := state.FromCanonicalBytes(slices.Clone(value)); err != nil { + return fmt.Sprintf( + "timeout_state decode_error=%v raw=%s", + 
err, + shortHex(value), + ) + } + + if s, err := jsonMarshaler.Marshal(state); err == nil { + return string(s) + } + + return shortHex(value) +} + func decodeKeyBundleValue(sub byte, value []byte) string { switch sub { case store.KEY_IDENTITY: @@ -354,7 +454,11 @@ func decodeKeyBundleValue(sub byte, value []byte) string { if len(value) >= 32 { counterparty := shortHex(value[:32]) signature := shortHex(value[32:]) - return fmt.Sprintf("counterparty=%s\nsignature=%s", counterparty, signature) + return fmt.Sprintf( + "counterparty=%s\nsignature=%s", + counterparty, + signature, + ) } } return shortHex(value) @@ -401,6 +505,151 @@ func decodeDataProofValue(prefix byte, sub byte, value []byte) string { return shortHex(value) } +func decodeConsensusValue(key []byte, value []byte) string { + switch key[1] { + case store.CONSENSUS_STATE: + return decodeConsensusStateValue(value) + case store.CONSENSUS_LIVENESS: + return decodeConsensusLivenessValue(value) + default: + return shortHex(value) + } +} + +func decodeConsensusStateValue(value []byte) string { + buf := bytes.NewReader(value) + + filter, err := readUint32PrefixedBytes(buf) + if err != nil { + return fmt.Sprintf( + "consensus_state decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + var finalizedRank uint64 + if err := binary.Read(buf, binary.BigEndian, &finalizedRank); err != nil { + return fmt.Sprintf( + "consensus_state decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + var latestAckRank uint64 + if err := binary.Read(buf, binary.BigEndian, &latestAckRank); err != nil { + return fmt.Sprintf( + "consensus_state decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + latestTimeoutBytes, err := readUint32PrefixedBytes(buf) + if err != nil { + return fmt.Sprintf( + "consensus_state decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + var builder strings.Builder + fmt.Fprintf(&builder, "filter=%s\n", shortHex(filter)) + fmt.Fprintf(&builder, "finalized_rank=%d\n", finalizedRank) + fmt.Fprintf(&builder, "latest_ack_rank=%d", latestAckRank) + + if len(latestTimeoutBytes) > 0 { + builder.WriteString("\nlatest_timeout_state=\n") + builder.WriteString(indent(decodeTimeoutStateValue(latestTimeoutBytes))) + } + + return builder.String() +} + +func decodeConsensusLivenessValue(value []byte) string { + buf := bytes.NewReader(value) + + filter, err := readUint32PrefixedBytes(buf) + if err != nil { + return fmt.Sprintf( + "consensus_liveness decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + var currentRank uint64 + if err := binary.Read(buf, binary.BigEndian, ¤tRank); err != nil { + return fmt.Sprintf( + "consensus_liveness decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + latestQCBytes, err := readUint32PrefixedBytes(buf) + if err != nil { + return fmt.Sprintf( + "consensus_liveness decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + priorTCBytes, err := readUint32PrefixedBytes(buf) + if err != nil { + return fmt.Sprintf( + "consensus_liveness decode_error=%v raw=%s", + err, + shortHex(value), + ) + } + + var builder strings.Builder + fmt.Fprintf(&builder, "filter=%s\n", shortHex(filter)) + fmt.Fprintf(&builder, "current_rank=%d", currentRank) + + if len(latestQCBytes) > 0 { + builder.WriteString("\nlatest_quorum_certificate=\n") + builder.WriteString(indent(decodeQuorumCertificateValue(latestQCBytes))) + } + + if len(priorTCBytes) > 0 { + builder.WriteString("\nprior_timeout_certificate=\n") + builder.WriteString(indent(decodeTimeoutCertificateValue(priorTCBytes))) + } + + 
return builder.String() +} + +func decodeMigrationValue(value []byte) string { + if len(value) == 8 { + version := binary.BigEndian.Uint64(value) + return fmt.Sprintf("migration_version=%d", version) + } + return shortHex(value) +} + +func readUint32PrefixedBytes(r io.Reader) ([]byte, error) { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + + if length == 0 { + return nil, nil + } + + data := make([]byte, length) + if _, err := io.ReadFull(r, data); err != nil { + return nil, err + } + + return data, nil +} + func decodeInboxValue(sub byte, value []byte) string { switch sub { case store.INBOX_MESSAGE: @@ -487,7 +736,10 @@ func decodeHypergraphProto(value []byte) (string, bool) { } hasFields := false - msg.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + msg.ProtoReflect().Range(func( + fd protoreflect.FieldDescriptor, + v protoreflect.Value, + ) bool { hasFields = true return false }) @@ -516,7 +768,11 @@ func decodeHypergraphProto(value []byte) (string, bool) { func summarizeVectorCommitmentTree(value []byte) string { _, err := tries.DeserializeNonLazyTree(value) if err != nil { - return fmt.Sprintf("vector_commitment_tree decode_error=%v raw=%s", err, shortHex(value)) + return fmt.Sprintf( + "vector_commitment_tree decode_error=%v raw=%s", + err, + shortHex(value), + ) } sum := sha256.Sum256(value) @@ -569,7 +825,11 @@ func summarizeHypergraphTreeNode(value []byte) string { jsonBytes, err := json.MarshalIndent(summary, "", " ") if err != nil { - return fmt.Sprintf("tree_leaf key=%s sha256=%s", shortHex(leaf.Key), hashStr) + return fmt.Sprintf( + "tree_leaf key=%s sha256=%s", + shortHex(leaf.Key), + hashStr, + ) } return string(jsonBytes) case tries.TypeBranch: @@ -742,6 +1002,10 @@ func describeKey(key []byte) string { return describeInboxKey(key) case store.WORKER: return describeWorkerKey(key) + case store.CONSENSUS: + return describeConsensusKey(key) + case store.MIGRATION: + return "pebble store migration version" default: return fmt.Sprintf("unknown prefix 0x%02x (len=%d)", key[0], len(key)) } @@ -807,6 +1071,37 @@ func describeClockKey(key []byte) string { "clock shard frame latest index shard=%s", shortHex(key[2:]), ) + case store.CLOCK_GLOBAL_CERTIFIED_STATE: + if len(key) >= 10 { + rank := binary.BigEndian.Uint64(key[2:10]) + return fmt.Sprintf("clock global certified state rank=%d", rank) + } + return "clock global certified state (invalid length)" + case store.CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_EARLIEST: + return "clock global certified state earliest index" + case store.CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_LATEST: + return "clock global certified state latest index" + case store.CLOCK_SHARD_CERTIFIED_STATE: + if len(key) >= 10 { + rank := binary.BigEndian.Uint64(key[2:10]) + filter := key[10:] + return fmt.Sprintf( + "clock shard certified state rank=%d shard=%s", + rank, + shortHex(filter), + ) + } + return "clock shard certified state (invalid length)" + case store.CLOCK_SHARD_CERTIFIED_STATE_INDEX_EARLIEST: + return fmt.Sprintf( + "clock shard certified state earliest index shard=%s", + shortHex(key[2:]), + ) + case store.CLOCK_SHARD_CERTIFIED_STATE_INDEX_LATEST: + return fmt.Sprintf( + "clock shard certified state latest index shard=%s", + shortHex(key[2:]), + ) case store.CLOCK_SHARD_FRAME_INDEX_PARENT: if len(key) >= 42 { frame := binary.BigEndian.Uint64(key[2:10]) @@ -861,6 +1156,51 @@ func describeClockKey(key []byte) string { "clock compaction marker shard=%s", 
shortHex(key[2:]), ) + case store.CLOCK_QUORUM_CERTIFICATE: + if len(key) >= 10 { + rank := binary.BigEndian.Uint64(key[2:10]) + filter := key[10:] + if len(filter) > 0 { + return fmt.Sprintf( + "clock quorum certificate rank=%d filter=%s", + rank, + shortHex(filter), + ) + } + return fmt.Sprintf("clock quorum certificate rank=%d", rank) + } + return "clock quorum certificate (invalid length)" + case store.CLOCK_QUORUM_CERTIFICATE_INDEX_EARLIEST: + return fmt.Sprintf( + "clock quorum certificate earliest index filter=%s", + shortHex(key[2:]), + ) + case store.CLOCK_QUORUM_CERTIFICATE_INDEX_LATEST: + return fmt.Sprintf( + "clock quorum certificate latest index filter=%s", + shortHex(key[2:]), + ) + case store.CLOCK_TIMEOUT_CERTIFICATE: + if len(key) >= 10 { + rank := binary.BigEndian.Uint64(key[2:10]) + filter := key[10:] + return fmt.Sprintf( + "clock timeout certificate rank=%d filter=%s", + rank, + shortHex(filter), + ) + } + return "clock timeout certificate (invalid length)" + case store.CLOCK_TIMEOUT_CERTIFICATE_INDEX_EARLIEST: + return fmt.Sprintf( + "clock timeout certificate earliest index filter=%s", + shortHex(key[2:]), + ) + case store.CLOCK_TIMEOUT_CERTIFICATE_INDEX_LATEST: + return fmt.Sprintf( + "clock timeout certificate latest index filter=%s", + shortHex(key[2:]), + ) case store.CLOCK_SHARD_FRAME_CANDIDATE_SHARD: return fmt.Sprintf("clock shard candidate frame raw=%s", shortHex(key)) case store.CLOCK_SHARD_FRAME_CANDIDATE_INDEX_LATEST: @@ -1414,6 +1754,27 @@ func describeWorkerKey(key []byte) string { } } +func describeConsensusKey(key []byte) string { + if len(key) < 2 { + return "consensus store: invalid key length" + } + + sub := key[1] + filter := key[2:] + switch sub { + case store.CONSENSUS_STATE: + return fmt.Sprintf("consensus state filter=%s", shortHex(filter)) + case store.CONSENSUS_LIVENESS: + return fmt.Sprintf("consensus liveness filter=%s", shortHex(filter)) + default: + return fmt.Sprintf( + "consensus store unknown subtype 0x%02x raw=%s", + sub, + shortHex(filter), + ) + } +} + func shortHex(b []byte) string { if len(b) == 0 { return "0x" diff --git a/node/execution/engines/compute_execution_engine.go b/node/execution/engines/compute_execution_engine.go index ad9d2cb..a273915 100644 --- a/node/execution/engines/compute_execution_engine.go +++ b/node/execution/engines/compute_execution_engine.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/fees" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/compute" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" @@ -41,7 +42,7 @@ type ComputeExecutionEngine struct { intrinsicsMutex sync.RWMutex mode ExecutionMode mu sync.RWMutex - stopChan chan struct{} + ctx lifecycle.SignalerContext } func NewComputeExecutionEngine( @@ -136,45 +137,16 @@ func (e *ComputeExecutionEngine) GetCapabilities() []*protobufs.Capability { return capabilities } -func (e *ComputeExecutionEngine) Start() <-chan error { - errChan := make(chan error, 1) +func (e *ComputeExecutionEngine) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + e.logger.Info("starting compute execution engine") + e.ctx = ctx + ready() - e.mu.Lock() - e.stopChan = make(chan struct{}, 1) - e.mu.Unlock() - - go func() { - e.logger.Info("starting compute execution engine") - - <-e.stopChan - e.logger.Info("stopping compute execution engine") - }() - - return 
errChan -} - -func (e *ComputeExecutionEngine) Stop(force bool) <-chan error { - errChan := make(chan error) - - go func() { - e.logger.Info("stopping compute execution engine", zap.Bool("force", force)) - - // Signal stop if we have a stopChan - e.mu.RLock() - if e.stopChan != nil { - select { - case <-e.stopChan: - // Already closed - default: - close(e.stopChan) - } - } - e.mu.RUnlock() - - close(errChan) - }() - - return errChan + <-ctx.Done() + e.logger.Info("stopping compute execution engine") } func (e *ComputeExecutionEngine) Prove( @@ -478,37 +450,42 @@ func (e *ComputeExecutionEngine) validateBundle( // Validate each operation in the bundle sequentially for i, op := range bundle.Requests { - e.logger.Debug( - "validating bundled operation", - zap.Int("operation", i), - zap.String("address", hex.EncodeToString(address)), - ) - - // Check if this is a compute operation type - isComputeOp := op.GetComputeDeploy() != nil || - op.GetComputeUpdate() != nil || - op.GetCodeDeploy() != nil || - op.GetCodeExecute() != nil || - op.GetCodeFinalize() != nil - - if !isComputeOp { - // Skip non-compute operations + select { + case <-e.ctx.Done(): + return errors.Wrap(errors.New("context canceled"), "validate bundle") + default: e.logger.Debug( - "skipping non-compute operation in bundle", + "validating bundled operation", zap.Int("operation", i), + zap.String("address", hex.EncodeToString(address)), ) - continue - } - // Validate this operation individually - err := e.validateIndividualMessage( - frameNumber, - address, - op, - true, - ) - if err != nil { - return errors.Wrap(err, "validate bundle") + // Check if this is a compute operation type + isComputeOp := op.GetComputeDeploy() != nil || + op.GetComputeUpdate() != nil || + op.GetCodeDeploy() != nil || + op.GetCodeExecute() != nil || + op.GetCodeFinalize() != nil + + if !isComputeOp { + // Skip non-compute operations + e.logger.Debug( + "skipping non-compute operation in bundle", + zap.Int("operation", i), + ) + continue + } + + // Validate this operation individually + err := e.validateIndividualMessage( + frameNumber, + address, + op, + true, + ) + if err != nil { + return errors.Wrap(err, "validate bundle") + } } } diff --git a/node/execution/engines/compute_execution_engine_test.go b/node/execution/engines/compute_execution_engine_test.go index 9791d56..5e1e9e4 100644 --- a/node/execution/engines/compute_execution_engine_test.go +++ b/node/execution/engines/compute_execution_engine_test.go @@ -418,6 +418,7 @@ func createTestGlobalConsensusEngine(t *testing.T) ( nil, nil, nil, + nil, &mockEncryptedChannel{}, &mocks.MockBulletproofProver{}, &mocks.MockVerifiableEncryptor{}, @@ -491,6 +492,7 @@ func createTestAppConsensusEngine( clockStore := pstore.NewPebbleClockStore(pebbleDB, logger) inboxStore := pstore.NewPebbleInboxStore(pebbleDB, logger) shardStore := pstore.NewPebbleShardsStore(pebbleDB, logger) + consensusStore := pstore.NewPebbleConsensusStore(pebbleDB, logger) hypergraphStore := pstore.NewPebbleHypergraphStore(config.DB, pebbleDB, logger, &mocks.MockVerifiableEncryptor{}, mockInclusionProver) appTimeReel := createTestAppTimeReel(t, appAddress, clockStore) mockProverRegistry := createTestProverRegistry() @@ -548,6 +550,7 @@ func createTestAppConsensusEngine( inboxStore, shardStore, hypergraphStore, + consensusStore, mockFrameProver, mockInclusionProver, &mocks.MockBulletproofProver{}, // bulletproofProver @@ -1487,8 +1490,6 @@ req:A a rdfs:Property; assert.NoError(t, err) assertCodeExecutionResult(t, mode, msgs, err, false) } - 
- <-engine.Stop(false) }) }) @@ -1569,8 +1570,6 @@ req:A a rdfs:Property; if engineMode == engines.GlobalMode && err == nil { assert.NotNil(t, msgs, "Bundled operations should produce responses in GlobalMode") } - - <-engine.Stop(false) }) }) @@ -1660,7 +1659,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -1753,7 +1751,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -1855,7 +1852,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -1943,7 +1939,6 @@ req:A a rdfs:Property; assert.Error(t, err) assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -2015,7 +2010,6 @@ req:A a rdfs:Property; assert.Error(t, err) assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -2124,7 +2118,6 @@ req:A a rdfs:Property; } assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -2401,7 +2394,6 @@ req:A a rdfs:Property; } assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -2485,7 +2477,6 @@ req:A a rdfs:Property; assert.Error(t, err) assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -2570,7 +2561,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -2648,7 +2638,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -2728,7 +2717,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -2814,7 +2802,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -2901,7 +2888,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -2997,7 +2983,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -3093,7 +3078,6 @@ req:A a rdfs:Property; // All operations should be in the same stage since there are no conflicts - <-engine.Stop(false) }) }) @@ -3197,7 +3181,6 @@ req:A a rdfs:Property; // Should produce stages: [op1], [op2, op3], [op4] - <-engine.Stop(false) }) }) @@ -3304,7 +3287,6 @@ req:A a rdfs:Property; // Stage 1: op3 (conflicts with op1) // Stage 2: op4 (depends on op1 and op2) - <-engine.Stop(false) }) }) @@ -3396,7 +3378,6 @@ req:A a rdfs:Property; assert.Equal(t, []byte("op2"), ce.ExecuteOperations[1].Identifier) assert.Equal(t, [][]byte{[]byte("op1")}, ce.ExecuteOperations[1].Dependencies) - <-engine.Stop(false) }) }) @@ -3492,7 +3473,6 @@ req:A a rdfs:Property; // The execution stages should be computed and stored - <-engine.Stop(false) }) }) @@ -3572,7 +3552,6 @@ req:A a rdfs:Property; // Verify rendezvous is stored 
correctly assert.NotNil(t, ce.Rendezvous) - <-engine.Stop(false) }) }) @@ -3659,7 +3638,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -3791,7 +3769,6 @@ req:A a rdfs:Property; // Since we can't easily verify the exact calls, we trust the test passes } - <-engine.Stop(false) }) }) @@ -3916,7 +3893,6 @@ req:A a rdfs:Property; // State changes should not be committed for failed operations - <-engine.Stop(false) }) }) @@ -4096,7 +4072,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -4229,7 +4204,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -4353,7 +4327,6 @@ req:A a rdfs:Property; assert.Contains(t, err.Error(), "empty") } - <-engine.Stop(false) }) }) @@ -4491,7 +4464,6 @@ rdfs:range req:Request. assert.Contains(t, err.Error(), "limit") } - <-engine.Stop(false) }) }) @@ -4610,7 +4582,6 @@ req:A a rdfs:Property; assertCodeExecutionResult(t, mode, msgs, err, true) assert.Len(t, cf.Results, 2) - <-engine.Stop(false) }) }) @@ -4705,7 +4676,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -4831,7 +4801,6 @@ req:A a rdfs:Property; } assert.Nil(t, msgs) - <-engine.Stop(false) }) }) @@ -4954,7 +4923,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -5077,7 +5045,6 @@ req:A a rdfs:Property; msgs, err := engine.ProcessMessage(1, big.NewInt(0), msg.Address, msg.Payload, state) assertCodeExecutionResult(t, mode, msgs, err, false) - <-engine.Stop(false) }) }) @@ -5196,7 +5163,6 @@ req:A a rdfs:Property; assertCodeExecutionResult(t, mode, msgs, err, false) } - <-engine.Stop(false) }) }) @@ -5335,7 +5301,6 @@ req:A a rdfs:Property; } } - <-engine.Stop(false) }) }) } diff --git a/node/execution/engines/global_execution_engine.go b/node/execution/engines/global_execution_engine.go index aaec4df..96430f7 100644 --- a/node/execution/engines/global_execution_engine.go +++ b/node/execution/engines/global_execution_engine.go @@ -12,6 +12,7 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/protobufs" @@ -46,7 +47,7 @@ type GlobalExecutionEngine struct { intrinsics map[string]intrinsics.Intrinsic intrinsicsMutex sync.RWMutex mu sync.RWMutex - stopChan chan struct{} + ctx lifecycle.SignalerContext } func NewGlobalExecutionEngine( @@ -119,45 +120,15 @@ func (e *GlobalExecutionEngine) GetCapabilities() []*protobufs.Capability { } } -func (e *GlobalExecutionEngine) Start() <-chan error { - errChan := make(chan error, 1) - - e.mu.Lock() - e.stopChan = make(chan struct{}, 1) - e.mu.Unlock() - - go func() { - e.logger.Info("starting global execution engine") - - <-e.stopChan - 
e.logger.Info("stopping global execution engine") - }() - - return errChan -} - -func (e *GlobalExecutionEngine) Stop(force bool) <-chan error { - errChan := make(chan error, 1) - - go func() { - e.logger.Info("stopping global execution engine", zap.Bool("force", force)) - - // Signal stop if we have a stopChan - e.mu.RLock() - if e.stopChan != nil { - select { - case <-e.stopChan: - // Already closed - default: - close(e.stopChan) - } - } - e.mu.RUnlock() - - close(errChan) - }() - - return errChan +func (e *GlobalExecutionEngine) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + e.ctx = ctx + e.logger.Info("starting global execution engine") + ready() + <-e.ctx.Done() + e.logger.Info("stopping global execution engine") } func (e *GlobalExecutionEngine) ValidateMessage( @@ -217,49 +188,54 @@ func (e *GlobalExecutionEngine) validateBundle( // Validate each operation in the bundle sequentially for i, op := range bundle.Requests { - e.logger.Debug( - "validating bundled operation", - zap.Int("operation", i), - zap.String("address", hex.EncodeToString(address)), - ) - - // Check if this is a global operation type - isGlobalOp := op.GetJoin() != nil || - op.GetLeave() != nil || - op.GetPause() != nil || - op.GetResume() != nil || - op.GetConfirm() != nil || - op.GetReject() != nil || - op.GetKick() != nil || - op.GetUpdate() != nil || - op.GetShard() != nil - - if !isGlobalOp { - if e.config.Network == 0 && - frameNumber <= token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END { - return errors.Wrap( - errors.New("enrollment period has not ended"), - "validate bundle", - ) - } - // Skip non-global operations (e.g., token payments, compute ops) - // They are retained in the bundle for reference but not validated here + select { + case <-e.ctx.Done(): + return errors.Wrap(errors.New("context canceled"), "validate bundle") + default: e.logger.Debug( - "skipping non-global operation in bundle", + "validating bundled operation", zap.Int("operation", i), + zap.String("address", hex.EncodeToString(address)), ) - continue - } - // Validate this operation individually - err := e.validateIndividualMessage( - frameNumber, - address, - op, - true, - ) - if err != nil { - return errors.Wrap(err, "validate bundle") + // Check if this is a global operation type + isGlobalOp := op.GetJoin() != nil || + op.GetLeave() != nil || + op.GetPause() != nil || + op.GetResume() != nil || + op.GetConfirm() != nil || + op.GetReject() != nil || + op.GetKick() != nil || + op.GetUpdate() != nil || + op.GetShard() != nil + + if !isGlobalOp { + if e.config.Network == 0 && + frameNumber <= token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END { + return errors.Wrap( + errors.New("enrollment period has not ended"), + "validate bundle", + ) + } + // Skip non-global operations (e.g., token payments, compute ops) + // They are retained in the bundle for reference but not validated here + e.logger.Debug( + "skipping non-global operation in bundle", + zap.Int("operation", i), + ) + continue + } + + // Validate this operation individually + err := e.validateIndividualMessage( + frameNumber, + address, + op, + true, + ) + if err != nil { + return errors.Wrap(err, "validate bundle") + } } } diff --git a/node/execution/engines/global_execution_engine_test.go b/node/execution/engines/global_execution_engine_test.go index 39882e4..6a15e4a 100644 --- a/node/execution/engines/global_execution_engine_test.go +++ b/node/execution/engines/global_execution_engine_test.go @@ -2,6 +2,7 @@ package engines_test import ( "bytes" + "context" 
"crypto/rand" "math/big" "slices" @@ -14,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/engines" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" @@ -56,7 +58,8 @@ func TestGlobalExecutionEngine_Start(t *testing.T) { require.NoError(t, err) // Test starting and stopping the engine - errChan := engine.Start() + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + engine.Start(ctx, func() {}) // Engine should start without errors select { @@ -67,7 +70,8 @@ func TestGlobalExecutionEngine_Start(t *testing.T) { } // Stop the engine - <-engine.Stop(false) + cancel() + <-ctx.Done() } func TestGlobalExecutionEngine_ProcessMessage(t *testing.T) { diff --git a/node/execution/engines/hypergraph_execution_engine.go b/node/execution/engines/hypergraph_execution_engine.go index 38d7a59..c46ded4 100644 --- a/node/execution/engines/hypergraph_execution_engine.go +++ b/node/execution/engines/hypergraph_execution_engine.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/fees" hypergraphintrinsic "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/hypergraph" "source.quilibrium.com/quilibrium/monorepo/protobufs" @@ -38,7 +39,7 @@ type HypergraphExecutionEngine struct { intrinsicsMutex sync.RWMutex mode ExecutionMode mu sync.RWMutex - stopChan chan struct{} + ctx lifecycle.SignalerContext } func NewHypergraphExecutionEngine( @@ -204,48 +205,15 @@ func (e *HypergraphExecutionEngine) GetCapabilities() []*protobufs.Capability { } } -func (e *HypergraphExecutionEngine) Start() <-chan error { - errChan := make(chan error, 1) - - e.mu.Lock() - e.stopChan = make(chan struct{}, 1) - e.mu.Unlock() - - go func() { - e.logger.Info("starting hypergraph execution engine") - - <-e.stopChan - e.logger.Info("stopping hypergraph execution engine") - }() - - return errChan -} - -func (e *HypergraphExecutionEngine) Stop(force bool) <-chan error { - errChan := make(chan error, 1) - - go func() { - e.logger.Info( - "stopping hypergraph execution engine", - zap.Bool("force", force), - ) - - // Signal stop if we have a stopChan - e.mu.RLock() - if e.stopChan != nil { - select { - case <-e.stopChan: - // Already closed - default: - close(e.stopChan) - } - } - e.mu.RUnlock() - - close(errChan) - }() - - return errChan +func (e *HypergraphExecutionEngine) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + e.ctx = ctx + e.logger.Info("starting hypergraph execution engine") + ready() + <-e.ctx.Done() + e.logger.Info("stopping hypergraph execution engine") } func (e *HypergraphExecutionEngine) ValidateMessage( @@ -310,38 +278,43 @@ func (e *HypergraphExecutionEngine) validateBundle( // Validate each operation in the bundle sequentially for i, op := range bundle.Requests { - e.logger.Debug( - "validating bundled operation", - zap.Int("operation", i), - zap.String("address", hex.EncodeToString(address)), - ) - - // Check if this is a hypergraph operation type - isHypergraphOp := op.GetHypergraphDeploy() != nil || - op.GetHypergraphUpdate() != nil || - op.GetVertexAdd() != nil || - op.GetVertexRemove() != nil || - op.GetHyperedgeAdd() != 
nil || - op.GetHyperedgeRemove() != nil - - if !isHypergraphOp { - // Skip non-hypergraph operations + select { + case <-e.ctx.Done(): + return errors.Wrap(errors.New("context canceled"), "validate bundle") + default: e.logger.Debug( - "skipping non-hypergraph operation in bundle", + "validating bundled operation", zap.Int("operation", i), + zap.String("address", hex.EncodeToString(address)), ) - continue - } - // Validate this operation individually - err := e.validateIndividualMessage( - frameNumber, - address, - op, - true, - ) - if err != nil { - return errors.Wrap(err, "validate bundle") + // Check if this is a hypergraph operation type + isHypergraphOp := op.GetHypergraphDeploy() != nil || + op.GetHypergraphUpdate() != nil || + op.GetVertexAdd() != nil || + op.GetVertexRemove() != nil || + op.GetHyperedgeAdd() != nil || + op.GetHyperedgeRemove() != nil + + if !isHypergraphOp { + // Skip non-hypergraph operations + e.logger.Debug( + "skipping non-hypergraph operation in bundle", + zap.Int("operation", i), + ) + continue + } + + // Validate this operation individually + err := e.validateIndividualMessage( + frameNumber, + address, + op, + true, + ) + if err != nil { + return errors.Wrap(err, "validate bundle") + } } } diff --git a/node/execution/engines/hypergraph_execution_engine_test.go b/node/execution/engines/hypergraph_execution_engine_test.go index ee6dd0a..40ffa3a 100644 --- a/node/execution/engines/hypergraph_execution_engine_test.go +++ b/node/execution/engines/hypergraph_execution_engine_test.go @@ -1,6 +1,7 @@ package engines_test import ( + "context" "crypto/rand" "math/big" "testing" @@ -12,6 +13,7 @@ import ( "go.uber.org/zap" "golang.org/x/crypto/sha3" hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/engines" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/hypergraph" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" @@ -48,7 +50,8 @@ func TestHypergraphExecutionEngine_Start(t *testing.T) { require.NoError(t, err) // Test starting and stopping the engine - errChan := engine.Start() + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + engine.Start(ctx, func() {}) // Engine should start without errors select { @@ -59,7 +62,8 @@ func TestHypergraphExecutionEngine_Start(t *testing.T) { } // Stop the engine - <-engine.Stop(false) + cancel() + <-ctx.Done() } func TestHypergraphExecutionEngine_ProcessMessage_Deploy(t *testing.T) { diff --git a/node/execution/engines/token_execution_engine.go b/node/execution/engines/token_execution_engine.go index ce01fcf..7099578 100644 --- a/node/execution/engines/token_execution_engine.go +++ b/node/execution/engines/token_execution_engine.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/fees" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/protobufs" @@ -49,7 +50,7 @@ type TokenExecutionEngine struct { intrinsicsMutex sync.RWMutex mode ExecutionMode mu sync.RWMutex - stopChan chan struct{} + ctx lifecycle.SignalerContext } func NewTokenExecutionEngine( @@ -254,45 +255,15 @@ func (e *TokenExecutionEngine) GetCapabilities() []*protobufs.Capability { } } -func (e *TokenExecutionEngine) Start() <-chan error { - errChan := make(chan 
error, 1) - - e.mu.Lock() - e.stopChan = make(chan struct{}, 1) - e.mu.Unlock() - - go func() { - e.logger.Info("starting token execution engine") - - <-e.stopChan - e.logger.Info("stopping token execution engine") - }() - - return errChan -} - -func (e *TokenExecutionEngine) Stop(force bool) <-chan error { - errChan := make(chan error, 1) - - go func() { - e.logger.Info("stopping token execution engine", zap.Bool("force", force)) - - // Signal stop if we have a stopChan - e.mu.RLock() - if e.stopChan != nil { - select { - case <-e.stopChan: - // Already closed - default: - close(e.stopChan) - } - } - e.mu.RUnlock() - - close(errChan) - }() - - return errChan +func (e *TokenExecutionEngine) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + e.ctx = ctx + e.logger.Info("starting token execution engine") + ready() + <-e.ctx.Done() + e.logger.Info("stopping token execution engine") } func (e *TokenExecutionEngine) ValidateMessage( @@ -357,37 +328,42 @@ func (e *TokenExecutionEngine) validateBundle( // Validate each operation in the bundle sequentially for i, op := range bundle.Requests { - e.logger.Debug( - "validating bundled operation", - zap.Int("operation", i), - zap.String("address", hex.EncodeToString(address)), - ) - - // Check if this is a hypergraph operation type - isHypergraphOp := op.GetTokenDeploy() != nil || - op.GetTokenUpdate() != nil || - op.GetTransaction() != nil || - op.GetMintTransaction() != nil || - op.GetPendingTransaction() != nil - - if !isHypergraphOp { - // Skip non-token operations + select { + case <-e.ctx.Done(): + return errors.Wrap(errors.New("context canceled"), "validate bundle") + default: e.logger.Debug( - "skipping non-token operation in bundle", + "validating bundled operation", zap.Int("operation", i), + zap.String("address", hex.EncodeToString(address)), ) - continue - } - // Validate this operation individually - err := e.validateIndividualMessage( - frameNumber, - address, - op, - true, - ) - if err != nil { - return errors.Wrap(err, "validate bundle") + // Check if this is a hypergraph operation type + isHypergraphOp := op.GetTokenDeploy() != nil || + op.GetTokenUpdate() != nil || + op.GetTransaction() != nil || + op.GetMintTransaction() != nil || + op.GetPendingTransaction() != nil + + if !isHypergraphOp { + // Skip non-token operations + e.logger.Debug( + "skipping non-token operation in bundle", + zap.Int("operation", i), + ) + continue + } + + // Validate this operation individually + err := e.validateIndividualMessage( + frameNumber, + address, + op, + true, + ) + if err != nil { + return errors.Wrap(err, "validate bundle") + } } } diff --git a/node/execution/engines/token_execution_engine_test.go b/node/execution/engines/token_execution_engine_test.go index 25cc9b9..ed4f35a 100644 --- a/node/execution/engines/token_execution_engine_test.go +++ b/node/execution/engines/token_execution_engine_test.go @@ -2,6 +2,7 @@ package engines_test import ( "bytes" + "context" "crypto/rand" "math/big" "slices" @@ -14,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" "golang.org/x/crypto/sha3" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/engines" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" @@ -86,7 +88,8 @@ func TestTokenExecutionEngine_Start(t *testing.T) { require.NoError(t, err) // Test starting and stopping the engine - 
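
On the caller side, the updated unit tests drive these workers with lifecycle.WithSignallerAndCancel and stop them by cancelling the returned context. The helper below is a hypothetical test utility that packages that pattern; the WithSignallerAndCancel return values and the Start signature are taken from the test diffs in this patch, but the startForTest wrapper itself is illustrative only.

```go
package engines_test

import (
	"context"
	"testing"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)

// startForTest runs a Start(ctx, ready)-style worker in a goroutine, waits
// for it to signal readiness, and returns a stop function the test can
// defer. It assumes the lifecycle helper signatures shown in the test
// diffs above.
func startForTest(
	t *testing.T,
	start func(lifecycle.SignalerContext, lifecycle.ReadyFunc),
) (stop func()) {
	t.Helper()

	ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background())

	ready := make(chan struct{})
	go start(ctx, func() { close(ready) })

	select {
	case <-ready:
		// worker called ready(); startup is complete
	case err := <-errChan:
		t.Fatalf("worker failed during startup: %v", err)
	case <-time.After(5 * time.Second):
		t.Fatal("worker did not become ready in time")
	}

	return func() {
		cancel()
		<-ctx.Done()
	}
}
```

A test would then call `stop := startForTest(t, engine.Start)` and `defer stop()` in place of the removed `<-engine.Stop(false)` calls.
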
errChan := engine.Start() + ctx, cancel, errChan := lifecycle.WithSignallerAndCancel(context.Background()) + engine.Start(ctx, func() {}) // Engine should start without errors select { @@ -97,7 +100,8 @@ func TestTokenExecutionEngine_Start(t *testing.T) { } // Stop the engine - <-engine.Stop(false) + cancel() + <-ctx.Done() } func TestTokenExecutionEngine_ProcessMessage_DeployEdgeCases(t *testing.T) { diff --git a/node/execution/manager/execution_manager.go b/node/execution/manager/execution_manager.go index 13f89a5..862f82c 100644 --- a/node/execution/manager/execution_manager.go +++ b/node/execution/manager/execution_manager.go @@ -8,12 +8,12 @@ import ( "slices" "strings" "sync" - "time" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/config" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/execution/engines" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/compute" hypergraphintrinsic "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/hypergraph" @@ -33,6 +33,7 @@ import ( // ExecutionEngineManager manages the lifecycle and coordination of execution // engines type ExecutionEngineManager struct { + builder lifecycle.ComponentManagerBuilder logger *zap.Logger config *config.Config engines map[string]execution.ShardExecutionEngine @@ -51,8 +52,6 @@ type ExecutionEngineManager struct { proverRegistry consensus.ProverRegistry blsConstructor crypto.BlsConstructor includeGlobal bool - quit chan struct{} - wg sync.WaitGroup } // NewExecutionEngineManager creates a new execution engine manager @@ -74,7 +73,7 @@ func NewExecutionEngineManager( blsConstructor crypto.BlsConstructor, includeGlobal bool, ) (*ExecutionEngineManager, error) { - return &ExecutionEngineManager{ + em := &ExecutionEngineManager{ logger: logger.With( zap.String("component", "execution_manager"), ), @@ -94,8 +93,20 @@ func NewExecutionEngineManager( proverRegistry: proverRegistry, blsConstructor: blsConstructor, includeGlobal: includeGlobal, - quit: make(chan struct{}), - }, nil + } + + err := em.InitializeEngines() + if err != nil { + return nil, err + } + + em.builder = lifecycle.NewComponentManagerBuilder() + + for _, engine := range em.engines { + em.builder.AddWorker(engine.Start) + } + + return em, nil } // InitializeEngines creates and registers all execution engines @@ -146,109 +157,15 @@ func (m *ExecutionEngineManager) InitializeEngines() error { } // StartAll starts all registered execution engines -func (m *ExecutionEngineManager) StartAll(quit chan struct{}) error { - m.enginesMu.RLock() - defer m.enginesMu.RUnlock() - +func (m *ExecutionEngineManager) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { m.logger.Info("starting all execution engines") - - for name, engine := range m.engines { - m.wg.Add(1) - go func(name string, engine execution.ShardExecutionEngine) { - defer m.wg.Done() - - m.logger.Info("starting execution engine", zap.String("engine", name)) - - // Start the engine - errChan := engine.Start() - - // Wait for any startup errors - select { - case err := <-errChan: - if err != nil { - m.logger.Error( - "execution engine failed to start", - zap.String("engine", name), - zap.Error(err), - ) - } - case <-time.After(5 * time.Second): - // Give engines time to report startup errors - m.logger.Info( - "execution engine started successfully", - zap.String("engine", name), - ) - } - }(name, engine) - } - 
- return nil -} - -// StopAll stops all execution engines -func (m *ExecutionEngineManager) StopAll(force bool) error { - m.enginesMu.RLock() - defer m.enginesMu.RUnlock() - - m.logger.Info("stopping all execution engines") - - var stopErrors []error - stopWg := sync.WaitGroup{} - - for name, engine := range m.engines { - stopWg.Add(1) - go func(name string, engine execution.ShardExecutionEngine) { - defer stopWg.Done() - - m.logger.Info("stopping execution engine", zap.String("engine", name)) - - errChan := engine.Stop(force) - select { - case err := <-errChan: - if err != nil && !force { - m.logger.Error( - "error stopping execution engine", - zap.String("engine", name), - zap.Error(err), - ) - stopErrors = append(stopErrors, err) - } - case <-time.After(10 * time.Second): - if !force { - err := errors.Errorf("timeout stopping engine: %s", name) - m.logger.Error( - "timeout stopping execution engine", - zap.String("engine", name), - ) - stopErrors = append(stopErrors, err) - } - } - }(name, engine) - } - - stopWg.Wait() - - if len(stopErrors) > 0 && !force { - return errors.Errorf("failed to stop %d engines", len(stopErrors)) - } - - // Wait for all goroutines to finish - done := make(chan struct{}) - go func() { - m.wg.Wait() - close(done) - }() - - select { - case <-done: - m.logger.Info("all execution engines stopped") - case <-time.After(30 * time.Second): - if !force { - return errors.New("timeout waiting for execution engines to stop") - } - } - - return nil + m.builder.Build().Start(ctx) + ready() + <-ctx.Done() + m.logger.Info("all execution engines stopped") } // GetEngine returns a specific execution engine by name @@ -737,30 +654,3 @@ func (m *ExecutionEngineManager) selectEngine( return nil } - -// RegisterAllEngines registers all engines from the manager with a consensus -// engine -func (m *ExecutionEngineManager) RegisterAllEngines( - registerFunc func(execution.ShardExecutionEngine, uint64) <-chan error, -) error { - m.enginesMu.RLock() - defer m.enginesMu.RUnlock() - - for name, engine := range m.engines { - errChan := registerFunc(engine, 0) // frame 0 for initial registration - select { - case err := <-errChan: - if err != nil { - return errors.Wrapf(err, "failed to register engine: %s", name) - } - m.logger.Info( - "registered engine with consensus", - zap.String("engine", name), - ) - default: - // Non-blocking, registration initiated - } - } - - return nil -} diff --git a/node/go.mod b/node/go.mod index 7b7d247..e4fd6c1 100644 --- a/node/go.mod +++ b/node/go.mod @@ -32,6 +32,8 @@ replace source.quilibrium.com/quilibrium/monorepo/hypergraph => ../hypergraph replace source.quilibrium.com/quilibrium/monorepo/consensus => ../consensus +replace source.quilibrium.com/quilibrium/monorepo/lifecycle => ../lifecycle + replace source.quilibrium.com/quilibrium/monorepo/rpm => ../rpm replace github.com/multiformats/go-multiaddr => ../go-multiaddr @@ -53,6 +55,7 @@ require ( github.com/libp2p/go-libp2p v0.41.1 github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/marcopolo/simnet v0.0.1 + github.com/muesli/reflow v0.3.0 github.com/shopspring/decimal v1.4.0 google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 @@ -84,6 +87,7 @@ require ( github.com/deiu/gon3 v0.0.0-20241212124032-93153c038193 // indirect github.com/deiu/rdf2go v0.0.0-20241212211204-b661ba0dfd25 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/gammazero/deque v0.2.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect 
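
ExecutionEngineManager no longer hand-rolls StartAll/StopAll with goroutines, WaitGroups, and timeouts; each engine's Start is registered as a worker on a lifecycle.ComponentManagerBuilder at construction time, and the manager's own Start just builds and runs that component manager. A condensed sketch of that arrangement follows; only the builder calls that appear in the diff (NewComponentManagerBuilder, AddWorker, Build().Start) are used, and the Worker interface and Supervisor type are illustrative.

```go
package manager

import (
	"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)

// Worker is any component following the Start(ctx, ready) contract used by
// the execution engines in this patch.
type Worker interface {
	Start(lifecycle.SignalerContext, lifecycle.ReadyFunc)
}

// Supervisor registers workers once, then runs them all under a single
// supervising context, mirroring ExecutionEngineManager's new shape.
type Supervisor struct {
	builder lifecycle.ComponentManagerBuilder
}

func NewSupervisor(workers ...Worker) *Supervisor {
	b := lifecycle.NewComponentManagerBuilder()
	for _, w := range workers {
		b.AddWorker(w.Start)
	}
	return &Supervisor{builder: b}
}

// Start launches every registered worker, reports readiness, and blocks
// until the context is cancelled; cancellation is the only stop signal.
func (s *Supervisor) Start(
	ctx lifecycle.SignalerContext,
	ready lifecycle.ReadyFunc,
) {
	s.builder.Build().Start(ctx)
	ready()
	<-ctx.Done()
}
```
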
github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect @@ -94,7 +98,6 @@ require ( github.com/mattn/go-runewidth v0.0.16 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect - github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pion/datachannel v1.5.10 // indirect @@ -125,10 +128,12 @@ require ( go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/mock v0.5.2 // indirect golang.org/x/time v0.12.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect source.quilibrium.com/quilibrium/monorepo/ferret v0.0.0-00010101000000-000000000000 // indirect + source.quilibrium.com/quilibrium/monorepo/lifecycle v0.0.0-00010101000000-000000000000 // indirect ) require ( @@ -163,6 +168,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect + github.com/gammazero/workerpool v1.1.3 github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -224,7 +230,7 @@ require ( github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/dig v1.19.0 // indirect @@ -235,7 +241,7 @@ require ( golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 golang.org/x/mod v0.25.0 // indirect golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.15.0 + golang.org/x/sync v0.17.0 golang.org/x/sys v0.36.0 // indirect golang.org/x/text v0.26.0 // indirect golang.org/x/tools v0.34.0 // indirect diff --git a/node/go.sum b/node/go.sum index 5a1479e..e22e523 100644 --- a/node/go.sum +++ b/node/go.sum @@ -110,6 +110,10 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= +github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -495,8 +499,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -531,6 +535,8 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= @@ -649,8 +655,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -814,6 +820,8 @@ lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/node/keys/file.go b/node/keys/file.go index d7d3d9a..64d267f 100644 --- a/node/keys/file.go +++ b/node/keys/file.go @@ -73,10 +73,17 @@ func NewFileKeyManager( defer file.Close() - d := yaml.NewDecoder(file) + fileInfo, err := file.Stat() - if 
err := d.Decode(store); err != nil { - logger.Panic("could not decode store", zap.Error(err)) + if err != nil { + logger.Panic("could not get key file info", zap.Error(err)) + } + + if fileInfo.Size() != 0 { + d := yaml.NewDecoder(file) + if err := d.Decode(store); err != nil { + logger.Panic("could not decode store", zap.Error(err)) + } } keyManager := &FileKeyManager{ diff --git a/node/main.go b/node/main.go index 564627b..9ae1c82 100644 --- a/node/main.go +++ b/node/main.go @@ -546,9 +546,6 @@ func main() { logger.Info("starting node...") - done := make(chan os.Signal, 1) - signal.Notify(done, syscall.SIGINT, syscall.SIGTERM) - // Create MasterNode for core 0 masterNode, err := app.NewMasterNode(logger, nodeConfig, uint(*core)) if err != nil { @@ -556,13 +553,8 @@ func main() { } // Start the master node - quitCh := make(chan struct{}) - go func() { - if err := masterNode.Start(quitCh); err != nil { - logger.Error("master node start error", zap.Error(err)) - close(quitCh) - } - }() + ctx, quit := context.WithCancel(context.Background()) + errCh := masterNode.Start(ctx) defer masterNode.Stop() if nodeConfig.ListenGRPCMultiaddr != "" { @@ -593,14 +585,16 @@ func main() { diskFullCh, ) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() monitor.Start(ctx) select { - case <-done: case <-diskFullCh: - case <-quitCh: + quit() + case err := <-errCh: + if err != nil { + logger.Error("master node error", zap.Error(err)) + } + quit() } } diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go index e7afe7a..d6f282b 100644 --- a/node/p2p/blossomsub.go +++ b/node/p2p/blossomsub.go @@ -12,6 +12,7 @@ import ( "math/big" "math/bits" "net" + "runtime/debug" "sync" "sync/atomic" "time" @@ -1018,6 +1019,7 @@ func (b *BlossomSub) subscribeHandler( b.logger.Error( "message handler panicked, recovering", zap.Any("panic", r), + zap.String("stack", string(debug.Stack())), ) } }() @@ -1508,8 +1510,18 @@ func (b *BlossomSub) GetDirectChannel( var lastError error for _, addr := range pi.Addrs { var mga net.Addr + b.logger.Debug( + "attempting to get direct channel with peer", + zap.String("peer", peer.ID(peerID).String()), + zap.String("addr", addr.String()), + ) mga, lastError = mn.ToNetAddr(addr) if lastError != nil { + b.logger.Debug( + "skipping address", + zap.String("addr", addr.String()), + zap.Error(lastError), + ) continue } @@ -1519,6 +1531,11 @@ func (b *BlossomSub) GetDirectChannel( grpc.WithTransportCredentials(creds), ) if lastError != nil { + b.logger.Debug( + "could not connect", + zap.String("addr", addr.String()), + zap.Error(lastError), + ) continue } diff --git a/node/p2p/onion/onion_integration_test.go b/node/p2p/onion/onion_integration_test.go index ea1835b..6f5fedb 100644 --- a/node/p2p/onion/onion_integration_test.go +++ b/node/p2p/onion/onion_integration_test.go @@ -18,6 +18,7 @@ import ( health "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/node/consensus/registration" "source.quilibrium.com/quilibrium/monorepo/node/keys" "source.quilibrium.com/quilibrium/monorepo/node/p2p" @@ -276,7 +277,7 @@ func TestOnionGRPC_RealRelayAndKeys(t *testing.T) { logger := zap.NewNop() // 1) Spin up a real gRPC health server (ephemeral port) - lis, err := net.Listen("tcp", "127.0.0.1:8080") + lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) s := grpc.NewServer() hs := health.NewServer() @@ -345,8 +346,11 @@ func 
TestOnionGRPC_RealRelayAndKeys(t *testing.T) { // 6) PeerInfoManager ordering (entry->middle->exit) pm := p2p.NewInMemoryPeerInfoManager(logger) - pm.Start() - defer pm.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + readyWait := make(chan struct{}) + go pm.Start(ctx, func() { close(readyWait) }) + <-readyWait + defer cancel() pm.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relay1"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) pm.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relay2"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) pm.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relay3"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) @@ -372,9 +376,9 @@ func TestOnionGRPC_RealRelayAndKeys(t *testing.T) { ) // 8) Build a 3-hop circuit - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - circ, err := or.BuildCircuit(ctx, 3) + hctx, hcancel := context.WithTimeout(context.Background(), 5*time.Second) + defer hcancel() + circ, err := or.BuildCircuit(hctx, 3) require.NoError(t, err) // 9) gRPC dial through onion using MULTIADDR as "addr" (relay expects MA bytes in BEGIN) @@ -458,15 +462,20 @@ func TestHiddenService_RemoteRendezvous(t *testing.T) { // Peer managers (client knows R, service knows A then R) pmClient := p2p.NewInMemoryPeerInfoManager(logger) - pmClient.Start() - defer pmClient.Stop() + ctx, cancel, _ := lifecycle.WithSignallerAndCancel(context.Background()) + readyWait := make(chan struct{}) + go pmClient.Start(ctx, func() { close(readyWait) }) + <-readyWait + defer cancel() // client knows three rendezvous relays for _, id := range [][]byte{[]byte("relayR1"), []byte("relayR2"), []byte("relayR3")} { pmClient.AddPeerInfo(&protobufs.PeerInfo{PeerId: id, Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) } pmService := p2p.NewInMemoryPeerInfoManager(logger) - pmService.Start() - defer pmService.Stop() + readyWait = make(chan struct{}) + go pmService.Start(ctx, func() { close(readyWait) }) + <-readyWait + // service knows three intro relays for _, id := range [][]byte{[]byte("relayA1"), []byte("relayA2"), []byte("relayA3")} { pmService.AddPeerInfo(&protobufs.PeerInfo{PeerId: id, Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) @@ -487,17 +496,17 @@ func TestHiddenService_RemoteRendezvous(t *testing.T) { onion.WithKeyConstructor(func() ([]byte, []byte, error) { k := keys.NewX448Key(); return k.Public(), k.Private(), nil }), onion.WithSharedSecret(func(priv, pub []byte) ([]byte, error) { e, _ := keys.X448KeyFromBytes(priv); return e.AgreeWith(pub) }), ) - ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second) - defer cancel() + hctx, hcancel := context.WithTimeout(context.Background(), 6*time.Second) + defer hcancel() var serviceID [32]byte copy(serviceID[:], []byte("service-id-32-bytes-------------")[:32]) - _, err = orService.RegisterIntro(ctx, []byte("relayA1"), serviceID) + _, err = orService.RegisterIntro(hctx, []byte("relayA1"), serviceID) require.NoError(t, err) // CLIENT: build circuit to rendezvous relay and send REND1 - cR, err := orClient.BuildCircuitToExit(ctx, 3, []byte("relayR1")) + cR, err := orClient.BuildCircuitToExit(hctx, 3, []byte("relayR1")) require.NoError(t, err) var cookie [16]byte _, _ = rand.Read(cookie[:]) @@ -506,8 +515,9 @@ func TestHiddenService_RemoteRendezvous(t *testing.T) 
{ // CLIENT: build circuit to intro relay and send INTRODUCE(serviceID, "relayR", cookie, clientSid) pmIntro := p2p.NewInMemoryPeerInfoManager(logger) - pmIntro.Start() - defer pmIntro.Stop() + readyWait = make(chan struct{}) + go pmIntro.Start(ctx, func() { close(readyWait) }) + <-readyWait pmIntro.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relayA1"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) pmIntro.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relayA2"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) pmIntro.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relayA3"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) @@ -518,7 +528,7 @@ func TestHiddenService_RemoteRendezvous(t *testing.T) { onion.WithSharedSecret(func(priv, pub []byte) ([]byte, error) { e, _ := keys.X448KeyFromBytes(priv); return e.AgreeWith(pub) }), ) - cI, err := orIntro.BuildCircuit(ctx, 3) + cI, err := orIntro.BuildCircuit(hctx, 3) require.NoError(t, err) require.NoError(t, orClient.ClientIntroduce(cI, serviceID, "relayR1", cookie, clientSid)) @@ -527,7 +537,7 @@ func TestHiddenService_RemoteRendezvous(t *testing.T) { pmService.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relayR2"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) pmService.AddPeerInfo(&protobufs.PeerInfo{PeerId: []byte("relayR3"), Capabilities: []*protobufs.Capability{{ProtocolIdentifier: onion.ProtocolRouting}}}) time.Sleep(150 * time.Millisecond) - cRS, err := orService.BuildCircuitToExit(ctx, 3, []byte("relayR1")) + cRS, err := orService.BuildCircuitToExit(hctx, 3, []byte("relayR1")) require.NoError(t, err) serviceSid := uint16(0xD777) require.NoError(t, orService.ServiceCompleteRendezvous(cRS, cookie, serviceSid)) diff --git a/node/p2p/peer_info_manager.go b/node/p2p/peer_info_manager.go index 13571eb..7515cbe 100644 --- a/node/p2p/peer_info_manager.go +++ b/node/p2p/peer_info_manager.go @@ -6,6 +6,7 @@ import ( "time" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/p2p" ) @@ -13,11 +14,11 @@ import ( type InMemoryPeerInfoManager struct { logger *zap.Logger peerInfoCh chan *protobufs.PeerInfo - quitCh chan struct{} peerInfoMx sync.RWMutex peerMap map[string]*p2p.PeerInfo fastestPeers []*p2p.PeerInfo + ctx lifecycle.SignalerContext } var _ p2p.PeerInfoManager = (*InMemoryPeerInfoManager)(nil) @@ -31,62 +32,60 @@ func NewInMemoryPeerInfoManager(logger *zap.Logger) *InMemoryPeerInfoManager { } } -func (m *InMemoryPeerInfoManager) Start() { - go func() { - for { - select { - case info := <-m.peerInfoCh: - m.peerInfoMx.Lock() - reachability := []p2p.Reachability{} - for _, r := range info.Reachability { - reachability = append(reachability, p2p.Reachability{ - Filter: r.Filter, - PubsubMultiaddrs: r.PubsubMultiaddrs, - StreamMultiaddrs: r.StreamMultiaddrs, - }) - } - capabilities := []p2p.Capability{} - for _, c := range info.Capabilities { - capabilities = append(capabilities, p2p.Capability{ - ProtocolIdentifier: c.ProtocolIdentifier, - AdditionalMetadata: c.AdditionalMetadata, - }) - } - seen := time.Now().UnixMilli() - m.peerMap[string(info.PeerId)] = &p2p.PeerInfo{ - PeerId: info.PeerId, - Bandwidth: 100, - Capabilities: capabilities, - Reachability: reachability, - Cores: uint32(len(reachability)), - LastSeen: seen, - } - m.searchAndInsertPeer(&p2p.PeerInfo{ 
- PeerId: info.PeerId, - Bandwidth: 100, - Capabilities: capabilities, - Reachability: reachability, - Cores: uint32(len(reachability)), - LastSeen: seen, +func (m *InMemoryPeerInfoManager) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + m.ctx = ctx + ready() + for { + select { + case info := <-m.peerInfoCh: + m.peerInfoMx.Lock() + reachability := []p2p.Reachability{} + for _, r := range info.Reachability { + reachability = append(reachability, p2p.Reachability{ + Filter: r.Filter, + PubsubMultiaddrs: r.PubsubMultiaddrs, + StreamMultiaddrs: r.StreamMultiaddrs, }) - m.peerInfoMx.Unlock() - case <-m.quitCh: - return } + capabilities := []p2p.Capability{} + for _, c := range info.Capabilities { + capabilities = append(capabilities, p2p.Capability{ + ProtocolIdentifier: c.ProtocolIdentifier, + AdditionalMetadata: c.AdditionalMetadata, + }) + } + seen := time.Now().UnixMilli() + m.peerMap[string(info.PeerId)] = &p2p.PeerInfo{ + PeerId: info.PeerId, + Bandwidth: 100, + Capabilities: capabilities, + Reachability: reachability, + Cores: uint32(len(reachability)), + LastSeen: seen, + } + m.searchAndInsertPeer(&p2p.PeerInfo{ + PeerId: info.PeerId, + Bandwidth: 100, + Capabilities: capabilities, + Reachability: reachability, + Cores: uint32(len(reachability)), + LastSeen: seen, + }) + m.peerInfoMx.Unlock() + case <-ctx.Done(): + return } - }() -} - -func (m *InMemoryPeerInfoManager) Stop() { - go func() { - m.quitCh <- struct{}{} - }() + } } func (m *InMemoryPeerInfoManager) AddPeerInfo(info *protobufs.PeerInfo) { - go func() { - m.peerInfoCh <- info - }() + select { + case <-m.ctx.Done(): + case m.peerInfoCh <- info: + } } func (m *InMemoryPeerInfoManager) GetPeerInfo(peerId []byte) *p2p.PeerInfo { diff --git a/node/store/clock.go b/node/store/clock.go index 673d6c1..4e81fd3 100644 --- a/node/store/clock.go +++ b/node/store/clock.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "encoding/gob" "math/big" + "slices" "github.com/cockroachdb/pebble" "github.com/pkg/errors" @@ -28,12 +29,48 @@ type PebbleGlobalClockIterator struct { } type PebbleClockIterator struct { + filter []byte + start uint64 + end uint64 + cur uint64 + db *PebbleClockStore +} + +type PebbleGlobalStateIterator struct { i store.Iterator db *PebbleClockStore } +type PebbleAppShardStateIterator struct { + filter []byte + start uint64 + end uint64 + cur uint64 + db *PebbleClockStore +} + +type PebbleQuorumCertificateIterator struct { + filter []byte + start uint64 + end uint64 + cur uint64 + db *PebbleClockStore +} + +type PebbleTimeoutCertificateIterator struct { + filter []byte + start uint64 + end uint64 + cur uint64 + db *PebbleClockStore +} + var _ store.TypedIterator[*protobufs.GlobalFrame] = (*PebbleGlobalClockIterator)(nil) var _ store.TypedIterator[*protobufs.AppShardFrame] = (*PebbleClockIterator)(nil) +var _ store.TypedIterator[*protobufs.GlobalProposal] = (*PebbleGlobalStateIterator)(nil) +var _ store.TypedIterator[*protobufs.AppShardProposal] = (*PebbleAppShardStateIterator)(nil) +var _ store.TypedIterator[*protobufs.QuorumCertificate] = (*PebbleQuorumCertificateIterator)(nil) +var _ store.TypedIterator[*protobufs.TimeoutCertificate] = (*PebbleTimeoutCertificateIterator)(nil) func (p *PebbleGlobalClockIterator) First() bool { return p.i.First() @@ -129,71 +166,265 @@ func (p *PebbleGlobalClockIterator) Close() error { } func (p *PebbleClockIterator) First() bool { - return p.i.First() + p.cur = p.start + return true } func (p *PebbleClockIterator) Next() bool { - return p.i.Next() + p.cur++ + 
return p.cur < p.end } func (p *PebbleClockIterator) Prev() bool { - return p.i.Prev() + p.cur-- + return p.cur >= p.start } func (p *PebbleClockIterator) Valid() bool { - return p.i.Valid() + return p.cur >= p.start && p.cur < p.end } func (p *PebbleClockIterator) TruncatedValue() ( *protobufs.AppShardFrame, error, ) { - if !p.i.Valid() { + if !p.Valid() { return nil, store.ErrNotFound } - value := p.i.Value() - frame := &protobufs.AppShardFrame{} - frameValue, frameCloser, err := p.db.db.Get(value) - if err != nil { - return nil, errors.Wrap(err, "get truncated clock frame iterator value") - } - defer frameCloser.Close() - if err := proto.Unmarshal(frameValue, frame); err != nil { - return nil, errors.Wrap( - errors.Wrap(err, store.ErrInvalidData.Error()), - "get truncated clock frame iterator value", - ) - } - - return frame, nil + return p.Value() } func (p *PebbleClockIterator) Value() (*protobufs.AppShardFrame, error) { - if !p.i.Valid() { + if !p.Valid() { return nil, store.ErrNotFound } - value := p.i.Value() - frame := &protobufs.AppShardFrame{} - - frameValue, frameCloser, err := p.db.db.Get(value) + frame, _, err := p.db.GetShardClockFrame(p.filter, p.cur, false) if err != nil { return nil, errors.Wrap(err, "get clock frame iterator value") } - defer frameCloser.Close() - if err := proto.Unmarshal(frameValue, frame); err != nil { - return nil, errors.Wrap( - errors.Wrap(err, store.ErrInvalidData.Error()), - "get clock frame iterator value", - ) - } return frame, nil } func (p *PebbleClockIterator) Close() error { - return errors.Wrap(p.i.Close(), "closing clock frame iterator") + return nil +} + +func (p *PebbleGlobalStateIterator) First() bool { + return p.i.First() +} + +func (p *PebbleGlobalStateIterator) Next() bool { + return p.i.Next() +} + +func (p *PebbleGlobalStateIterator) Prev() bool { + return p.i.Prev() +} + +func (p *PebbleGlobalStateIterator) Valid() bool { + return p.i.Valid() +} + +func (p *PebbleGlobalStateIterator) Value() ( + *protobufs.GlobalProposal, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + value := p.i.Value() + if len(value) != 24 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get certified global state", + ) + } + + frameNumber := binary.BigEndian.Uint64(value[:8]) + qcRank := binary.BigEndian.Uint64(value[8:16]) + tcRank := binary.BigEndian.Uint64(value[16:]) + + frame, err := p.db.GetGlobalClockFrame(frameNumber) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + qc, err := p.db.GetQuorumCertificate(nil, qcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + tc, err := p.db.GetTimeoutCertificate(nil, tcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + return &protobufs.GlobalProposal{ + State: frame, + ParentQuorumCertificate: qc, + PriorRankTimeoutCertificate: tc, + }, nil +} + +func (p *PebbleGlobalStateIterator) TruncatedValue() ( + *protobufs.GlobalProposal, + error, +) { + return p.Value() +} + +func (p *PebbleGlobalStateIterator) Close() error { + return p.i.Close() +} + +func (p *PebbleAppShardStateIterator) First() bool { + p.cur = p.start + return true +} + +func (p *PebbleAppShardStateIterator) Next() bool { + p.cur++ + return p.cur < p.end +} + +func (p *PebbleAppShardStateIterator) Prev() bool { + p.cur-- + return p.cur >= p.start +} + +func (p 
*PebbleAppShardStateIterator) Valid() bool { + return p.cur >= p.start && p.cur < p.end +} + +func (p *PebbleAppShardStateIterator) Close() error { + return nil +} + +func (p *PebbleAppShardStateIterator) Value() ( + *protobufs.AppShardProposal, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + state, err := p.db.GetCertifiedAppShardState(p.filter, p.cur) + if err != nil { + return nil, errors.Wrap(err, "get app shard state iterator value") + } + + return state, nil +} + +func (p *PebbleAppShardStateIterator) TruncatedValue() ( + *protobufs.AppShardProposal, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + return p.Value() +} + +func (p *PebbleQuorumCertificateIterator) First() bool { + p.cur = p.start + return true +} + +func (p *PebbleQuorumCertificateIterator) Next() bool { + p.cur++ + return p.cur < p.end +} + +func (p *PebbleQuorumCertificateIterator) Prev() bool { + p.cur-- + return p.cur >= p.start +} + +func (p *PebbleQuorumCertificateIterator) Valid() bool { + return p.cur >= p.start && p.cur < p.end +} + +func (p *PebbleQuorumCertificateIterator) Close() error { + return nil +} + +func (p *PebbleQuorumCertificateIterator) Value() ( + *protobufs.QuorumCertificate, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + qc, err := p.db.GetQuorumCertificate(p.filter, p.cur) + if err != nil { + return nil, errors.Wrap(err, "get quorum certificate iterator value") + } + + return qc, nil +} + +func (p *PebbleQuorumCertificateIterator) TruncatedValue() ( + *protobufs.QuorumCertificate, + error, +) { + return p.Value() +} + +func (p *PebbleTimeoutCertificateIterator) First() bool { + p.cur = p.start + return true +} + +func (p *PebbleTimeoutCertificateIterator) Next() bool { + p.cur++ + return p.cur < p.end +} + +func (p *PebbleTimeoutCertificateIterator) Prev() bool { + p.cur-- + return p.cur >= p.start +} + +func (p *PebbleTimeoutCertificateIterator) Valid() bool { + return p.cur >= p.start && p.cur < p.end +} + +func (p *PebbleTimeoutCertificateIterator) Close() error { + return nil +} + +func (p *PebbleTimeoutCertificateIterator) Value() ( + *protobufs.TimeoutCertificate, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + tc, err := p.db.GetTimeoutCertificate(p.filter, p.cur) + if err != nil { + return nil, errors.Wrap(err, "get timeout certificate iterator value") + } + + return tc, nil +} + +func (p *PebbleTimeoutCertificateIterator) TruncatedValue() ( + *protobufs.TimeoutCertificate, + error, +) { + if !p.Valid() { + return nil, store.ErrNotFound + } + + return p.Value() } func NewPebbleClockStore(db store.KVDB, logger *zap.Logger) *PebbleClockStore { @@ -203,6 +434,84 @@ func NewPebbleClockStore(db store.KVDB, logger *zap.Logger) *PebbleClockStore { } } +func (p *PebbleClockStore) updateEarliestIndex( + txn store.Transaction, + key []byte, + rank uint64, +) error { + existing, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return txn.Set( + key, + binary.BigEndian.AppendUint64(nil, rank), + ) + } + return err + } + defer closer.Close() + + if len(existing) != 8 { + return errors.Wrap( + store.ErrInvalidData, + "earliest index contained unexpected length", + ) + } + + if binary.BigEndian.Uint64(existing) > rank { + return txn.Set( + key, + binary.BigEndian.AppendUint64(nil, rank), + ) + } + + return nil +} + +func (p *PebbleClockStore) updateLatestIndex( + txn store.Transaction, + key []byte, + rank uint64, +) error { + existing, closer, err := 
p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return txn.Set( + key, + binary.BigEndian.AppendUint64(nil, rank), + ) + } + return err + } + defer closer.Close() + + if len(existing) != 8 { + return errors.Wrap( + store.ErrInvalidData, + "latest index contained unexpected length", + ) + } + + if binary.BigEndian.Uint64(existing) < rank { + return txn.Set( + key, + binary.BigEndian.AppendUint64(nil, rank), + ) + } + + return nil +} + +func deleteIfExists(txn store.Transaction, key []byte) error { + if err := txn.Delete(key); err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil + } + return err + } + return nil +} + // // DB Keys // @@ -211,9 +520,6 @@ func NewPebbleClockStore(db store.KVDB, logger *zap.Logger) *PebbleClockStore { // Increment necessarily must be full width – elsewise the frame number would // easily produce conflicts if filters are stepped by byte: // 0x01 || 0xffff == 0x01ff || 0xff -// -// Global frames are serialized as output data only, Data frames are raw -// protobufs for fast disk-to-network output. func clockFrameKey(filter []byte, frameNumber uint64, frameType byte) []byte { key := []byte{CLOCK_FRAME, frameType} @@ -276,6 +582,82 @@ func clockDataEarliestIndex(filter []byte) []byte { return clockEarliestIndex(filter, CLOCK_SHARD_FRAME_INDEX_EARLIEST) } +func clockGlobalCertifiedStateEarliestIndex() []byte { + return []byte{CLOCK_FRAME, CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_EARLIEST} +} + +func clockShardCertifiedStateEarliestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_SHARD_CERTIFIED_STATE_INDEX_EARLIEST}, + filter, + ) +} + +func clockGlobalCertifiedStateLatestIndex() []byte { + return []byte{CLOCK_FRAME, CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_LATEST} +} + +func clockShardCertifiedStateLatestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_SHARD_CERTIFIED_STATE_INDEX_LATEST}, + filter, + ) +} + +func clockGlobalCertifiedStateKey(rank uint64) []byte { + key := []byte{CLOCK_FRAME, CLOCK_GLOBAL_CERTIFIED_STATE} + key = binary.BigEndian.AppendUint64(key, rank) + return key +} + +func clockShardCertifiedStateKey(rank uint64, filter []byte) []byte { + key := []byte{CLOCK_FRAME, CLOCK_SHARD_CERTIFIED_STATE} + key = binary.BigEndian.AppendUint64(key, rank) + key = append(key, filter...) + return key +} + +func clockQuorumCertificateKey(rank uint64, filter []byte) []byte { + key := []byte{CLOCK_FRAME, CLOCK_QUORUM_CERTIFICATE} + key = binary.BigEndian.AppendUint64(key, rank) + return key +} + +func clockQuorumCertificateEarliestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_QUORUM_CERTIFICATE_INDEX_EARLIEST}, + filter, + ) +} + +func clockQuorumCertificateLatestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_QUORUM_CERTIFICATE_INDEX_LATEST}, + filter, + ) +} + +func clockTimeoutCertificateKey(rank uint64, filter []byte) []byte { + key := []byte{CLOCK_FRAME, CLOCK_TIMEOUT_CERTIFICATE} + key = binary.BigEndian.AppendUint64(key, rank) + key = append(key, filter...) 
+ return key +} + +func clockTimeoutCertificateEarliestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_TIMEOUT_CERTIFICATE_INDEX_EARLIEST}, + filter, + ) +} + +func clockTimeoutCertificateLatestIndex(filter []byte) []byte { + return slices.Concat( + []byte{CLOCK_FRAME, CLOCK_TIMEOUT_CERTIFICATE_INDEX_LATEST}, + filter, + ) +} + // Produces an index key of size: len(filter) + 42 func clockParentIndexKey( filter []byte, @@ -303,20 +685,6 @@ func clockShardParentIndexKey( ) } -// func clockShardCandidateFrameKey( -// address []byte, -// frameNumber uint64, -// parent []byte, -// distance []byte, -// ) []byte { -// key := []byte{CLOCK_FRAME, CLOCK_SHARD_FRAME_CANDIDATE_SHARD} -// key = binary.BigEndian.AppendUint64(key, frameNumber) -// key = append(key, address...) -// key = append(key, rightAlign(parent, 32)...) -// key = append(key, rightAlign(distance, 32)...) -// return key -// } - func clockProverTrieKey(filter []byte, ring uint16, frameNumber uint64) []byte { key := []byte{CLOCK_FRAME, CLOCK_SHARD_FRAME_FRECENCY_SHARD} key = binary.BigEndian.AppendUint16(key, ring) @@ -363,6 +731,22 @@ func clockGlobalFrameRequestKey( return key } +func clockProposalVoteKey(rank uint64, filter []byte, identity []byte) []byte { + key := []byte{CLOCK_FRAME, CLOCK_PROPOSAL_VOTE} + key = binary.BigEndian.AppendUint64(key, rank) + key = append(key, filter...) + key = append(key, identity...) + return key +} + +func clockTimeoutVoteKey(rank uint64, filter []byte, identity []byte) []byte { + key := []byte{CLOCK_FRAME, CLOCK_TIMEOUT_VOTE} + key = binary.BigEndian.AppendUint64(key, rank) + key = append(key, filter...) + key = append(key, identity...) + return key +} + func (p *PebbleClockStore) NewTransaction(indexed bool) ( store.Transaction, error, @@ -858,15 +1242,13 @@ func (p *PebbleClockStore) RangeShardClockFrames( startFrameNumber = temp } - iter, err := p.db.NewIter( - clockShardFrameKey(filter, startFrameNumber), - clockShardFrameKey(filter, endFrameNumber+1), - ) - if err != nil { - return nil, errors.Wrap(err, "get shard clock frames") - } - - return &PebbleClockIterator{i: iter, db: p}, nil + return &PebbleClockIterator{ + filter: filter, + start: startFrameNumber, + end: endFrameNumber + 1, + cur: startFrameNumber, + db: p, + }, nil } func (p *PebbleClockStore) SetLatestShardClockFrameNumber( @@ -1216,3 +1598,895 @@ func (p *PebbleClockStore) SetShardStateTree( "set data state tree", ) } + +func (p *PebbleClockStore) GetLatestCertifiedGlobalState() ( + *protobufs.GlobalProposal, + error, +) { + idxValue, closer, err := p.db.Get(clockGlobalCertifiedStateLatestIndex()) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get latest certified global state") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get latest certified global state", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetCertifiedGlobalState(rank) +} + +func (p *PebbleClockStore) GetEarliestCertifiedGlobalState() ( + *protobufs.GlobalProposal, + error, +) { + idxValue, closer, err := p.db.Get(clockGlobalCertifiedStateEarliestIndex()) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get earliest certified global state") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get earliest certified global state", + ) + } 
+ + rank := binary.BigEndian.Uint64(idxValue) + return p.GetCertifiedGlobalState(rank) +} + +func (p *PebbleClockStore) GetCertifiedGlobalState(rank uint64) ( + *protobufs.GlobalProposal, + error, +) { + key := clockGlobalCertifiedStateKey(rank) + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get certified global state") + } + defer closer.Close() + + if len(value) != 24 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get certified global state", + ) + } + + frameNumber := binary.BigEndian.Uint64(value[:8]) + qcRank := binary.BigEndian.Uint64(value[8:16]) + tcRank := binary.BigEndian.Uint64(value[16:]) + + frame, err := p.GetGlobalClockFrame(frameNumber) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + vote, err := p.GetProposalVote(nil, frame.GetRank(), frame.Header.Prover) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified app shard state") + } + + qc, err := p.GetQuorumCertificate(nil, qcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + tc, err := p.GetTimeoutCertificate(nil, tcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified global state") + } + + return &protobufs.GlobalProposal{ + State: frame, + ParentQuorumCertificate: qc, + PriorRankTimeoutCertificate: tc, + Vote: vote, + }, nil +} + +func (p *PebbleClockStore) RangeCertifiedGlobalStates( + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.GlobalProposal], error) { + if startRank > endRank { + startRank, endRank = endRank, startRank + } + + iter, err := p.db.NewIter( + clockGlobalCertifiedStateKey(startRank), + clockGlobalCertifiedStateKey(endRank+1), + ) + if err != nil { + return nil, errors.Wrap(err, "range certified global states") + } + + return &PebbleGlobalStateIterator{i: iter, db: p}, nil +} + +func (p *PebbleClockStore) PutCertifiedGlobalState( + state *protobufs.GlobalProposal, + txn store.Transaction, +) error { + if state == nil { + return errors.Wrap( + errors.New("proposal is required"), + "put certified global state", + ) + } + + rank := uint64(0) + frameNumber := uint64(0xffffffffffffffff) + qcRank := uint64(0xffffffffffffffff) + tcRank := uint64(0xffffffffffffffff) + if state.State != nil { + if state.State.Header.Rank > rank { + rank = state.State.Header.Rank + } + frameNumber = state.State.Header.FrameNumber + if err := p.PutGlobalClockFrame(state.State, txn); err != nil { + return errors.Wrap(err, "put certified global state") + } + if err := p.PutProposalVote(txn, state.Vote); err != nil { + return errors.Wrap(err, "put certified global state") + } + } + if state.ParentQuorumCertificate != nil { + if state.ParentQuorumCertificate.Rank > rank { + rank = state.ParentQuorumCertificate.Rank + } + qcRank = state.ParentQuorumCertificate.Rank + if err := p.PutQuorumCertificate( + state.ParentQuorumCertificate, + txn, + ); err != nil { + return errors.Wrap(err, "put certified global state") + } + } + if state.PriorRankTimeoutCertificate != nil { + if state.PriorRankTimeoutCertificate.Rank > rank { + rank = state.PriorRankTimeoutCertificate.Rank + } + tcRank = state.PriorRankTimeoutCertificate.Rank + if err := p.PutTimeoutCertificate( + state.PriorRankTimeoutCertificate, + txn, + ); err != nil { + return 
errors.Wrap(err, "put certified global state") + } + } + + key := clockGlobalCertifiedStateKey(rank) + value := []byte{} + value = binary.BigEndian.AppendUint64(value, frameNumber) + value = binary.BigEndian.AppendUint64(value, qcRank) + value = binary.BigEndian.AppendUint64(value, tcRank) + + if err := txn.Set(key, value); err != nil { + return errors.Wrap(err, "put certified global state") + } + + if err := p.updateEarliestIndex( + txn, + clockGlobalCertifiedStateEarliestIndex(), + rank, + ); err != nil { + return errors.Wrap(err, "put certified global state") + } + + if err := txn.Set( + clockGlobalCertifiedStateLatestIndex(), + binary.BigEndian.AppendUint64(nil, rank), + ); err != nil { + return errors.Wrap(err, "put certified global state") + } + + return nil +} + +func (p *PebbleClockStore) GetLatestCertifiedAppShardState( + filter []byte, +) ( + *protobufs.AppShardProposal, + error, +) { + idxValue, closer, err := p.db.Get( + clockShardCertifiedStateLatestIndex([]byte{}), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get latest certified app shard state") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get latest certified app shard state", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetCertifiedAppShardState(filter, rank) +} + +func (p *PebbleClockStore) GetEarliestCertifiedAppShardState( + filter []byte, +) ( + *protobufs.AppShardProposal, + error, +) { + idxValue, closer, err := p.db.Get( + clockShardCertifiedStateEarliestIndex([]byte{}), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get earliest certified app shard state") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get earliest certified app shard state", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetCertifiedAppShardState(filter, rank) +} + +func (p *PebbleClockStore) GetCertifiedAppShardState( + filter []byte, + rank uint64, +) ( + *protobufs.AppShardProposal, + error, +) { + key := clockShardCertifiedStateKey(rank, filter) + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get certified app shard state") + } + defer closer.Close() + + if len(value) != 24 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get certified app shard state", + ) + } + + frameNumber := binary.BigEndian.Uint64(value[:8]) + qcRank := binary.BigEndian.Uint64(value[8:16]) + tcRank := binary.BigEndian.Uint64(value[16:]) + + frame, _, err := p.GetShardClockFrame(filter, frameNumber, false) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified app shard state") + } + + vote, err := p.GetProposalVote(filter, frame.GetRank(), frame.Header.Prover) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified app shard state") + } + + qc, err := p.GetQuorumCertificate(filter, qcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified app shard state") + } + + tc, err := p.GetTimeoutCertificate(filter, tcRank) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "get certified app shard state") + } + + return &protobufs.AppShardProposal{ 
+ State: frame, + ParentQuorumCertificate: qc, + PriorRankTimeoutCertificate: tc, + Vote: vote, + }, nil +} + +func (p *PebbleClockStore) RangeCertifiedAppShardStates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.AppShardProposal], error) { + if startRank > endRank { + startRank, endRank = endRank, startRank + } + + return &PebbleAppShardStateIterator{ + filter: filter, + start: startRank, + end: endRank + 1, + cur: startRank, + db: p, + }, nil +} + +func (p *PebbleClockStore) PutCertifiedAppShardState( + state *protobufs.AppShardProposal, + txn store.Transaction, +) error { + if state == nil { + return errors.Wrap( + errors.New("proposal is required"), + "put certified app shard state", + ) + } + + rank := uint64(0) + filter := []byte{} + frameNumber := uint64(0xffffffffffffffff) + qcRank := uint64(0xffffffffffffffff) + tcRank := uint64(0xffffffffffffffff) + if state.State != nil { + if state.State.Header.Rank > rank { + rank = state.State.Header.Rank + } + frameNumber = state.State.Header.FrameNumber + if err := p.StageShardClockFrame( + []byte(state.State.Identity()), + state.State, + txn, + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + if err := p.CommitShardClockFrame( + state.State.Header.Address, + frameNumber, + []byte(state.State.Identity()), + nil, + txn, + false, + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + if err := p.PutProposalVote(txn, state.Vote); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + filter = state.State.Header.Address + } + if state.ParentQuorumCertificate != nil { + if state.ParentQuorumCertificate.Rank > rank { + rank = state.ParentQuorumCertificate.Rank + } + qcRank = state.ParentQuorumCertificate.Rank + if err := p.PutQuorumCertificate( + state.ParentQuorumCertificate, + txn, + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + filter = state.ParentQuorumCertificate.Filter + } + if state.PriorRankTimeoutCertificate != nil { + if state.PriorRankTimeoutCertificate.Rank > rank { + rank = state.PriorRankTimeoutCertificate.Rank + } + tcRank = state.PriorRankTimeoutCertificate.Rank + if err := p.PutTimeoutCertificate( + state.PriorRankTimeoutCertificate, + txn, + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + filter = state.PriorRankTimeoutCertificate.Filter + } + + if bytes.Equal(filter, []byte{}) { + return errors.Wrap( + errors.New("invalid filter"), + "put certified app shard state", + ) + } + + key := clockShardCertifiedStateKey(rank, filter) + value := []byte{} + value = binary.BigEndian.AppendUint64(value, frameNumber) + value = binary.BigEndian.AppendUint64(value, qcRank) + value = binary.BigEndian.AppendUint64(value, tcRank) + + if err := txn.Set(key, value); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + + if err := p.updateEarliestIndex( + txn, + clockShardCertifiedStateEarliestIndex(filter), + rank, + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + + if err := txn.Set( + clockShardCertifiedStateLatestIndex(filter), + binary.BigEndian.AppendUint64(nil, rank), + ); err != nil { + return errors.Wrap(err, "put certified app shard state") + } + + return nil +} + +func (p *PebbleClockStore) GetLatestQuorumCertificate( + filter []byte, +) (*protobufs.QuorumCertificate, error) { + idxValue, closer, err := p.db.Get( + clockQuorumCertificateLatestIndex(filter), + ) + if err != nil { + if 
errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get latest quorum certificate") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get latest quorum certificate", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetQuorumCertificate(filter, rank) +} + +func (p *PebbleClockStore) GetEarliestQuorumCertificate( + filter []byte, +) (*protobufs.QuorumCertificate, error) { + idxValue, closer, err := p.db.Get( + clockQuorumCertificateEarliestIndex(filter), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get earliest quorum certificate") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get earliest quorum certificate", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetQuorumCertificate(filter, rank) +} + +func (p *PebbleClockStore) GetQuorumCertificate( + filter []byte, + rank uint64, +) (*protobufs.QuorumCertificate, error) { + key := clockQuorumCertificateKey(rank, filter) + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get quorum certificate") + } + defer closer.Close() + + qc := &protobufs.QuorumCertificate{} + if err := qc.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get quorum certificate", + ) + } + + return qc, nil +} + +func (p *PebbleClockStore) RangeQuorumCertificates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.QuorumCertificate], error) { + if startRank > endRank { + startRank, endRank = endRank, startRank + } + + return &PebbleQuorumCertificateIterator{ + filter: filter, + start: startRank, + end: endRank + 1, + cur: startRank, + db: p, + }, nil +} + +func (p *PebbleClockStore) PutQuorumCertificate( + qc *protobufs.QuorumCertificate, + txn store.Transaction, +) error { + if qc == nil { + return errors.Wrap( + errors.New("quorum certificate is required"), + "put quorum certificate", + ) + } + + rank := qc.Rank + filter := qc.Filter + data, err := qc.ToCanonicalBytes() + if err != nil { + return errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "put quorum certificate", + ) + } + + key := clockQuorumCertificateKey(rank, filter) + if err := txn.Set(key, data); err != nil { + return errors.Wrap(err, "put quorum certificate") + } + + if err := p.updateEarliestIndex( + txn, + clockQuorumCertificateEarliestIndex(filter), + rank, + ); err != nil { + return errors.Wrap(err, "put quorum certificate") + } + + if err := p.updateLatestIndex( + txn, + clockQuorumCertificateLatestIndex(filter), + rank, + ); err != nil { + return errors.Wrap(err, "put quorum certificate") + } + + return nil +} + +func (p *PebbleClockStore) GetLatestTimeoutCertificate( + filter []byte, +) (*protobufs.TimeoutCertificate, error) { + idxValue, closer, err := p.db.Get( + clockTimeoutCertificateLatestIndex(filter), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get latest timeout certificate") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get latest timeout certificate", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + 
return p.GetTimeoutCertificate(filter, rank) +} + +func (p *PebbleClockStore) GetEarliestTimeoutCertificate( + filter []byte, +) (*protobufs.TimeoutCertificate, error) { + idxValue, closer, err := p.db.Get( + clockTimeoutCertificateEarliestIndex(filter), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get earliest timeout certificate") + } + defer closer.Close() + + if len(idxValue) != 8 { + return nil, errors.Wrap( + store.ErrInvalidData, + "get earliest timeout certificate", + ) + } + + rank := binary.BigEndian.Uint64(idxValue) + return p.GetTimeoutCertificate(filter, rank) +} + +func (p *PebbleClockStore) GetTimeoutCertificate( + filter []byte, + rank uint64, +) (*protobufs.TimeoutCertificate, error) { + key := clockTimeoutCertificateKey(rank, filter) + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get timeout certificate") + } + defer closer.Close() + + tc := &protobufs.TimeoutCertificate{} + if err := tc.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get timeout certificate", + ) + } + + return tc, nil +} + +func (p *PebbleClockStore) RangeTimeoutCertificates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.TimeoutCertificate], error) { + if startRank > endRank { + startRank, endRank = endRank, startRank + } + + return &PebbleTimeoutCertificateIterator{ + filter: filter, + start: startRank, + end: endRank + 1, + cur: startRank, + db: p, + }, nil +} + +func (p *PebbleClockStore) PutTimeoutCertificate( + tc *protobufs.TimeoutCertificate, + txn store.Transaction, +) error { + if tc == nil { + return errors.Wrap( + errors.New("timeout certificate is required"), + "put timeout certificate", + ) + } + + rank := tc.Rank + filter := tc.Filter + + data, err := tc.ToCanonicalBytes() + if err != nil { + return errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "put timeout certificate", + ) + } + + key := clockTimeoutCertificateKey(rank, filter) + if err := txn.Set(key, data); err != nil { + return errors.Wrap(err, "put timeout certificate") + } + + if err := p.updateEarliestIndex( + txn, + clockTimeoutCertificateEarliestIndex(filter), + rank, + ); err != nil { + return errors.Wrap(err, "put timeout certificate") + } + + if err := p.updateLatestIndex( + txn, + clockTimeoutCertificateLatestIndex(filter), + rank, + ); err != nil { + return errors.Wrap(err, "put timeout certificate") + } + + return nil +} + +func (p *PebbleClockStore) PutProposalVote( + txn store.Transaction, + vote *protobufs.ProposalVote, +) error { + if vote == nil { + return errors.Wrap( + errors.New("proposal vote is required"), + "put proposal vote", + ) + } + + rank := vote.Rank + filter := vote.Filter + identity := vote.Identity() + + data, err := vote.ToCanonicalBytes() + if err != nil { + return errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "put proposal vote", + ) + } + + key := clockProposalVoteKey(rank, filter, []byte(identity)) + err = txn.Set(key, data) + return errors.Wrap(err, "put proposal vote") +} + +func (p *PebbleClockStore) GetProposalVote( + filter []byte, + rank uint64, + identity []byte, +) ( + *protobufs.ProposalVote, + error, +) { + key := clockProposalVoteKey(rank, filter, []byte(identity)) + value, closer, err := p.db.Get(key) + if err != nil { 
+ if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get proposal vote") + } + defer closer.Close() + + vote := &protobufs.ProposalVote{} + if err := vote.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get proposal vote", + ) + } + + return vote, nil +} + +func (p *PebbleClockStore) GetProposalVotes(filter []byte, rank uint64) ( + []*protobufs.ProposalVote, + error, +) { + results := []*protobufs.ProposalVote{} + startKey := clockProposalVoteKey(rank, filter, nil) + endKey := clockProposalVoteKey(rank+1, filter, nil) + iterator, err := p.db.NewIter(startKey, endKey) + if err != nil { + return nil, errors.Wrap(err, "get proposal votes") + } + defer iterator.Close() + + for iterator.First(); iterator.Valid(); iterator.Next() { + key := iterator.Key() + if len(key) != len(startKey)+32 { + continue + } + + value := iterator.Value() + vote := &protobufs.ProposalVote{} + if err := vote.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get proposal votes", + ) + } + results = append(results, vote) + } + + return results, nil +} + +func (p *PebbleClockStore) PutTimeoutVote( + txn store.Transaction, + vote *protobufs.TimeoutState, +) error { + if vote == nil { + return errors.Wrap( + errors.New("timeout vote is required"), + "put timeout vote", + ) + } + + rank := vote.Vote.Rank + filter := vote.Vote.Filter + identity := vote.Vote.Identity() + + data, err := vote.ToCanonicalBytes() + if err != nil { + return errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "put timeout vote", + ) + } + + key := clockTimeoutVoteKey(rank, filter, []byte(identity)) + err = txn.Set(key, data) + return errors.Wrap(err, "put timeout vote") +} + +func (p *PebbleClockStore) GetTimeoutVote( + filter []byte, + rank uint64, + identity []byte, +) ( + *protobufs.TimeoutState, + error, +) { + key := clockTimeoutVoteKey(rank, filter, []byte(identity)) + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, store.ErrNotFound + } + return nil, errors.Wrap(err, "get proposal vote") + } + defer closer.Close() + + vote := &protobufs.TimeoutState{} + if err := vote.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get proposal vote", + ) + } + + return vote, nil +} + +func (p *PebbleClockStore) GetTimeoutVotes(filter []byte, rank uint64) ( + []*protobufs.TimeoutState, + error, +) { + results := []*protobufs.TimeoutState{} + startKey := clockTimeoutVoteKey(rank, filter, nil) + endKey := clockTimeoutVoteKey(rank+1, filter, nil) + iterator, err := p.db.NewIter(startKey, endKey) + if err != nil { + return nil, errors.Wrap(err, "get timeout votes") + } + defer iterator.Close() + + for iterator.First(); iterator.Valid(); iterator.Next() { + key := iterator.Key() + if len(key) != len(startKey)+32 { + continue + } + + value := iterator.Value() + vote := &protobufs.TimeoutState{} + if err := vote.FromCanonicalBytes(slices.Clone(value)); err != nil { + return nil, errors.Wrap( + errors.Wrap(err, store.ErrInvalidData.Error()), + "get timeout votes", + ) + } + results = append(results, vote) + } + + return results, nil +} diff --git a/node/store/consensus.go b/node/store/consensus.go new file mode 100644 index 0000000..507c20d --- /dev/null +++ b/node/store/consensus.go 
@@ -0,0 +1,353 @@
+package store
+
+import (
+	"bytes"
+	"encoding/binary"
+	"slices"
+
+	"github.com/cockroachdb/pebble"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+	"source.quilibrium.com/quilibrium/monorepo/consensus"
+	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
+	"source.quilibrium.com/quilibrium/monorepo/protobufs"
+	"source.quilibrium.com/quilibrium/monorepo/types/store"
+)
+
+type PebbleConsensusStore struct {
+	db     store.KVDB
+	logger *zap.Logger
+}
+
+var _ consensus.ConsensusStore[*protobufs.ProposalVote] = (*PebbleConsensusStore)(nil)
+
+func NewPebbleConsensusStore(
+	db store.KVDB,
+	logger *zap.Logger,
+) *PebbleConsensusStore {
+	return &PebbleConsensusStore{
+		db,
+		logger,
+	}
+}
+
+// GetConsensusState implements consensus.ConsensusStore.
+func (p *PebbleConsensusStore) GetConsensusState(filter []byte) (
+	*models.ConsensusState[*protobufs.ProposalVote],
+	error,
+) {
+	value, closer, err := p.db.Get(
+		slices.Concat([]byte{CONSENSUS, CONSENSUS_STATE}, filter),
+	)
+	if err != nil {
+		if errors.Is(err, pebble.ErrNotFound) {
+			return nil, ErrNotFound
+		}
+
+		return nil, errors.Wrap(err, "get consensus state")
+	}
+	defer closer.Close()
+
+	c := slices.Clone(value)
+	if len(c) < 24 {
+		return nil, errors.Wrap(errors.New("invalid data"), "get consensus state")
+	}
+
+	state := &models.ConsensusState[*protobufs.ProposalVote]{}
+	buf := bytes.NewBuffer(c)
+
+	var filterLen uint32
+	if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil {
+		return nil, errors.Wrap(err, "get consensus state")
+	}
+	if filterLen > 0 {
+		filterBytes := make([]byte, filterLen)
+		if _, err := buf.Read(filterBytes); err != nil {
+			return nil, errors.Wrap(err, "get consensus state")
+		}
+		state.Filter = filterBytes
+	}
+
+	if err := binary.Read(
+		buf,
+		binary.BigEndian,
+		&state.FinalizedRank,
+	); err != nil {
+		return nil, errors.Wrap(err, "get consensus state")
+	}
+
+	if err := binary.Read(
+		buf,
+		binary.BigEndian,
+		&state.LatestAcknowledgedRank,
+	); err != nil {
+		return nil, errors.Wrap(err, "get consensus state")
+	}
+
+	var latestTimeoutLen uint32
+	if err := binary.Read(buf, binary.BigEndian, &latestTimeoutLen); err != nil {
+		return nil, errors.Wrap(err, "get consensus state")
+	}
+	if latestTimeoutLen > 0 {
+		latestTimeoutBytes := make([]byte, latestTimeoutLen)
+		if _, err := buf.Read(latestTimeoutBytes); err != nil {
+			return nil, errors.Wrap(err, "get consensus state")
+		}
+		lt := &protobufs.TimeoutState{}
+		if err := lt.FromCanonicalBytes(latestTimeoutBytes); err != nil {
+			return nil, errors.Wrap(err, "get consensus state")
+		}
+		state.LatestTimeout = &models.TimeoutState[*protobufs.ProposalVote]{
+			Rank:                        lt.Vote.Rank,
+			LatestQuorumCertificate:     lt.LatestQuorumCertificate,
+			PriorRankTimeoutCertificate: lt.PriorRankTimeoutCertificate,
+			Vote:                        &lt.Vote,
+			TimeoutTick:                 lt.TimeoutTick,
+		}
+	}
+
+	return state, nil
+}
+
+// GetLivenessState implements consensus.ConsensusStore.
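+// The record is keyed by CONSENSUS/CONSENSUS_LIVENESS concatenated with the
+// filter and decoded as a uint32-length-prefixed filter, the current rank,
+// and uint32-length-prefixed canonical encodings of the latest quorum
+// certificate and the prior-rank timeout certificate, where a zero length
+// marks an absent certificate.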
+func (p *PebbleConsensusStore) GetLivenessState(filter []byte) ( + *models.LivenessState, + error, +) { + value, closer, err := p.db.Get( + slices.Concat([]byte{CONSENSUS, CONSENSUS_LIVENESS}, filter), + ) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return nil, ErrNotFound + } + + return nil, errors.Wrap(err, "get liveness state") + } + defer closer.Close() + + c := slices.Clone(value) + if len(c) < 20 { + return nil, errors.Wrap(errors.New("invalid data"), "get liveness state") + } + + state := &models.LivenessState{} + buf := bytes.NewBuffer(c) + + var filterLen uint32 + if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + if filterLen > 0 { + filterBytes := make([]byte, filterLen) + if _, err := buf.Read(filterBytes); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + state.Filter = filterBytes + } + + if err := binary.Read( + buf, + binary.BigEndian, + &state.CurrentRank, + ); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + + var latestQCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &latestQCLen); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + if latestQCLen > 0 { + latestQCBytes := make([]byte, latestQCLen) + if _, err := buf.Read(latestQCBytes); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + lt := &protobufs.QuorumCertificate{} + if err := lt.FromCanonicalBytes(latestQCBytes); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + state.LatestQuorumCertificate = lt + } + + var priorTCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &priorTCLen); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + if priorTCLen > 0 { + priorTCBytes := make([]byte, priorTCLen) + if _, err := buf.Read(priorTCBytes); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + lt := &protobufs.TimeoutCertificate{} + if err := lt.FromCanonicalBytes(priorTCBytes); err != nil { + return nil, errors.Wrap(err, "get liveness state") + } + state.PriorRankTimeoutCertificate = lt + } + + return state, nil +} + +// PutConsensusState implements consensus.ConsensusStore. 
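+// The record is written under CONSENSUS/CONSENSUS_STATE concatenated with
+// the state's filter as a uint32-length-prefixed filter, the finalized rank,
+// the latest acknowledged rank, and a uint32-length-prefixed canonical
+// encoding of the latest timeout (zero length when no timeout is recorded).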
+func (p *PebbleConsensusStore) PutConsensusState( + state *models.ConsensusState[*protobufs.ProposalVote], +) error { + buf := new(bytes.Buffer) + + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(state.Filter)), + ); err != nil { + return errors.Wrap(err, "put consensus state") + } + if _, err := buf.Write(state.Filter); err != nil { + return errors.Wrap(err, "put consensus state") + } + + if err := binary.Write( + buf, + binary.BigEndian, + state.FinalizedRank, + ); err != nil { + return errors.Wrap(err, "put consensus state") + } + + if err := binary.Write( + buf, + binary.BigEndian, + state.LatestAcknowledgedRank, + ); err != nil { + return errors.Wrap(err, "put consensus state") + } + + if state.LatestTimeout == nil { + if err := binary.Write( + buf, + binary.BigEndian, + uint32(0), + ); err != nil { + return errors.Wrap(err, "put consensus state") + } + } else { + var priorTC *protobufs.TimeoutCertificate + if state.LatestTimeout.PriorRankTimeoutCertificate != nil { + priorTC = state.LatestTimeout.PriorRankTimeoutCertificate.(*protobufs.TimeoutCertificate) + } + lt := &protobufs.TimeoutState{ + LatestQuorumCertificate: state.LatestTimeout.LatestQuorumCertificate.(*protobufs.QuorumCertificate), + PriorRankTimeoutCertificate: priorTC, + Vote: *state.LatestTimeout.Vote, + TimeoutTick: state.LatestTimeout.TimeoutTick, + } + timeoutBytes, err := lt.ToCanonicalBytes() + if err != nil { + return errors.Wrap(err, "put consensus state") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(timeoutBytes)), + ); err != nil { + return errors.Wrap(err, "put consensus state") + } + if _, err := buf.Write(timeoutBytes); err != nil { + return errors.Wrap(err, "put consensus state") + } + } + + return errors.Wrap( + p.db.Set( + slices.Concat([]byte{CONSENSUS, CONSENSUS_STATE}, state.Filter), + buf.Bytes(), + ), + "put consensus state", + ) +} + +// PutLivenessState implements consensus.ConsensusStore. 
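+// The record mirrors the layout read by GetLivenessState: a length-prefixed
+// filter, the current rank, and length-prefixed canonical encodings of the
+// latest quorum certificate and the prior-rank timeout certificate, with a
+// zero length standing in for a missing certificate.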
+func (p *PebbleConsensusStore) PutLivenessState( + state *models.LivenessState, +) error { + buf := new(bytes.Buffer) + + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(state.Filter)), + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + if _, err := buf.Write(state.Filter); err != nil { + return errors.Wrap(err, "put liveness state") + } + + if err := binary.Write( + buf, + binary.BigEndian, + state.CurrentRank, + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + + if state.LatestQuorumCertificate == nil { + if err := binary.Write( + buf, + binary.BigEndian, + uint32(0), + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + } else { + qc := state.LatestQuorumCertificate.(*protobufs.QuorumCertificate) + qcBytes, err := qc.ToCanonicalBytes() + if err != nil { + return errors.Wrap(err, "put liveness state") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(qcBytes)), + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + if _, err := buf.Write(qcBytes); err != nil { + return errors.Wrap(err, "put liveness state") + } + } + + if state.PriorRankTimeoutCertificate == nil { + if err := binary.Write( + buf, + binary.BigEndian, + uint32(0), + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + } else { + tc := state.PriorRankTimeoutCertificate.(*protobufs.TimeoutCertificate) + timeoutBytes, err := tc.ToCanonicalBytes() + if err != nil { + return errors.Wrap(err, "put liveness state") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(timeoutBytes)), + ); err != nil { + return errors.Wrap(err, "put liveness state") + } + if _, err := buf.Write(timeoutBytes); err != nil { + return errors.Wrap(err, "put liveness state") + } + } + + return errors.Wrap( + p.db.Set( + slices.Concat([]byte{CONSENSUS, CONSENSUS_LIVENESS}, state.Filter), + buf.Bytes(), + ), + "put liveness state", + ) +} diff --git a/node/store/constants.go b/node/store/constants.go index 7c78be7..0b12fb4 100644 --- a/node/store/constants.go +++ b/node/store/constants.go @@ -14,27 +14,57 @@ const ( HYPERGRAPH_SHARD = 0x09 SHARD = 0x0A INBOX = 0x0B + CONSENSUS = 0x0C MIGRATION = 0xF0 WORKER = 0xFF ) // Clock store indexes: const ( - CLOCK_GLOBAL_FRAME = 0x00 - CLOCK_SHARD_FRAME_SHARD = 0x01 - CLOCK_SHARD_FRAME_CANDIDATE_SHARD = 0x02 - CLOCK_SHARD_FRAME_FRECENCY_SHARD = 0x03 - CLOCK_SHARD_FRAME_DISTANCE_SHARD = 0x04 - CLOCK_COMPACTION_SHARD = 0x05 - CLOCK_SHARD_FRAME_SENIORITY_SHARD = 0x06 - CLOCK_SHARD_FRAME_STATE_TREE = 0x07 - CLOCK_GLOBAL_FRAME_REQUEST = 0x08 - CLOCK_GLOBAL_FRAME_INDEX_EARLIEST = 0x10 | CLOCK_GLOBAL_FRAME - CLOCK_GLOBAL_FRAME_INDEX_LATEST = 0x20 | CLOCK_GLOBAL_FRAME - CLOCK_GLOBAL_FRAME_INDEX_PARENT = 0x30 | CLOCK_GLOBAL_FRAME - CLOCK_SHARD_FRAME_INDEX_EARLIEST = 0x10 | CLOCK_SHARD_FRAME_SHARD - CLOCK_SHARD_FRAME_INDEX_LATEST = 0x20 | CLOCK_SHARD_FRAME_SHARD - CLOCK_SHARD_FRAME_INDEX_PARENT = 0x30 | CLOCK_SHARD_FRAME_SHARD + CLOCK_GLOBAL_FRAME = 0x00 + CLOCK_SHARD_FRAME_SHARD = 0x01 + CLOCK_SHARD_FRAME_CANDIDATE_SHARD = 0x02 + CLOCK_SHARD_FRAME_FRECENCY_SHARD = 0x03 + CLOCK_SHARD_FRAME_DISTANCE_SHARD = 0x04 + CLOCK_COMPACTION_SHARD = 0x05 + CLOCK_SHARD_FRAME_SENIORITY_SHARD = 0x06 + CLOCK_SHARD_FRAME_STATE_TREE = 0x07 + CLOCK_GLOBAL_FRAME_REQUEST = 0x08 + CLOCK_GLOBAL_CERTIFIED_STATE = 0x09 + CLOCK_SHARD_CERTIFIED_STATE = 0x0A + CLOCK_QUORUM_CERTIFICATE = 0x0B + CLOCK_TIMEOUT_CERTIFICATE = 0x0C + CLOCK_PROPOSAL_VOTE = 0x0D + CLOCK_TIMEOUT_VOTE = 0x0E + + 
CLOCK_GLOBAL_FRAME_INDEX_EARLIEST = 0x10 | CLOCK_GLOBAL_FRAME + CLOCK_GLOBAL_FRAME_INDEX_LATEST = 0x20 | CLOCK_GLOBAL_FRAME + CLOCK_GLOBAL_FRAME_INDEX_PARENT = 0x30 | CLOCK_GLOBAL_FRAME + + CLOCK_SHARD_FRAME_INDEX_EARLIEST = 0x10 | CLOCK_SHARD_FRAME_SHARD + CLOCK_SHARD_FRAME_INDEX_LATEST = 0x20 | CLOCK_SHARD_FRAME_SHARD + CLOCK_SHARD_FRAME_INDEX_PARENT = 0x30 | CLOCK_SHARD_FRAME_SHARD + + CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_EARLIEST = 0x10 | + CLOCK_GLOBAL_CERTIFIED_STATE + CLOCK_GLOBAL_CERTIFIED_STATE_INDEX_LATEST = 0x20 | + CLOCK_GLOBAL_CERTIFIED_STATE + + CLOCK_SHARD_CERTIFIED_STATE_INDEX_EARLIEST = 0x10 | + CLOCK_SHARD_CERTIFIED_STATE + CLOCK_SHARD_CERTIFIED_STATE_INDEX_LATEST = 0x20 | + CLOCK_SHARD_CERTIFIED_STATE + + CLOCK_QUORUM_CERTIFICATE_INDEX_EARLIEST = 0x10 | + CLOCK_QUORUM_CERTIFICATE + CLOCK_QUORUM_CERTIFICATE_INDEX_LATEST = 0x20 | + CLOCK_QUORUM_CERTIFICATE + + CLOCK_TIMEOUT_CERTIFICATE_INDEX_EARLIEST = 0x10 | + CLOCK_TIMEOUT_CERTIFICATE + CLOCK_TIMEOUT_CERTIFICATE_INDEX_LATEST = 0x20 | + CLOCK_TIMEOUT_CERTIFICATE + CLOCK_SHARD_FRAME_CANDIDATE_INDEX_LATEST = 0x20 | CLOCK_SHARD_FRAME_CANDIDATE_SHARD ) @@ -132,3 +162,9 @@ const ( WORKER_BY_CORE = 0x00 WORKER_BY_FILTER = 0x01 ) + +// Consensus store indexes: +const ( + CONSENSUS_STATE = 0x00 + CONSENSUS_LIVENESS = 0x01 +) diff --git a/node/store/key.go b/node/store/key.go index 66190eb..e376003 100644 --- a/node/store/key.go +++ b/node/store/key.go @@ -1090,8 +1090,8 @@ func (p *PebbleKeyStore) GetKeyRegistryByProver( // Find identity key via cross signatures crossSigData, err := p.GetCrossSignatureByProvingKey(proverKeyAddress) - if err == nil && len(crossSigData) > 0 { - identityKeyAddress := crossSigData[:32] + if err == nil && len(crossSigData) > 74 { + identityKeyAddress := crossSigData[:len(crossSigData)-74] // Get the identity key identityKey, err := p.GetIdentityKey(identityKeyAddress) @@ -1099,14 +1099,14 @@ func (p *PebbleKeyStore) GetKeyRegistryByProver( registry.IdentityKey = identityKey // Get the signatures - registry.IdentityToProver = &protobufs.Ed448Signature{ - Signature: crossSigData[32:], - } + registry.ProverToIdentity = &protobufs.BLS48581Signature{ + Signature: crossSigData[len(crossSigData)-74:], + } // Get reverse signature - proverSigData, err := p.GetCrossSignatureByProvingKey(proverKeyAddress) + proverSigData, err := p.GetCrossSignatureByProvingKey(identityKeyAddress) if err == nil { - registry.ProverToIdentity = &protobufs.BLS48581Signature{ + registry.IdentityToProver = &protobufs.Ed448Signature{ Signature: proverSigData[32:], } } diff --git a/node/store/pebble.go b/node/store/pebble.go index d1b6b9f..7943637 100644 --- a/node/store/pebble.go +++ b/node/store/pebble.go @@ -23,6 +23,7 @@ type PebbleDB struct { // the end. 
var pebbleMigrations = []func(*pebble.Batch) error{ migration_2_1_0_4, + migration_2_1_0_5, } func NewPebbleDB( @@ -437,3 +438,8 @@ func migration_2_1_0_4(b *pebble.Batch) error { return nil } + +func migration_2_1_0_5(b *pebble.Batch) error { + // We just re-run it again + return migration_2_1_0_4(b) +} diff --git a/protobufs/canonical_types.go b/protobufs/canonical_types.go index 9c238e8..34ae47d 100644 --- a/protobufs/canonical_types.go +++ b/protobufs/canonical_types.go @@ -49,8 +49,8 @@ const ( GlobalFrameHeaderType uint32 = 0x0309 FrameHeaderType uint32 = 0x030A ProverLivenessCheckType uint32 = 0x030B - FrameVoteType uint32 = 0x030C - FrameConfirmationType uint32 = 0x030D + ProposalVoteType uint32 = 0x030C + QuorumCertificateType uint32 = 0x030D GlobalFrameType uint32 = 0x030E AppShardFrameType uint32 = 0x030F SeniorityMergeType uint32 = 0x0310 @@ -60,6 +60,10 @@ const ( PathType uint32 = 0x0314 TraversalSubProofType uint32 = 0x0315 TraversalProofType uint32 = 0x0316 + GlobalProposalType uint32 = 0x0317 + AppShardProposalType uint32 = 0x0318 + TimeoutStateType uint32 = 0x031C + TimeoutCertificateType uint32 = 0x031D // Hypergraph types (0x0400 - 0x04FF) HypergraphConfigurationType uint32 = 0x0401 diff --git a/protobufs/global.go b/protobufs/global.go index c7cb0d6..a4979bd 100644 --- a/protobufs/global.go +++ b/protobufs/global.go @@ -6,52 +6,561 @@ import ( "slices" "time" + "github.com/iden3/go-iden3-crypto/poseidon" "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "google.golang.org/protobuf/proto" - "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) -func (g *GlobalFrame) Clone() consensus.Unique { - g.Identity() - frame := proto.Clone(g) - return frame.(*GlobalFrame) +// Source implements models.QuorumCertificate. +func (g *QuorumCertificate) Equals(other models.QuorumCertificate) bool { + return bytes.Equal(g.Filter, other.GetFilter()) && + g.Rank == other.GetRank() && + g.FrameNumber == other.GetFrameNumber() && + g.Identity() == other.Identity() } -func (g *GlobalFrame) Identity() consensus.Identity { - return consensus.Identity(g.Header.Output) +func ( + g *QuorumCertificate, +) GetAggregatedSignature() models.AggregatedSignature { + return g.AggregateSignature } -func (g *GlobalFrame) Rank() uint64 { - return g.Header.FrameNumber +// Source implements models.Unique. +func (g *QuorumCertificate) Clone() models.Unique { + return proto.Clone(g).(*QuorumCertificate) } -func (a *AppShardFrame) Clone() consensus.Unique { - a.Identity() - frame := proto.Clone(a) - return frame.(*AppShardFrame) +// GetSignature implements models.Unique. +func (g *QuorumCertificate) GetSignature() []byte { + return g.AggregateSignature.Signature } -func (a *AppShardFrame) Identity() consensus.Identity { - return consensus.Identity(a.Header.Output) +// Source implements models.Unique. +func (g *QuorumCertificate) Source() models.Identity { + return g.AggregateSignature.Identity() } -func (a *AppShardFrame) Rank() uint64 { - return a.Header.FrameNumber +// Source implements models.Unique. +func (g *QuorumCertificate) Identity() models.Identity { + return models.Identity(g.Selector) } -func (f *FrameVote) Clone() consensus.Unique { - f.Identity() - frame := proto.Clone(f) - return frame.(*FrameVote) +// Source implements models.TimeoutCertificate. 
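+// Two timeout certificates are considered equal when their filter, rank,
+// latest ranks, and latest quorum certificate all match.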
+func (g *TimeoutCertificate) Equals(other models.TimeoutCertificate) bool { + return bytes.Equal(g.Filter, other.GetFilter()) && + g.Rank == other.GetRank() && + slices.Equal(g.LatestRanks, other.GetLatestRanks()) && + g.LatestQuorumCertificate.Equals(other.GetLatestQuorumCert()) } -func (f *FrameVote) Identity() consensus.Identity { - return consensus.Identity(f.PublicKeySignatureBls48581.Signature) +func ( + g *TimeoutCertificate, +) GetAggregatedSignature() models.AggregatedSignature { + return g.AggregateSignature } -func (f *FrameVote) Rank() uint64 { - return f.FrameNumber +func ( + g *TimeoutCertificate, +) GetLatestQuorumCert() models.QuorumCertificate { + return g.LatestQuorumCertificate +} + +// Source implements models.Unique. +func (g *TimeoutCertificate) Clone() models.Unique { + return proto.Clone(g).(*TimeoutCertificate) +} + +// GetSignature implements models.Unique. +func (g *TimeoutCertificate) GetSignature() []byte { + return g.AggregateSignature.Signature +} + +// Source implements models.Unique. +func (g *TimeoutCertificate) Source() models.Identity { + return models.Identity( + binary.BigEndian.AppendUint64(slices.Clone(g.Filter), g.Rank), + ) +} + +// Source implements models.Unique. +func (g *TimeoutCertificate) Identity() models.Identity { + return models.Identity( + binary.BigEndian.AppendUint64(slices.Clone(g.Filter), g.Rank), + ) +} + +// GetSignature implements models.Unique. +func (f *ProposalVote) Clone() models.Unique { + return proto.Clone(f).(*ProposalVote) +} + +// GetSignature implements models.Unique. +func (f *ProposalVote) GetSignature() []byte { + return f.PublicKeySignatureBls48581.Signature +} + +// Source implements models.Unique. +func (f *ProposalVote) Source() models.Identity { + return models.Identity(f.Selector) +} + +// GetSignature implements models.Unique. +func (f *ProposalVote) Identity() models.Identity { + return models.Identity(f.PublicKeySignatureBls48581.Address) +} + +func (g *GlobalFrame) Clone() models.Unique { + return proto.Clone(g).(*GlobalFrame) +} + +// GetRank implements models.Unique. +func (g *GlobalFrame) GetRank() uint64 { + return g.Header.Rank +} + +// GetSignature implements models.Unique. +func (g *GlobalFrame) GetSignature() []byte { + return g.Header.PublicKeySignatureBls48581.Signature +} + +// GetTimestamp implements models.Unique. +func (g *GlobalFrame) GetTimestamp() uint64 { + return uint64(g.Header.Timestamp) +} + +// Identity implements models.Unique. +func (g *GlobalFrame) Identity() models.Identity { + selectorBI, err := poseidon.HashBytes(g.Header.Output) + if err != nil { + return "" + } + + return models.Identity(selectorBI.FillBytes(make([]byte, 32))) +} + +// Source implements models.Unique. +func (g *GlobalFrame) Source() models.Identity { + return models.Identity(g.Header.Prover) +} + +func (a *AppShardFrame) Clone() models.Unique { + return proto.Clone(a).(*AppShardFrame) +} + +// GetRank implements models.Unique. +func (a *AppShardFrame) GetRank() uint64 { + return a.Header.Rank +} + +// GetSignature implements models.Unique. +func (a *AppShardFrame) GetSignature() []byte { + return a.Header.PublicKeySignatureBls48581.Signature +} + +// GetTimestamp implements models.Unique. +func (a *AppShardFrame) GetTimestamp() uint64 { + return uint64(a.Header.Timestamp) +} + +// Identity implements models.Unique. 
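+// The identity is the Poseidon hash of the frame header output, encoded as
+// a 32-byte big-endian value; an empty identity is returned if hashing fails.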
+func (a *AppShardFrame) Identity() models.Identity { + selectorBI, err := poseidon.HashBytes(a.Header.Output) + if err != nil { + return "" + } + + return models.Identity(selectorBI.FillBytes(make([]byte, 32))) +} + +// Source implements models.Unique. +func (a *AppShardFrame) Source() models.Identity { + return models.Identity(a.Header.Prover) +} + +func (s *AppShardProposal) GetRank() uint64 { + rank := uint64(0) + if s.State != nil && s.State.GetRank() > rank { + rank = s.State.GetRank() + } + if s.ParentQuorumCertificate != nil && + s.ParentQuorumCertificate.GetRank() > rank { + rank = s.ParentQuorumCertificate.GetRank() + } + if s.PriorRankTimeoutCertificate != nil && + s.PriorRankTimeoutCertificate.GetRank() > rank { + rank = s.PriorRankTimeoutCertificate.GetRank() + } + return rank +} + +func (s *AppShardProposal) ToCanonicalBytes() ([]byte, error) { + buf := new(bytes.Buffer) + + // Write type prefix + if err := binary.Write( + buf, + binary.BigEndian, + AppShardProposalType, + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write state + stateBytes, err := s.State.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(stateBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(stateBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write parent_quorum_certificate + parentQCBytes, err := s.ParentQuorumCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(parentQCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(parentQCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write prior_rank_timeout_certificate + if s.PriorRankTimeoutCertificate == nil { + if err := binary.Write( + buf, + binary.BigEndian, + uint32(0), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + priorTCBytes, err := s.PriorRankTimeoutCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(priorTCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(priorTCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + // Write vote + voteBytes, err := s.Vote.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(voteBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(voteBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + return buf.Bytes(), nil +} + +func (s *AppShardProposal) FromCanonicalBytes(data []byte) error { + buf := bytes.NewBuffer(data) + + // Read and verify type prefix + var typePrefix uint32 + if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if typePrefix != AppShardProposalType { + return errors.Wrap( + errors.New("invalid type prefix"), + "from canonical bytes", + ) + } + + // Read state + var stateLen uint32 + if err := binary.Read(buf, binary.BigEndian, &stateLen); err != nil 
{ + return errors.Wrap(err, "from canonical bytes") + } + stateBytes := make([]byte, stateLen) + if _, err := buf.Read(stateBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.State = &AppShardFrame{} + if err := s.State.FromCanonicalBytes(stateBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read parent_quorum_certificate + var parentQCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &parentQCLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + parentQCBytes := make([]byte, parentQCLen) + if _, err := buf.Read(parentQCBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.ParentQuorumCertificate = &QuorumCertificate{} + if err := s.ParentQuorumCertificate.FromCanonicalBytes( + parentQCBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read prior_rank_timeout_certificate + var priorRankTCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &priorRankTCLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + if priorRankTCLen != 0 { + priorRankTCBytes := make([]byte, priorRankTCLen) + if _, err := buf.Read(priorRankTCBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.PriorRankTimeoutCertificate = &TimeoutCertificate{} + if err := s.PriorRankTimeoutCertificate.FromCanonicalBytes( + priorRankTCBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read vote + var voteLen uint32 + if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + voteBytes := make([]byte, voteLen) + if _, err := buf.Read(voteBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.Vote = &ProposalVote{} + if err := s.Vote.FromCanonicalBytes( + voteBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + return nil +} + +func (s *GlobalProposal) GetRank() uint64 { + rank := uint64(0) + if s.State != nil && s.State.GetRank() > rank { + rank = s.State.GetRank() + } + if s.ParentQuorumCertificate != nil && + s.ParentQuorumCertificate.GetRank() > rank { + rank = s.ParentQuorumCertificate.GetRank() + } + if s.PriorRankTimeoutCertificate != nil && + s.PriorRankTimeoutCertificate.GetRank() > rank { + rank = s.PriorRankTimeoutCertificate.GetRank() + } + return rank +} + +func (s *GlobalProposal) ToCanonicalBytes() ([]byte, error) { + buf := new(bytes.Buffer) + + // Write type prefix + if err := binary.Write( + buf, + binary.BigEndian, + GlobalProposalType, + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write state + stateBytes, err := s.State.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(stateBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(stateBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write parent_quorum_certificate + parentQCBytes, err := s.ParentQuorumCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(parentQCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(parentQCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + 
// Write prior_rank_timeout_certificate + if s.PriorRankTimeoutCertificate == nil { + if err := binary.Write( + buf, + binary.BigEndian, + uint32(0), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + priorTCBytes, err := s.PriorRankTimeoutCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(priorTCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(priorTCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + // Write vote + voteBytes, err := s.Vote.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(voteBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(voteBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + return buf.Bytes(), nil +} + +func (s *GlobalProposal) FromCanonicalBytes(data []byte) error { + buf := bytes.NewBuffer(data) + + // Read and verify type prefix + var typePrefix uint32 + if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if typePrefix != GlobalProposalType { + return errors.Wrap( + errors.New("invalid type prefix"), + "from canonical bytes", + ) + } + + // Read state + var stateLen uint32 + if err := binary.Read(buf, binary.BigEndian, &stateLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + stateBytes := make([]byte, stateLen) + if _, err := buf.Read(stateBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.State = &GlobalFrame{} + if err := s.State.FromCanonicalBytes(stateBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read parent_quorum_certificate + var parentQCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &parentQCLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + parentQCBytes := make([]byte, parentQCLen) + if _, err := buf.Read(parentQCBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.ParentQuorumCertificate = &QuorumCertificate{} + if err := s.ParentQuorumCertificate.FromCanonicalBytes( + parentQCBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read prior_rank_timeout_certificate + var priorRankTCLen uint32 + if err := binary.Read(buf, binary.BigEndian, &priorRankTCLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + if priorRankTCLen != 0 { + priorRankTCBytes := make([]byte, priorRankTCLen) + if _, err := buf.Read(priorRankTCBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.PriorRankTimeoutCertificate = &TimeoutCertificate{} + if err := s.PriorRankTimeoutCertificate.FromCanonicalBytes( + priorRankTCBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read vote + var voteLen uint32 + if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + voteBytes := make([]byte, voteLen) + if _, err := buf.Read(voteBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + s.Vote = &ProposalVote{} + if err := s.Vote.FromCanonicalBytes( + voteBytes, + ); err != nil { + return errors.Wrap(err, "from 
canonical bytes") + } + + return nil } func (s *SeniorityMerge) ToCanonicalBytes() ([]byte, error) { @@ -1492,6 +2001,11 @@ func (g *GlobalFrameHeader) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write rank + if err := binary.Write(buf, binary.BigEndian, g.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write timestamp if err := binary.Write(buf, binary.BigEndian, g.Timestamp); err != nil { return nil, errors.Wrap(err, "to canonical bytes") @@ -1559,6 +2073,30 @@ func (g *GlobalFrameHeader) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write requests_root + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(g.RequestsRoot)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(g.RequestsRoot); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write prover + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(g.Prover)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(g.Prover); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write public_key_signature_bls48581 if g.PublicKeySignatureBls48581 != nil { sigBytes, err := g.PublicKeySignatureBls48581.ToCanonicalBytes() @@ -1604,6 +2142,11 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read rank + if err := binary.Read(buf, binary.BigEndian, &g.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read timestamp if err := binary.Read(buf, binary.BigEndian, &g.Timestamp); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1665,6 +2208,34 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read requests_root + var requestsRootLen uint32 + if err := binary.Read( + buf, + binary.BigEndian, + &requestsRootLen, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + g.RequestsRoot = make([]byte, requestsRootLen) + if _, err := buf.Read(g.RequestsRoot); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read prover + var proverLen uint32 + if err := binary.Read( + buf, + binary.BigEndian, + &proverLen, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + g.Prover = make([]byte, proverLen) + if _, err := buf.Read(g.Prover); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read public_key_signature_bls48581 var sigLen uint32 if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { @@ -2091,11 +2662,11 @@ func (p *ProverLivenessCheck) FromCanonicalBytes(data []byte) error { return nil } -func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { +func (f *ProposalVote) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) // Write type prefix - if err := binary.Write(buf, binary.BigEndian, FrameVoteType); err != nil { + if err := binary.Write(buf, binary.BigEndian, ProposalVoteType); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2111,32 +2682,29 @@ func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write rank + if err := binary.Write(buf, binary.BigEndian, f.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write frame_number if err := 
binary.Write(buf, binary.BigEndian, f.FrameNumber); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } - // Write proposer + // Write selector if err := binary.Write( buf, binary.BigEndian, - uint32(len(f.Proposer)), + uint32(len(f.Selector)), ); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } - if _, err := buf.Write(f.Proposer); err != nil { - return nil, errors.Wrap(err, "to canonical bytes") - } - - // Write approve - approve := uint8(0) - if f.Approve { - approve = 1 - } - if err := binary.Write(buf, binary.BigEndian, approve); err != nil { + if _, err := buf.Write(f.Selector); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } + // Write timestamp if err := binary.Write(buf, binary.BigEndian, f.Timestamp); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2166,7 +2734,7 @@ func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { return buf.Bytes(), nil } -func (f *FrameVote) FromCanonicalBytes(data []byte) error { +func (f *ProposalVote) FromCanonicalBytes(data []byte) error { buf := bytes.NewBuffer(data) // Read and verify type prefix @@ -2174,7 +2742,7 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { return errors.Wrap(err, "from canonical bytes") } - if typePrefix != FrameVoteType { + if typePrefix != ProposalVoteType { return errors.Wrap( errors.New("invalid type prefix"), "from canonical bytes", @@ -2191,28 +2759,26 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read rank + if err := binary.Read(buf, binary.BigEndian, &f.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read frame_number if err := binary.Read(buf, binary.BigEndian, &f.FrameNumber); err != nil { return errors.Wrap(err, "from canonical bytes") } - // Read proposer - var proposerLen uint32 - if err := binary.Read(buf, binary.BigEndian, &proposerLen); err != nil { + // Read selector + var selectorLen uint32 + if err := binary.Read(buf, binary.BigEndian, &selectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - f.Proposer = make([]byte, proposerLen) - if _, err := buf.Read(f.Proposer); err != nil { + f.Selector = make([]byte, selectorLen) + if _, err := buf.Read(f.Selector); err != nil { return errors.Wrap(err, "from canonical bytes") } - // Read approve - var approve uint8 - if err := binary.Read(buf, binary.BigEndian, &approve); err != nil { - return errors.Wrap(err, "from canonical bytes") - } - f.Approve = approve != 0 - // Read timestamp if err := binary.Read(buf, binary.BigEndian, &f.Timestamp); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2239,14 +2805,183 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { return nil } -func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { +func (f *TimeoutState) ToCanonicalBytes() ([]byte, error) { + buf := new(bytes.Buffer) + + // Write type prefix + if err := binary.Write(buf, binary.BigEndian, TimeoutStateType); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write latest_quorum_certificate + latestQCBytes, err := f.LatestQuorumCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(latestQCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(latestQCBytes); err 
!= nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write prior_rank_timeout_certificate + if f.PriorRankTimeoutCertificate != nil { + priorTCBytes, err := f.PriorRankTimeoutCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(priorTCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(priorTCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + // Write vote + if f.Vote != nil { + voteBytes, err := f.Vote.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(voteBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(voteBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + // Write timeout_tick + if err := binary.Write(buf, binary.BigEndian, f.TimeoutTick); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write timestamp + if err := binary.Write(buf, binary.BigEndian, f.Timestamp); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + return buf.Bytes(), nil +} + +func (f *TimeoutState) FromCanonicalBytes(data []byte) error { + buf := bytes.NewBuffer(data) + + // Read and verify type prefix + var typePrefix uint32 + if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if typePrefix != TimeoutStateType { + return errors.Wrap( + errors.New("invalid type prefix"), + "from canonical bytes", + ) + } + + // Read latest_quorum_certificate + var latestQuorumCertLen uint32 + if err := binary.Read( + buf, + binary.BigEndian, + &latestQuorumCertLen, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if latestQuorumCertLen > 0 { + latestQuorumCertBytes := make([]byte, latestQuorumCertLen) + if _, err := buf.Read(latestQuorumCertBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + f.LatestQuorumCertificate = &QuorumCertificate{} + if err := f.LatestQuorumCertificate.FromCanonicalBytes( + latestQuorumCertBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read prior_rank_timeout_certificate + var priorRankTimeoutCertLen uint32 + if err := binary.Read( + buf, + binary.BigEndian, + &priorRankTimeoutCertLen, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if priorRankTimeoutCertLen > 0 { + priorRankTimeoutBytes := make([]byte, priorRankTimeoutCertLen) + if _, err := buf.Read(priorRankTimeoutBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + f.PriorRankTimeoutCertificate = &TimeoutCertificate{} + if err := f.PriorRankTimeoutCertificate.FromCanonicalBytes( + priorRankTimeoutBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read vote + var voteLen uint32 + if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if voteLen > 0 { + voteBytes := make([]byte, 
voteLen) + if _, err := buf.Read(voteBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + f.Vote = &ProposalVote{} + if err := f.Vote.FromCanonicalBytes(voteBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read timeout_tick + if err := binary.Read(buf, binary.BigEndian, &f.TimeoutTick); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read timestamp + if err := binary.Read(buf, binary.BigEndian, &f.Timestamp); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + return nil +} + +func (f *QuorumCertificate) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) // Write type prefix if err := binary.Write( buf, binary.BigEndian, - FrameConfirmationType, + QuorumCertificateType, ); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2263,6 +2998,11 @@ func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write rank + if err := binary.Write(buf, binary.BigEndian, f.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write frame_number if err := binary.Write(buf, binary.BigEndian, f.FrameNumber); err != nil { return nil, errors.Wrap(err, "to canonical bytes") @@ -2309,7 +3049,7 @@ func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { return buf.Bytes(), nil } -func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { +func (f *QuorumCertificate) FromCanonicalBytes(data []byte) error { buf := bytes.NewBuffer(data) // Read and verify type prefix @@ -2317,7 +3057,7 @@ func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { return errors.Wrap(err, "from canonical bytes") } - if typePrefix != FrameConfirmationType { + if typePrefix != QuorumCertificateType { return errors.Wrap( errors.New("invalid type prefix"), "from canonical bytes", @@ -2334,6 +3074,11 @@ func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read rank + if err := binary.Read(buf, binary.BigEndian, &f.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read frame_number if err := binary.Read(buf, binary.BigEndian, &f.FrameNumber); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2373,6 +3118,187 @@ func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { return nil } +func (t *TimeoutCertificate) ToCanonicalBytes() ([]byte, error) { + buf := new(bytes.Buffer) + + // Write type prefix + if err := binary.Write( + buf, + binary.BigEndian, + TimeoutCertificateType, + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write filter + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(t.Filter)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(t.Filter); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write rank + if err := binary.Write(buf, binary.BigEndian, t.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write latest_ranks + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(t.LatestRanks)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + for _, r := range t.LatestRanks { + if err := binary.Write(buf, binary.BigEndian, r); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + 
} + } + + // Write latest_quorum_certificate + if t.LatestQuorumCertificate != nil { + latestQCBytes, err := t.LatestQuorumCertificate.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(latestQCBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(latestQCBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + // Write timestamp + if err := binary.Write(buf, binary.BigEndian, t.Timestamp); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + + // Write aggregate_signature + if t.AggregateSignature != nil { + sigBytes, err := t.AggregateSignature.ToCanonicalBytes() + if err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if err := binary.Write( + buf, + binary.BigEndian, + uint32(len(sigBytes)), + ); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + if _, err := buf.Write(sigBytes); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } else { + if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + } + + return buf.Bytes(), nil +} + +func (t *TimeoutCertificate) FromCanonicalBytes(data []byte) error { + buf := bytes.NewBuffer(data) + + // Read and verify type prefix + var typePrefix uint32 + if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if typePrefix != TimeoutCertificateType { + return errors.Wrap( + errors.New("invalid type prefix"), + "from canonical bytes", + ) + } + + // Read filter + var filterLen uint32 + if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + t.Filter = make([]byte, filterLen) + if _, err := buf.Read(t.Filter); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read rank + if err := binary.Read(buf, binary.BigEndian, &t.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read latest_ranks + var latestRanksCount uint32 + if err := binary.Read(buf, binary.BigEndian, &latestRanksCount); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + t.LatestRanks = make([]uint64, latestRanksCount) + if err := binary.Read(buf, binary.BigEndian, &t.LatestRanks); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read latest_quorum_certificate + var latestQuorumCertLen uint32 + if err := binary.Read( + buf, + binary.BigEndian, + &latestQuorumCertLen, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if latestQuorumCertLen > 0 { + latestQuorumCertBytes := make([]byte, latestQuorumCertLen) + if _, err := buf.Read(latestQuorumCertBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + t.LatestQuorumCertificate = &QuorumCertificate{} + if err := t.LatestQuorumCertificate.FromCanonicalBytes( + latestQuorumCertBytes, + ); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + // Read timestamp + if err := binary.Read(buf, binary.BigEndian, &t.Timestamp); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + + // Read aggregate_signature + var sigLen uint32 + if err := binary.Read(buf, 
binary.BigEndian, &sigLen); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + if sigLen > 0 { + sigBytes := make([]byte, sigLen) + if _, err := buf.Read(sigBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + t.AggregateSignature = &BLS48581AggregateSignature{} + if err := t.AggregateSignature.FromCanonicalBytes(sigBytes); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + } + + return nil +} + func (g *GlobalFrame) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) @@ -3761,6 +4687,22 @@ func (h *GlobalFrameHeader) Validate() error { ) } + // Requests root commitment should be 64 or 74 bytes + if len(h.RequestsRoot) != 64 && len(h.RequestsRoot) != 74 { + return errors.Wrap( + errors.New("invalid request root commitment length"), + "validate", + ) + } + + // Prover must be set + if len(h.Prover) != 32 { + return errors.Wrap( + errors.New("invalid prover length"), + "validate", + ) + } + // Signature must be present if h.PublicKeySignatureBls48581 == nil { return errors.Wrap(errors.New("missing signature"), "validate") @@ -3831,11 +4773,6 @@ func (h *FrameHeader) Validate() error { // Fee multiplier vote is uint64, any value is valid - // Signature must be present - if h.PublicKeySignatureBls48581 == nil { - return errors.Wrap(errors.New("missing signature"), "validate") - } - return nil } @@ -3881,43 +4818,179 @@ func (p *ProverLivenessCheck) GetSignatureDomain() []byte { return slices.Concat([]byte("PROVER_LIVENESS"), p.Filter) } -var _ ValidatableMessage = (*FrameVote)(nil) +var _ ValidatableMessage = (*ProposalVote)(nil) -func (f *FrameVote) Validate() error { +func (f *ProposalVote) Validate() error { if f == nil { return errors.Wrap(errors.New("nil frame vote"), "validate") } - // Frame number is uint64, any value is valid + // Rank and frame number is uint64, any value is valid - // Proposer should be 32 bytes - if len(f.Proposer) != 32 { - return errors.Wrap(errors.New("invalid proposer length"), "validate") + // Selector should be 32 bytes (proposal) or zero (timeout) + if len(f.Selector) != 32 && len(f.Selector) != 0 { + return errors.Wrap( + errors.Errorf("invalid selector length: %d", len(f.Selector)), + "validate", + ) } - // Approve is bool, any value is valid - // Signature must be present if f.PublicKeySignatureBls48581 == nil { return errors.Wrap(errors.New("missing signature"), "validate") } // Validate the signature - if err := f.PublicKeySignatureBls48581.Validate(); err != nil { - return errors.Wrap(err, "validate") + if len(f.Filter) == 0 { + if err := f.PublicKeySignatureBls48581.Validate(); err != nil { + return errors.Wrap(err, "validate") + } + } else { + if len(f.PublicKeySignatureBls48581.Address) != 32 { + return errors.Wrap(errors.New("invalid address"), "validate") + } + // handle extended sig + if len(f.PublicKeySignatureBls48581.Signature) != 74 && + len(f.PublicKeySignatureBls48581.Signature) != 590 { + return errors.Wrap(errors.New("invalid bls48581 signature"), "validate") + } } return nil } -var _ ValidatableMessage = (*FrameConfirmation)(nil) +var _ ValidatableMessage = (*AppShardProposal)(nil) -func (f *FrameConfirmation) Validate() error { +func (f *AppShardProposal) Validate() error { + if f == nil { + return errors.Wrap(errors.New("nil proposal"), "validate") + } + + if f.State == nil { + return errors.Wrap( + errors.New("missing state"), + "validate", + ) + } + + if err := f.State.Validate(); err != nil { + return err + } + + if f.ParentQuorumCertificate == nil { + return 
errors.Wrap( + errors.New("missing parent quorum certificate"), + "validate", + ) + } + + if err := f.ParentQuorumCertificate.Validate(); err != nil { + return err + } + + if f.PriorRankTimeoutCertificate != nil { + if err := f.PriorRankTimeoutCertificate.Validate(); err != nil { + return err + } + } + + if f.Vote == nil { + return errors.Wrap(errors.New("missing vote"), "validate") + } + + if err := f.Vote.Validate(); err != nil { + return err + } + + return nil +} + +var _ ValidatableMessage = (*GlobalProposal)(nil) + +func (f *GlobalProposal) Validate() error { + if f == nil { + return errors.Wrap(errors.New("nil proposal"), "validate") + } + + if f.State == nil { + return errors.Wrap( + errors.New("missing state"), + "validate", + ) + } + + if err := f.State.Validate(); err != nil { + return err + } + + if f.ParentQuorumCertificate == nil { + return errors.Wrap( + errors.New("missing parent quorum certificate"), + "validate", + ) + } + + if err := f.ParentQuorumCertificate.Validate(); err != nil { + return err + } + + if f.PriorRankTimeoutCertificate != nil { + if err := f.PriorRankTimeoutCertificate.Validate(); err != nil { + return err + } + } + + if f.Vote == nil { + return errors.Wrap(errors.New("missing vote"), "validate") + } + + if err := f.Vote.Validate(); err != nil { + return err + } + + return nil +} + +var _ ValidatableMessage = (*TimeoutState)(nil) + +func (f *TimeoutState) Validate() error { + if f == nil { + return errors.Wrap(errors.New("nil timeout state"), "validate") + } + + if f.LatestQuorumCertificate == nil { + return errors.Wrap(errors.New("nil latest quorum certificate"), "validate") + } + + if err := f.LatestQuorumCertificate.Validate(); err != nil { + return err + } + + if f.PriorRankTimeoutCertificate != nil { + if err := f.PriorRankTimeoutCertificate.Validate(); err != nil { + return err + } + } + + if f.Vote == nil { + return errors.Wrap(errors.New("missing vote"), "validate") + } + + if err := f.Vote.Validate(); err != nil { + return err + } + + return nil +} + +var _ ValidatableMessage = (*QuorumCertificate)(nil) + +func (f *QuorumCertificate) Validate() error { if f == nil { return errors.Wrap(errors.New("nil frame confirmation"), "validate") } - // Frame number is uint64, any value is valid + // Rank and frame number is uint64, any value is valid // Selector should be 32 bytes if len(f.Selector) != 32 { @@ -3932,7 +5005,24 @@ func (f *FrameConfirmation) Validate() error { return errors.Wrap(errors.New("missing aggregate signature"), "validate") } - return nil + return f.AggregateSignature.Validate() +} + +var _ ValidatableMessage = (*TimeoutCertificate)(nil) + +func (f *TimeoutCertificate) Validate() error { + if f == nil { + return errors.Wrap(errors.New("nil frame confirmation"), "validate") + } + + // Rank and frame number is uint64, any value is valid + + // Aggregate signature must be present + if f.AggregateSignature == nil { + return errors.Wrap(errors.New("missing aggregate signature"), "validate") + } + + return f.AggregateSignature.Validate() } var _ ValidatableMessage = (*GlobalFrame)(nil) @@ -3982,8 +5072,19 @@ func (a *AppShardFrame) Validate() error { return errors.Wrap(err, "validate") } - // Requests are raw bytes, no specific validation needed - // Each request will be validated when deserialized + // Validate each request + for i, request := range a.Requests { + if request == nil { + return errors.Wrapf( + errors.New("nil request"), + "validate: request %d", + i, + ) + } + if err := request.Validate(); err != nil { + return 
errors.Wrapf(err, "validate: request %d", i) + } + } return nil } diff --git a/protobufs/global.pb.go b/protobufs/global.pb.go index 15dc28c..ad365d9 100644 --- a/protobufs/global.pb.go +++ b/protobufs/global.pb.go @@ -1158,6 +1158,10 @@ type GlobalFrameHeader struct { // A strictly monotonically-increasing frame number. Used for culling old // frames past a configurable cutoff point. FrameNumber uint64 `protobuf:"varint,1,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // A strictly monotonically-increasing rank number. Disambiguates timeouts + // and allows for consistent determination of leader, without having to rely + // on parsing internal state. + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` // The self-reported timestamp from the proof publisher, encoded as an int64 // of the Unix epoch in milliseconds. Should be good until // 292278994-08-17 07:12:55.807, at which point, this is someone else's @@ -1167,27 +1171,31 @@ type GlobalFrameHeader struct { // is discarded in preference to the runner up electees, unless there is // simply no alternative available (for example, if a network outage occurred // from an upgrade or bug). - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The difficulty level used for the frame. Difficulty is calculated based on // the previous 60 timestamps correlated with difficulties, such that the // interval smooths out to align to the type-defined rate. This is expected to // increase subtly with clock speed and future hardware implementations, but // due to incentive alignment associated with global proofs, not fastest clock // in the west, should be gradual. - Difficulty uint32 `protobuf:"varint,3,opt,name=difficulty,proto3" json:"difficulty,omitempty"` + Difficulty uint32 `protobuf:"varint,4,opt,name=difficulty,proto3" json:"difficulty,omitempty"` // The output data from the VDF, serialized as bytes. For Wesolowski, this is // an encoding of the 258 byte Y value concatenated with the 258 byte proof // value. - Output []byte `protobuf:"bytes,4,opt,name=output,proto3" json:"output,omitempty"` + Output []byte `protobuf:"bytes,5,opt,name=output,proto3" json:"output,omitempty"` // The selector value of the previous frame's output, produced as a Poseidon // hash of the output. 
- ParentSelector []byte `protobuf:"bytes,5,opt,name=parent_selector,json=parentSelector,proto3" json:"parent_selector,omitempty"` + ParentSelector []byte `protobuf:"bytes,6,opt,name=parent_selector,json=parentSelector,proto3" json:"parent_selector,omitempty"` // The 256 global commitment values - GlobalCommitments [][]byte `protobuf:"bytes,6,rep,name=global_commitments,json=globalCommitments,proto3" json:"global_commitments,omitempty"` + GlobalCommitments [][]byte `protobuf:"bytes,7,rep,name=global_commitments,json=globalCommitments,proto3" json:"global_commitments,omitempty"` // The prover tree root commitment - ProverTreeCommitment []byte `protobuf:"bytes,7,opt,name=prover_tree_commitment,json=proverTreeCommitment,proto3" json:"prover_tree_commitment,omitempty"` + ProverTreeCommitment []byte `protobuf:"bytes,8,opt,name=prover_tree_commitment,json=proverTreeCommitment,proto3" json:"prover_tree_commitment,omitempty"` + // The request root commitment + RequestsRoot []byte `protobuf:"bytes,9,opt,name=requests_root,json=requestsRoot,proto3" json:"requests_root,omitempty"` + // The prover of the frame + Prover []byte `protobuf:"bytes,10,opt,name=prover,proto3" json:"prover,omitempty"` // The confirmation signatures of the frame - PublicKeySignatureBls48581 *BLS48581AggregateSignature `protobuf:"bytes,8,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` + PublicKeySignatureBls48581 *BLS48581AggregateSignature `protobuf:"bytes,11,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` } func (x *GlobalFrameHeader) Reset() { @@ -1229,6 +1237,13 @@ func (x *GlobalFrameHeader) GetFrameNumber() uint64 { return 0 } +func (x *GlobalFrameHeader) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + func (x *GlobalFrameHeader) GetTimestamp() int64 { if x != nil { return x.Timestamp @@ -1271,6 +1286,20 @@ func (x *GlobalFrameHeader) GetProverTreeCommitment() []byte { return nil } +func (x *GlobalFrameHeader) GetRequestsRoot() []byte { + if x != nil { + return x.RequestsRoot + } + return nil +} + +func (x *GlobalFrameHeader) GetProver() []byte { + if x != nil { + return x.Prover + } + return nil +} + func (x *GlobalFrameHeader) GetPublicKeySignatureBls48581() *BLS48581AggregateSignature { if x != nil { return x.PublicKeySignatureBls48581 @@ -1288,6 +1317,10 @@ type FrameHeader struct { // A strictly monotonically-increasing frame number. Used for culling old // frames past a configurable cutoff point. FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // A strictly monotonically-increasing rank number. Disambiguates timeouts + // and allows for consistent determination of leader, without having to rely + // on parsing internal state. + Rank uint64 `protobuf:"varint,3,opt,name=rank,proto3" json:"rank,omitempty"` // The self-reported timestamp from the proof publisher, encoded as an int64 // of the Unix epoch in milliseconds. Should be good until // 292278994-08-17 07:12:55.807, at which point, this is someone else's @@ -1297,32 +1330,32 @@ type FrameHeader struct { // is discarded in preference to the runner up electees, unless there is // simply no alternative available (for example, if a network outage occurred // from an upgrade or bug). 
- Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The difficulty level used for the frame. Difficulty is calculated based on // the previous 60 timestamps correlated with difficulties, such that the // interval smooths out to align to the type-defined rate. This is expected to // increase subtly with clock speed and future hardware implementations, but // due to incentive alignment associated with global proofs, not fastest clock // in the west, should be gradual. - Difficulty uint32 `protobuf:"varint,4,opt,name=difficulty,proto3" json:"difficulty,omitempty"` + Difficulty uint32 `protobuf:"varint,5,opt,name=difficulty,proto3" json:"difficulty,omitempty"` // The output data from the VDF, serialized as bytes. For Wesolowski, this is // an encoding of the 258 byte Y value concatenated with the 258 byte proof // value. - Output []byte `protobuf:"bytes,5,opt,name=output,proto3" json:"output,omitempty"` + Output []byte `protobuf:"bytes,6,opt,name=output,proto3" json:"output,omitempty"` // The selector value of the previous frame's output, produced as a Poseidon // hash of the output. - ParentSelector []byte `protobuf:"bytes,6,opt,name=parent_selector,json=parentSelector,proto3" json:"parent_selector,omitempty"` + ParentSelector []byte `protobuf:"bytes,7,opt,name=parent_selector,json=parentSelector,proto3" json:"parent_selector,omitempty"` // The root commitment to the set of requests for the frame. - RequestsRoot []byte `protobuf:"bytes,7,opt,name=requests_root,json=requestsRoot,proto3" json:"requests_root,omitempty"` + RequestsRoot []byte `protobuf:"bytes,8,opt,name=requests_root,json=requestsRoot,proto3" json:"requests_root,omitempty"` // The root commitments to to the hypergraph state at the address. - StateRoots [][]byte `protobuf:"bytes,8,rep,name=state_roots,json=stateRoots,proto3" json:"state_roots,omitempty"` + StateRoots [][]byte `protobuf:"bytes,9,rep,name=state_roots,json=stateRoots,proto3" json:"state_roots,omitempty"` // The prover of the frame, incorporated into the input to the VDF. - Prover []byte `protobuf:"bytes,9,opt,name=prover,proto3" json:"prover,omitempty"` + Prover []byte `protobuf:"bytes,10,opt,name=prover,proto3" json:"prover,omitempty"` // The prover's proposed fee multiplier, incorporated into sliding window // averaging. 
- FeeMultiplierVote uint64 `protobuf:"varint,10,opt,name=fee_multiplier_vote,json=feeMultiplierVote,proto3" json:"fee_multiplier_vote,omitempty"` + FeeMultiplierVote uint64 `protobuf:"varint,11,opt,name=fee_multiplier_vote,json=feeMultiplierVote,proto3" json:"fee_multiplier_vote,omitempty"` // The confirmation signatures of the frame - PublicKeySignatureBls48581 *BLS48581AggregateSignature `protobuf:"bytes,11,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` + PublicKeySignatureBls48581 *BLS48581AggregateSignature `protobuf:"bytes,12,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` } func (x *FrameHeader) Reset() { @@ -1371,6 +1404,13 @@ func (x *FrameHeader) GetFrameNumber() uint64 { return 0 } +func (x *FrameHeader) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + func (x *FrameHeader) GetTimestamp() int64 { if x != nil { return x.Timestamp @@ -1441,14 +1481,16 @@ type ProverLivenessCheck struct { // The filter for the prover's commitment in the trie Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` // The frame number for which this liveness check is being sent - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` // The timestamp when the liveness check was created - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The hash of the shard commitments and prover root - CommitmentHash []byte `protobuf:"bytes,4,opt,name=commitment_hash,json=commitmentHash,proto3" json:"commitment_hash,omitempty"` + CommitmentHash []byte `protobuf:"bytes,5,opt,name=commitment_hash,json=commitmentHash,proto3" json:"commitment_hash,omitempty"` // The BLS signature with the prover's address - PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,5,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` + PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,6,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` } func (x *ProverLivenessCheck) Reset() { @@ -1490,6 +1532,13 @@ func (x *ProverLivenessCheck) GetFilter() []byte { return nil } +func (x *ProverLivenessCheck) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + func (x *ProverLivenessCheck) GetFrameNumber() uint64 { if x != nil { return x.FrameNumber @@ -1518,27 +1567,23 @@ func (x *ProverLivenessCheck) GetPublicKeySignatureBls48581() *BLS48581Addressed return nil } -type FrameVote struct { +type AppShardProposal struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The filter for the prover's commitment in the trie - Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // The frame number being voted on - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` - // The 
proposer of the frame - Proposer []byte `protobuf:"bytes,3,opt,name=proposer,proto3" json:"proposer,omitempty"` - // Whether the voter approves the frame - Approve bool `protobuf:"varint,4,opt,name=approve,proto3" json:"approve,omitempty"` - // The timestamp when the vote was created - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The BLS signature with the voter's address - PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,6,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` + // The associated state for the proposal + State *AppShardFrame `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + // The parent quorum certificate to this state + ParentQuorumCertificate *QuorumCertificate `protobuf:"bytes,2,opt,name=parent_quorum_certificate,json=parentQuorumCertificate,proto3" json:"parent_quorum_certificate,omitempty"` + // The previous rank's timeout certificate, if applicable + PriorRankTimeoutCertificate *TimeoutCertificate `protobuf:"bytes,3,opt,name=prior_rank_timeout_certificate,json=priorRankTimeoutCertificate,proto3" json:"prior_rank_timeout_certificate,omitempty"` + // The proposer's vote + Vote *ProposalVote `protobuf:"bytes,4,opt,name=vote,proto3" json:"vote,omitempty"` } -func (x *FrameVote) Reset() { - *x = FrameVote{} +func (x *AppShardProposal) Reset() { + *x = AppShardProposal{} if protoimpl.UnsafeEnabled { mi := &file_global_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1546,13 +1591,13 @@ func (x *FrameVote) Reset() { } } -func (x *FrameVote) String() string { +func (x *AppShardProposal) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FrameVote) ProtoMessage() {} +func (*AppShardProposal) ProtoMessage() {} -func (x *FrameVote) ProtoReflect() protoreflect.Message { +func (x *AppShardProposal) ProtoReflect() protoreflect.Message { mi := &file_global_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1564,72 +1609,56 @@ func (x *FrameVote) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FrameVote.ProtoReflect.Descriptor instead. -func (*FrameVote) Descriptor() ([]byte, []int) { +// Deprecated: Use AppShardProposal.ProtoReflect.Descriptor instead. 
+func (*AppShardProposal) Descriptor() ([]byte, []int) { return file_global_proto_rawDescGZIP(), []int{15} } -func (x *FrameVote) GetFilter() []byte { +func (x *AppShardProposal) GetState() *AppShardFrame { if x != nil { - return x.Filter + return x.State } return nil } -func (x *FrameVote) GetFrameNumber() uint64 { +func (x *AppShardProposal) GetParentQuorumCertificate() *QuorumCertificate { if x != nil { - return x.FrameNumber - } - return 0 -} - -func (x *FrameVote) GetProposer() []byte { - if x != nil { - return x.Proposer + return x.ParentQuorumCertificate } return nil } -func (x *FrameVote) GetApprove() bool { +func (x *AppShardProposal) GetPriorRankTimeoutCertificate() *TimeoutCertificate { if x != nil { - return x.Approve - } - return false -} - -func (x *FrameVote) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *FrameVote) GetPublicKeySignatureBls48581() *BLS48581AddressedSignature { - if x != nil { - return x.PublicKeySignatureBls48581 + return x.PriorRankTimeoutCertificate } return nil } -type FrameConfirmation struct { +func (x *AppShardProposal) GetVote() *ProposalVote { + if x != nil { + return x.Vote + } + return nil +} + +type GlobalProposal struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The filter for the prover's commitment in the trie - Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // The frame number that was confirmed - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` - // The selector (hash) of the confirmed frame - Selector []byte `protobuf:"bytes,3,opt,name=selector,proto3" json:"selector,omitempty"` - // The timestamp when the vote was created - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The aggregated BLS signature from all voters - AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,5,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` + // The associated state for the proposal + State *GlobalFrame `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + // The parent quorum certificate to this state + ParentQuorumCertificate *QuorumCertificate `protobuf:"bytes,2,opt,name=parent_quorum_certificate,json=parentQuorumCertificate,proto3" json:"parent_quorum_certificate,omitempty"` + // The previous rank's timeout certificate, if applicable + PriorRankTimeoutCertificate *TimeoutCertificate `protobuf:"bytes,3,opt,name=prior_rank_timeout_certificate,json=priorRankTimeoutCertificate,proto3" json:"prior_rank_timeout_certificate,omitempty"` + // The proposer's vote + Vote *ProposalVote `protobuf:"bytes,4,opt,name=vote,proto3" json:"vote,omitempty"` } -func (x *FrameConfirmation) Reset() { - *x = FrameConfirmation{} +func (x *GlobalProposal) Reset() { + *x = GlobalProposal{} if protoimpl.UnsafeEnabled { mi := &file_global_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1637,13 +1666,13 @@ func (x *FrameConfirmation) Reset() { } } -func (x *FrameConfirmation) String() string { +func (x *GlobalProposal) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FrameConfirmation) ProtoMessage() {} +func (*GlobalProposal) ProtoMessage() {} -func (x *FrameConfirmation) ProtoReflect() protoreflect.Message { +func (x *GlobalProposal) ProtoReflect() protoreflect.Message { mi := &file_global_proto_msgTypes[16] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1655,40 +1684,404 @@ func (x *FrameConfirmation) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FrameConfirmation.ProtoReflect.Descriptor instead. -func (*FrameConfirmation) Descriptor() ([]byte, []int) { +// Deprecated: Use GlobalProposal.ProtoReflect.Descriptor instead. +func (*GlobalProposal) Descriptor() ([]byte, []int) { return file_global_proto_rawDescGZIP(), []int{16} } -func (x *FrameConfirmation) GetFilter() []byte { +func (x *GlobalProposal) GetState() *GlobalFrame { + if x != nil { + return x.State + } + return nil +} + +func (x *GlobalProposal) GetParentQuorumCertificate() *QuorumCertificate { + if x != nil { + return x.ParentQuorumCertificate + } + return nil +} + +func (x *GlobalProposal) GetPriorRankTimeoutCertificate() *TimeoutCertificate { + if x != nil { + return x.PriorRankTimeoutCertificate + } + return nil +} + +func (x *GlobalProposal) GetVote() *ProposalVote { + if x != nil { + return x.Vote + } + return nil +} + +type ProposalVote struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter for the prover's commitment in the trie + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The frame number for which this proposal applies + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // The selector being voted for + Selector []byte `protobuf:"bytes,4,opt,name=selector,proto3" json:"selector,omitempty"` + // The timestamp when the vote was created + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The BLS signature with the voter's address + PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,6,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` +} + +func (x *ProposalVote) Reset() { + *x = ProposalVote{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProposalVote) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProposalVote) ProtoMessage() {} + +func (x *ProposalVote) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProposalVote.ProtoReflect.Descriptor instead. 
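// Illustrative sketch (editor's example, not part of the patch): per the
// ProposalVote.Validate rules added above, Selector is either 32 bytes (a vote
// for a concrete proposal) or empty (a timeout vote). isTimeoutVote is a
// hypothetical helper name, assumed to sit alongside the protobufs package.
func isTimeoutVote(v *ProposalVote) bool {
	// A nil vote is treated as "not a timeout vote"; Validate rejects nil anyway.
	return v != nil && len(v.GetSelector()) == 0
}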
+func (*ProposalVote) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{17} +} + +func (x *ProposalVote) GetFilter() []byte { if x != nil { return x.Filter } return nil } -func (x *FrameConfirmation) GetFrameNumber() uint64 { +func (x *ProposalVote) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *ProposalVote) GetFrameNumber() uint64 { if x != nil { return x.FrameNumber } return 0 } -func (x *FrameConfirmation) GetSelector() []byte { +func (x *ProposalVote) GetSelector() []byte { if x != nil { return x.Selector } return nil } -func (x *FrameConfirmation) GetTimestamp() int64 { +func (x *ProposalVote) GetTimestamp() uint64 { if x != nil { return x.Timestamp } return 0 } -func (x *FrameConfirmation) GetAggregateSignature() *BLS48581AggregateSignature { +func (x *ProposalVote) GetPublicKeySignatureBls48581() *BLS48581AddressedSignature { + if x != nil { + return x.PublicKeySignatureBls48581 + } + return nil +} + +type TimeoutState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The latest quorum certificate seen by the pacemaker. + LatestQuorumCertificate *QuorumCertificate `protobuf:"bytes,1,opt,name=latest_quorum_certificate,json=latestQuorumCertificate,proto3" json:"latest_quorum_certificate,omitempty"` + // The previous rank's timeout certificate, if applicable. + PriorRankTimeoutCertificate *TimeoutCertificate `protobuf:"bytes,2,opt,name=prior_rank_timeout_certificate,json=priorRankTimeoutCertificate,proto3" json:"prior_rank_timeout_certificate,omitempty"` + // The signed payload which will become part of the new timeout certificate. + Vote *ProposalVote `protobuf:"bytes,3,opt,name=vote,proto3" json:"vote,omitempty"` + // TimeoutTick is the number of times the `timeout.Controller` has + // (re-)emitted the timeout for this rank. When the timer for the rank's + // original duration expires, a `TimeoutState` with `TimeoutTick = 0` is + // broadcast. Subsequently, `timeout.Controller` re-broadcasts the + // `TimeoutState` periodically based on some internal heuristic. Each time + // we attempt a re-broadcast, the `TimeoutTick` is incremented. Incrementing + // the field prevents de-duplicated within the network layer, which in turn + // guarantees quick delivery of the `TimeoutState` after GST and facilitates + // recovery. + TimeoutTick uint64 `protobuf:"varint,4,opt,name=timeout_tick,json=timeoutTick,proto3" json:"timeout_tick,omitempty"` + // The timestamp of the message (not the timeout state) + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *TimeoutState) Reset() { + *x = TimeoutState{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeoutState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeoutState) ProtoMessage() {} + +func (x *TimeoutState) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeoutState.ProtoReflect.Descriptor instead. 
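// Illustrative sketch (editor's example, not part of the patch): per the
// TimeoutState.Validate rules added in this change, LatestQuorumCertificate and
// Vote must be set while PriorRankTimeoutCertificate may be nil, in which case the
// canonical encoding writes a zero length for it. newTimeoutStateForBroadcast is a
// hypothetical constructor; qc and vote are assumed to already be valid.
func newTimeoutStateForBroadcast(
	qc *QuorumCertificate,
	vote *ProposalVote,
	tick uint64,
	nowMillis uint64,
) (*TimeoutState, error) {
	ts := &TimeoutState{
		LatestQuorumCertificate: qc,   // required by Validate
		Vote:                    vote, // required by Validate
		TimeoutTick:             tick, // 0 on first broadcast, incremented on re-broadcast
		Timestamp:               nowMillis,
	}
	if err := ts.Validate(); err != nil {
		return nil, err
	}
	return ts, nil
}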
+func (*TimeoutState) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{18} +} + +func (x *TimeoutState) GetLatestQuorumCertificate() *QuorumCertificate { + if x != nil { + return x.LatestQuorumCertificate + } + return nil +} + +func (x *TimeoutState) GetPriorRankTimeoutCertificate() *TimeoutCertificate { + if x != nil { + return x.PriorRankTimeoutCertificate + } + return nil +} + +func (x *TimeoutState) GetVote() *ProposalVote { + if x != nil { + return x.Vote + } + return nil +} + +func (x *TimeoutState) GetTimeoutTick() uint64 { + if x != nil { + return x.TimeoutTick + } + return 0 +} + +func (x *TimeoutState) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +type QuorumCertificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter for the prover's commitment in the trie + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The frame number for which this certificate applies + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // The selector (hash) of the confirmed frame + Selector []byte `protobuf:"bytes,4,opt,name=selector,proto3" json:"selector,omitempty"` + // The timestamp of the message (not the certificate) + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The aggregated BLS signature from all voters + AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,6,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` +} + +func (x *QuorumCertificate) Reset() { + *x = QuorumCertificate{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuorumCertificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuorumCertificate) ProtoMessage() {} + +func (x *QuorumCertificate) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuorumCertificate.ProtoReflect.Descriptor instead. 
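// Illustrative sketch (editor's example, not part of the patch): throughout the
// canonical encodings above, nested messages are written with a big-endian uint32
// length prefix, and optional fields (such as a nil prior-rank timeout
// certificate) are encoded as a zero length. writeLengthPrefixed is a hypothetical
// helper showing that shape; it assumes the file's existing bytes and
// encoding/binary imports.
func writeLengthPrefixed(buf *bytes.Buffer, payload []byte) error {
	// A nil or empty payload is written as length 0, matching the "field absent" case.
	if err := binary.Write(buf, binary.BigEndian, uint32(len(payload))); err != nil {
		return err
	}
	_, err := buf.Write(payload)
	return err
}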
+func (*QuorumCertificate) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{19} +} + +func (x *QuorumCertificate) GetFilter() []byte { + if x != nil { + return x.Filter + } + return nil +} + +func (x *QuorumCertificate) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *QuorumCertificate) GetFrameNumber() uint64 { + if x != nil { + return x.FrameNumber + } + return 0 +} + +func (x *QuorumCertificate) GetSelector() []byte { + if x != nil { + return x.Selector + } + return nil +} + +func (x *QuorumCertificate) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *QuorumCertificate) GetAggregateSignature() *BLS48581AggregateSignature { + if x != nil { + return x.AggregateSignature + } + return nil +} + +type TimeoutCertificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter for the prover's commitment in the trie + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The latest ranks in signer order + LatestRanks []uint64 `protobuf:"varint,3,rep,packed,name=latest_ranks,json=latestRanks,proto3" json:"latest_ranks,omitempty"` + // The latest quorum certificate from all timeouts + LatestQuorumCertificate *QuorumCertificate `protobuf:"bytes,4,opt,name=latest_quorum_certificate,json=latestQuorumCertificate,proto3" json:"latest_quorum_certificate,omitempty"` + // The timestamp of the message (not the certificate) + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The aggregated BLS signature from all voters + AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,6,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` +} + +func (x *TimeoutCertificate) Reset() { + *x = TimeoutCertificate{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeoutCertificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeoutCertificate) ProtoMessage() {} + +func (x *TimeoutCertificate) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeoutCertificate.ProtoReflect.Descriptor instead. 
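// Illustrative sketch (editor's example, not part of the patch): latest_ranks is
// serialized above as a big-endian uint32 count followed by one big-endian uint64
// per rank, and decoded with binary.Read into a pre-sized slice. A hypothetical
// standalone round trip of just that portion, assuming bytes and encoding/binary:
func exampleLatestRanksRoundTrip(ranks []uint64) ([]uint64, error) {
	buf := new(bytes.Buffer)
	// Encode: count, then each rank.
	if err := binary.Write(buf, binary.BigEndian, uint32(len(ranks))); err != nil {
		return nil, err
	}
	for _, r := range ranks {
		if err := binary.Write(buf, binary.BigEndian, r); err != nil {
			return nil, err
		}
	}
	// Decode: read the count, size the slice, then bulk-read the ranks.
	var count uint32
	if err := binary.Read(buf, binary.BigEndian, &count); err != nil {
		return nil, err
	}
	out := make([]uint64, count)
	if err := binary.Read(buf, binary.BigEndian, out); err != nil {
		return nil, err
	}
	return out, nil
}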
+func (*TimeoutCertificate) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{20} +} + +func (x *TimeoutCertificate) GetFilter() []byte { + if x != nil { + return x.Filter + } + return nil +} + +func (x *TimeoutCertificate) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *TimeoutCertificate) GetLatestRanks() []uint64 { + if x != nil { + return x.LatestRanks + } + return nil +} + +func (x *TimeoutCertificate) GetLatestQuorumCertificate() *QuorumCertificate { + if x != nil { + return x.LatestQuorumCertificate + } + return nil +} + +func (x *TimeoutCertificate) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *TimeoutCertificate) GetAggregateSignature() *BLS48581AggregateSignature { if x != nil { return x.AggregateSignature } @@ -1707,7 +2100,7 @@ type GlobalFrame struct { func (x *GlobalFrame) Reset() { *x = GlobalFrame{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[17] + mi := &file_global_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1720,7 +2113,7 @@ func (x *GlobalFrame) String() string { func (*GlobalFrame) ProtoMessage() {} func (x *GlobalFrame) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[17] + mi := &file_global_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1733,7 +2126,7 @@ func (x *GlobalFrame) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalFrame.ProtoReflect.Descriptor instead. func (*GlobalFrame) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{17} + return file_global_proto_rawDescGZIP(), []int{21} } func (x *GlobalFrame) GetHeader() *GlobalFrameHeader { @@ -1762,7 +2155,7 @@ type AppShardFrame struct { func (x *AppShardFrame) Reset() { *x = AppShardFrame{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[18] + mi := &file_global_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1775,7 +2168,7 @@ func (x *AppShardFrame) String() string { func (*AppShardFrame) ProtoMessage() {} func (x *AppShardFrame) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[18] + mi := &file_global_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1788,7 +2181,7 @@ func (x *AppShardFrame) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardFrame.ProtoReflect.Descriptor instead. 
func (*AppShardFrame) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{18} + return file_global_proto_rawDescGZIP(), []int{22} } func (x *AppShardFrame) GetHeader() *FrameHeader { @@ -1817,7 +2210,7 @@ type GlobalAlert struct { func (x *GlobalAlert) Reset() { *x = GlobalAlert{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[19] + mi := &file_global_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1830,7 +2223,7 @@ func (x *GlobalAlert) String() string { func (*GlobalAlert) ProtoMessage() {} func (x *GlobalAlert) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[19] + mi := &file_global_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1843,7 +2236,7 @@ func (x *GlobalAlert) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalAlert.ProtoReflect.Descriptor instead. func (*GlobalAlert) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{19} + return file_global_proto_rawDescGZIP(), []int{23} } func (x *GlobalAlert) GetMessage() string { @@ -1871,7 +2264,7 @@ type GetGlobalFrameRequest struct { func (x *GetGlobalFrameRequest) Reset() { *x = GetGlobalFrameRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[20] + mi := &file_global_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1884,7 +2277,7 @@ func (x *GetGlobalFrameRequest) String() string { func (*GetGlobalFrameRequest) ProtoMessage() {} func (x *GetGlobalFrameRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[20] + mi := &file_global_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1897,7 +2290,7 @@ func (x *GetGlobalFrameRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalFrameRequest.ProtoReflect.Descriptor instead. func (*GetGlobalFrameRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{20} + return file_global_proto_rawDescGZIP(), []int{24} } func (x *GetGlobalFrameRequest) GetFrameNumber() uint64 { @@ -1919,7 +2312,7 @@ type GlobalFrameResponse struct { func (x *GlobalFrameResponse) Reset() { *x = GlobalFrameResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[21] + mi := &file_global_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1932,7 +2325,7 @@ func (x *GlobalFrameResponse) String() string { func (*GlobalFrameResponse) ProtoMessage() {} func (x *GlobalFrameResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[21] + mi := &file_global_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1945,7 +2338,7 @@ func (x *GlobalFrameResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalFrameResponse.ProtoReflect.Descriptor instead. 
func (*GlobalFrameResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{21} + return file_global_proto_rawDescGZIP(), []int{25} } func (x *GlobalFrameResponse) GetFrame() *GlobalFrame { @@ -1962,6 +2355,100 @@ func (x *GlobalFrameResponse) GetProof() []byte { return nil } +type GetGlobalProposalRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FrameNumber uint64 `protobuf:"varint,1,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` +} + +func (x *GetGlobalProposalRequest) Reset() { + *x = GetGlobalProposalRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGlobalProposalRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGlobalProposalRequest) ProtoMessage() {} + +func (x *GetGlobalProposalRequest) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGlobalProposalRequest.ProtoReflect.Descriptor instead. +func (*GetGlobalProposalRequest) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{26} +} + +func (x *GetGlobalProposalRequest) GetFrameNumber() uint64 { + if x != nil { + return x.FrameNumber + } + return 0 +} + +type GlobalProposalResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proposal *GlobalProposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` +} + +func (x *GlobalProposalResponse) Reset() { + *x = GlobalProposalResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GlobalProposalResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GlobalProposalResponse) ProtoMessage() {} + +func (x *GlobalProposalResponse) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GlobalProposalResponse.ProtoReflect.Descriptor instead. 
+func (*GlobalProposalResponse) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{27} +} + +func (x *GlobalProposalResponse) GetProposal() *GlobalProposal { + if x != nil { + return x.Proposal + } + return nil +} + type GetAppShardFrameRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1974,7 +2461,7 @@ type GetAppShardFrameRequest struct { func (x *GetAppShardFrameRequest) Reset() { *x = GetAppShardFrameRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[22] + mi := &file_global_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1987,7 +2474,7 @@ func (x *GetAppShardFrameRequest) String() string { func (*GetAppShardFrameRequest) ProtoMessage() {} func (x *GetAppShardFrameRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[22] + mi := &file_global_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2000,7 +2487,7 @@ func (x *GetAppShardFrameRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardFrameRequest.ProtoReflect.Descriptor instead. func (*GetAppShardFrameRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{22} + return file_global_proto_rawDescGZIP(), []int{28} } func (x *GetAppShardFrameRequest) GetFilter() []byte { @@ -2029,7 +2516,7 @@ type AppShardFrameResponse struct { func (x *AppShardFrameResponse) Reset() { *x = AppShardFrameResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[23] + mi := &file_global_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2042,7 +2529,7 @@ func (x *AppShardFrameResponse) String() string { func (*AppShardFrameResponse) ProtoMessage() {} func (x *AppShardFrameResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[23] + mi := &file_global_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2055,7 +2542,7 @@ func (x *AppShardFrameResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardFrameResponse.ProtoReflect.Descriptor instead. 
func (*AppShardFrameResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{23} + return file_global_proto_rawDescGZIP(), []int{29} } func (x *AppShardFrameResponse) GetFrame() *AppShardFrame { @@ -2072,6 +2559,108 @@ func (x *AppShardFrameResponse) GetProof() []byte { return nil } +type GetAppShardProposalRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` +} + +func (x *GetAppShardProposalRequest) Reset() { + *x = GetAppShardProposalRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAppShardProposalRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAppShardProposalRequest) ProtoMessage() {} + +func (x *GetAppShardProposalRequest) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAppShardProposalRequest.ProtoReflect.Descriptor instead. +func (*GetAppShardProposalRequest) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{30} +} + +func (x *GetAppShardProposalRequest) GetFilter() []byte { + if x != nil { + return x.Filter + } + return nil +} + +func (x *GetAppShardProposalRequest) GetFrameNumber() uint64 { + if x != nil { + return x.FrameNumber + } + return 0 +} + +type AppShardProposalResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proposal *AppShardProposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` +} + +func (x *AppShardProposalResponse) Reset() { + *x = AppShardProposalResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppShardProposalResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppShardProposalResponse) ProtoMessage() {} + +func (x *AppShardProposalResponse) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppShardProposalResponse.ProtoReflect.Descriptor instead. 
+func (*AppShardProposalResponse) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{31} +} + +func (x *AppShardProposalResponse) GetProposal() *AppShardProposal { + if x != nil { + return x.Proposal + } + return nil +} + type GetAppShardsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2084,7 +2673,7 @@ type GetAppShardsRequest struct { func (x *GetAppShardsRequest) Reset() { *x = GetAppShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[24] + mi := &file_global_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2097,7 +2686,7 @@ func (x *GetAppShardsRequest) String() string { func (*GetAppShardsRequest) ProtoMessage() {} func (x *GetAppShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[24] + mi := &file_global_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2110,7 +2699,7 @@ func (x *GetAppShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardsRequest.ProtoReflect.Descriptor instead. func (*GetAppShardsRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{24} + return file_global_proto_rawDescGZIP(), []int{32} } func (x *GetAppShardsRequest) GetShardKey() []byte { @@ -2142,7 +2731,7 @@ type AppShardInfo struct { func (x *AppShardInfo) Reset() { *x = AppShardInfo{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[25] + mi := &file_global_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2155,7 +2744,7 @@ func (x *AppShardInfo) String() string { func (*AppShardInfo) ProtoMessage() {} func (x *AppShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[25] + mi := &file_global_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2168,7 +2757,7 @@ func (x *AppShardInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardInfo.ProtoReflect.Descriptor instead. func (*AppShardInfo) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{25} + return file_global_proto_rawDescGZIP(), []int{33} } func (x *AppShardInfo) GetPrefix() []uint32 { @@ -2217,7 +2806,7 @@ type GetAppShardsResponse struct { func (x *GetAppShardsResponse) Reset() { *x = GetAppShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[26] + mi := &file_global_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2230,7 +2819,7 @@ func (x *GetAppShardsResponse) String() string { func (*GetAppShardsResponse) ProtoMessage() {} func (x *GetAppShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[26] + mi := &file_global_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2243,7 +2832,7 @@ func (x *GetAppShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardsResponse.ProtoReflect.Descriptor instead. 
func (*GetAppShardsResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{26} + return file_global_proto_rawDescGZIP(), []int{34} } func (x *GetAppShardsResponse) GetInfo() []*AppShardInfo { @@ -2265,7 +2854,7 @@ type GetGlobalShardsRequest struct { func (x *GetGlobalShardsRequest) Reset() { *x = GetGlobalShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[27] + mi := &file_global_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2278,7 +2867,7 @@ func (x *GetGlobalShardsRequest) String() string { func (*GetGlobalShardsRequest) ProtoMessage() {} func (x *GetGlobalShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[27] + mi := &file_global_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2291,7 +2880,7 @@ func (x *GetGlobalShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalShardsRequest.ProtoReflect.Descriptor instead. func (*GetGlobalShardsRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{27} + return file_global_proto_rawDescGZIP(), []int{35} } func (x *GetGlobalShardsRequest) GetL1() []byte { @@ -2320,7 +2909,7 @@ type GetGlobalShardsResponse struct { func (x *GetGlobalShardsResponse) Reset() { *x = GetGlobalShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[28] + mi := &file_global_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2333,7 +2922,7 @@ func (x *GetGlobalShardsResponse) String() string { func (*GetGlobalShardsResponse) ProtoMessage() {} func (x *GetGlobalShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[28] + mi := &file_global_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2346,7 +2935,7 @@ func (x *GetGlobalShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalShardsResponse.ProtoReflect.Descriptor instead. func (*GetGlobalShardsResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{28} + return file_global_proto_rawDescGZIP(), []int{36} } func (x *GetGlobalShardsResponse) GetSize() []byte { @@ -2378,7 +2967,7 @@ type GetLockedAddressesRequest struct { func (x *GetLockedAddressesRequest) Reset() { *x = GetLockedAddressesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[29] + mi := &file_global_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2391,7 +2980,7 @@ func (x *GetLockedAddressesRequest) String() string { func (*GetLockedAddressesRequest) ProtoMessage() {} func (x *GetLockedAddressesRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[29] + mi := &file_global_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2404,7 +2993,7 @@ func (x *GetLockedAddressesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLockedAddressesRequest.ProtoReflect.Descriptor instead. 
func (*GetLockedAddressesRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{29} + return file_global_proto_rawDescGZIP(), []int{37} } func (x *GetLockedAddressesRequest) GetShardAddress() []byte { @@ -2439,7 +3028,7 @@ type LockedTransaction struct { func (x *LockedTransaction) Reset() { *x = LockedTransaction{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[30] + mi := &file_global_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2452,7 +3041,7 @@ func (x *LockedTransaction) String() string { func (*LockedTransaction) ProtoMessage() {} func (x *LockedTransaction) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[30] + mi := &file_global_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2465,7 +3054,7 @@ func (x *LockedTransaction) ProtoReflect() protoreflect.Message { // Deprecated: Use LockedTransaction.ProtoReflect.Descriptor instead. func (*LockedTransaction) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{30} + return file_global_proto_rawDescGZIP(), []int{38} } func (x *LockedTransaction) GetTransactionHash() []byte { @@ -2507,7 +3096,7 @@ type GetLockedAddressesResponse struct { func (x *GetLockedAddressesResponse) Reset() { *x = GetLockedAddressesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[31] + mi := &file_global_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2520,7 +3109,7 @@ func (x *GetLockedAddressesResponse) String() string { func (*GetLockedAddressesResponse) ProtoMessage() {} func (x *GetLockedAddressesResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[31] + mi := &file_global_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2533,7 +3122,7 @@ func (x *GetLockedAddressesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLockedAddressesResponse.ProtoReflect.Descriptor instead. func (*GetLockedAddressesResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{31} + return file_global_proto_rawDescGZIP(), []int{39} } func (x *GetLockedAddressesResponse) GetTransactions() []*LockedTransaction { @@ -2552,7 +3141,7 @@ type GlobalGetWorkerInfoRequest struct { func (x *GlobalGetWorkerInfoRequest) Reset() { *x = GlobalGetWorkerInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[32] + mi := &file_global_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2565,7 +3154,7 @@ func (x *GlobalGetWorkerInfoRequest) String() string { func (*GlobalGetWorkerInfoRequest) ProtoMessage() {} func (x *GlobalGetWorkerInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[32] + mi := &file_global_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2578,7 +3167,7 @@ func (x *GlobalGetWorkerInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoRequest.ProtoReflect.Descriptor instead. 
func (*GlobalGetWorkerInfoRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{32} + return file_global_proto_rawDescGZIP(), []int{40} } type GlobalGetWorkerInfoResponseItem struct { @@ -2597,7 +3186,7 @@ type GlobalGetWorkerInfoResponseItem struct { func (x *GlobalGetWorkerInfoResponseItem) Reset() { *x = GlobalGetWorkerInfoResponseItem{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[33] + mi := &file_global_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2610,7 +3199,7 @@ func (x *GlobalGetWorkerInfoResponseItem) String() string { func (*GlobalGetWorkerInfoResponseItem) ProtoMessage() {} func (x *GlobalGetWorkerInfoResponseItem) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[33] + mi := &file_global_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2623,7 +3212,7 @@ func (x *GlobalGetWorkerInfoResponseItem) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoResponseItem.ProtoReflect.Descriptor instead. func (*GlobalGetWorkerInfoResponseItem) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{33} + return file_global_proto_rawDescGZIP(), []int{41} } func (x *GlobalGetWorkerInfoResponseItem) GetCoreId() uint32 { @@ -2679,7 +3268,7 @@ type GlobalGetWorkerInfoResponse struct { func (x *GlobalGetWorkerInfoResponse) Reset() { *x = GlobalGetWorkerInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[34] + mi := &file_global_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2692,7 +3281,7 @@ func (x *GlobalGetWorkerInfoResponse) String() string { func (*GlobalGetWorkerInfoResponse) ProtoMessage() {} func (x *GlobalGetWorkerInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[34] + mi := &file_global_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2705,7 +3294,7 @@ func (x *GlobalGetWorkerInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoResponse.ProtoReflect.Descriptor instead. func (*GlobalGetWorkerInfoResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{34} + return file_global_proto_rawDescGZIP(), []int{42} } func (x *GlobalGetWorkerInfoResponse) GetWorkers() []*GlobalGetWorkerInfoResponseItem { @@ -2728,7 +3317,7 @@ type SendMessage struct { func (x *SendMessage) Reset() { *x = SendMessage{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[35] + mi := &file_global_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2741,7 +3330,7 @@ func (x *SendMessage) String() string { func (*SendMessage) ProtoMessage() {} func (x *SendMessage) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[35] + mi := &file_global_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2754,7 +3343,7 @@ func (x *SendMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use SendMessage.ProtoReflect.Descriptor instead. 
func (*SendMessage) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{35} + return file_global_proto_rawDescGZIP(), []int{43} } func (x *SendMessage) GetPeerId() []byte { @@ -2791,7 +3380,7 @@ type ReceiveMessage struct { func (x *ReceiveMessage) Reset() { *x = ReceiveMessage{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[36] + mi := &file_global_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2804,7 +3393,7 @@ func (x *ReceiveMessage) String() string { func (*ReceiveMessage) ProtoMessage() {} func (x *ReceiveMessage) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[36] + mi := &file_global_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2817,7 +3406,7 @@ func (x *ReceiveMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceiveMessage.ProtoReflect.Descriptor instead. func (*ReceiveMessage) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{36} + return file_global_proto_rawDescGZIP(), []int{44} } func (x *ReceiveMessage) GetSourcePeerId() []byte { @@ -2852,7 +3441,7 @@ type GetKeyRegistryRequest struct { func (x *GetKeyRegistryRequest) Reset() { *x = GetKeyRegistryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[37] + mi := &file_global_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2865,7 +3454,7 @@ func (x *GetKeyRegistryRequest) String() string { func (*GetKeyRegistryRequest) ProtoMessage() {} func (x *GetKeyRegistryRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[37] + mi := &file_global_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2878,7 +3467,7 @@ func (x *GetKeyRegistryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryRequest.ProtoReflect.Descriptor instead. func (*GetKeyRegistryRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{37} + return file_global_proto_rawDescGZIP(), []int{45} } func (x *GetKeyRegistryRequest) GetIdentityKeyAddress() []byte { @@ -2900,7 +3489,7 @@ type GetKeyRegistryResponse struct { func (x *GetKeyRegistryResponse) Reset() { *x = GetKeyRegistryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[38] + mi := &file_global_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2913,7 +3502,7 @@ func (x *GetKeyRegistryResponse) String() string { func (*GetKeyRegistryResponse) ProtoMessage() {} func (x *GetKeyRegistryResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[38] + mi := &file_global_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2926,7 +3515,7 @@ func (x *GetKeyRegistryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryResponse.ProtoReflect.Descriptor instead. 
func (*GetKeyRegistryResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{38} + return file_global_proto_rawDescGZIP(), []int{46} } func (x *GetKeyRegistryResponse) GetRegistry() *KeyRegistry { @@ -2954,7 +3543,7 @@ type GetKeyRegistryByProverRequest struct { func (x *GetKeyRegistryByProverRequest) Reset() { *x = GetKeyRegistryByProverRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[39] + mi := &file_global_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2967,7 +3556,7 @@ func (x *GetKeyRegistryByProverRequest) String() string { func (*GetKeyRegistryByProverRequest) ProtoMessage() {} func (x *GetKeyRegistryByProverRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[39] + mi := &file_global_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2980,7 +3569,7 @@ func (x *GetKeyRegistryByProverRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryByProverRequest.ProtoReflect.Descriptor instead. func (*GetKeyRegistryByProverRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{39} + return file_global_proto_rawDescGZIP(), []int{47} } func (x *GetKeyRegistryByProverRequest) GetProverKeyAddress() []byte { @@ -3002,7 +3591,7 @@ type GetKeyRegistryByProverResponse struct { func (x *GetKeyRegistryByProverResponse) Reset() { *x = GetKeyRegistryByProverResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[40] + mi := &file_global_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3015,7 +3604,7 @@ func (x *GetKeyRegistryByProverResponse) String() string { func (*GetKeyRegistryByProverResponse) ProtoMessage() {} func (x *GetKeyRegistryByProverResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[40] + mi := &file_global_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3028,7 +3617,7 @@ func (x *GetKeyRegistryByProverResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryByProverResponse.ProtoReflect.Descriptor instead. func (*GetKeyRegistryByProverResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{40} + return file_global_proto_rawDescGZIP(), []int{48} } func (x *GetKeyRegistryByProverResponse) GetRegistry() *KeyRegistry { @@ -3057,7 +3646,7 @@ type PutIdentityKeyRequest struct { func (x *PutIdentityKeyRequest) Reset() { *x = PutIdentityKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[41] + mi := &file_global_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3070,7 +3659,7 @@ func (x *PutIdentityKeyRequest) String() string { func (*PutIdentityKeyRequest) ProtoMessage() {} func (x *PutIdentityKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[41] + mi := &file_global_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3083,7 +3672,7 @@ func (x *PutIdentityKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutIdentityKeyRequest.ProtoReflect.Descriptor instead. 
func (*PutIdentityKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{41} + return file_global_proto_rawDescGZIP(), []int{49} } func (x *PutIdentityKeyRequest) GetAddress() []byte { @@ -3111,7 +3700,7 @@ type PutIdentityKeyResponse struct { func (x *PutIdentityKeyResponse) Reset() { *x = PutIdentityKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[42] + mi := &file_global_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3124,7 +3713,7 @@ func (x *PutIdentityKeyResponse) String() string { func (*PutIdentityKeyResponse) ProtoMessage() {} func (x *PutIdentityKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[42] + mi := &file_global_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3137,7 +3726,7 @@ func (x *PutIdentityKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutIdentityKeyResponse.ProtoReflect.Descriptor instead. func (*PutIdentityKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{42} + return file_global_proto_rawDescGZIP(), []int{50} } func (x *PutIdentityKeyResponse) GetError() string { @@ -3158,7 +3747,7 @@ type PutProvingKeyRequest struct { func (x *PutProvingKeyRequest) Reset() { *x = PutProvingKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[43] + mi := &file_global_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3171,7 +3760,7 @@ func (x *PutProvingKeyRequest) String() string { func (*PutProvingKeyRequest) ProtoMessage() {} func (x *PutProvingKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[43] + mi := &file_global_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3184,7 +3773,7 @@ func (x *PutProvingKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutProvingKeyRequest.ProtoReflect.Descriptor instead. func (*PutProvingKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{43} + return file_global_proto_rawDescGZIP(), []int{51} } func (x *PutProvingKeyRequest) GetProvingKey() *BLS48581SignatureWithProofOfPossession { @@ -3205,7 +3794,7 @@ type PutProvingKeyResponse struct { func (x *PutProvingKeyResponse) Reset() { *x = PutProvingKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[44] + mi := &file_global_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3218,7 +3807,7 @@ func (x *PutProvingKeyResponse) String() string { func (*PutProvingKeyResponse) ProtoMessage() {} func (x *PutProvingKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[44] + mi := &file_global_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3231,7 +3820,7 @@ func (x *PutProvingKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutProvingKeyResponse.ProtoReflect.Descriptor instead. 
func (*PutProvingKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{44} + return file_global_proto_rawDescGZIP(), []int{52} } func (x *PutProvingKeyResponse) GetError() string { @@ -3255,7 +3844,7 @@ type PutCrossSignatureRequest struct { func (x *PutCrossSignatureRequest) Reset() { *x = PutCrossSignatureRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[45] + mi := &file_global_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3268,7 +3857,7 @@ func (x *PutCrossSignatureRequest) String() string { func (*PutCrossSignatureRequest) ProtoMessage() {} func (x *PutCrossSignatureRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[45] + mi := &file_global_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3281,7 +3870,7 @@ func (x *PutCrossSignatureRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutCrossSignatureRequest.ProtoReflect.Descriptor instead. func (*PutCrossSignatureRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{45} + return file_global_proto_rawDescGZIP(), []int{53} } func (x *PutCrossSignatureRequest) GetIdentityKeyAddress() []byte { @@ -3323,7 +3912,7 @@ type PutCrossSignatureResponse struct { func (x *PutCrossSignatureResponse) Reset() { *x = PutCrossSignatureResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[46] + mi := &file_global_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3336,7 +3925,7 @@ func (x *PutCrossSignatureResponse) String() string { func (*PutCrossSignatureResponse) ProtoMessage() {} func (x *PutCrossSignatureResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[46] + mi := &file_global_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3349,7 +3938,7 @@ func (x *PutCrossSignatureResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutCrossSignatureResponse.ProtoReflect.Descriptor instead. func (*PutCrossSignatureResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{46} + return file_global_proto_rawDescGZIP(), []int{54} } func (x *PutCrossSignatureResponse) GetError() string { @@ -3371,7 +3960,7 @@ type PutSignedKeyRequest struct { func (x *PutSignedKeyRequest) Reset() { *x = PutSignedKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[47] + mi := &file_global_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3384,7 +3973,7 @@ func (x *PutSignedKeyRequest) String() string { func (*PutSignedKeyRequest) ProtoMessage() {} func (x *PutSignedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[47] + mi := &file_global_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3397,7 +3986,7 @@ func (x *PutSignedKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutSignedKeyRequest.ProtoReflect.Descriptor instead. 
func (*PutSignedKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{47} + return file_global_proto_rawDescGZIP(), []int{55} } func (x *PutSignedKeyRequest) GetAddress() []byte { @@ -3425,7 +4014,7 @@ type PutSignedKeyResponse struct { func (x *PutSignedKeyResponse) Reset() { *x = PutSignedKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[48] + mi := &file_global_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3438,7 +4027,7 @@ func (x *PutSignedKeyResponse) String() string { func (*PutSignedKeyResponse) ProtoMessage() {} func (x *PutSignedKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[48] + mi := &file_global_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3451,7 +4040,7 @@ func (x *PutSignedKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutSignedKeyResponse.ProtoReflect.Descriptor instead. func (*PutSignedKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{48} + return file_global_proto_rawDescGZIP(), []int{56} } func (x *PutSignedKeyResponse) GetError() string { @@ -3472,7 +4061,7 @@ type GetIdentityKeyRequest struct { func (x *GetIdentityKeyRequest) Reset() { *x = GetIdentityKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[49] + mi := &file_global_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3485,7 +4074,7 @@ func (x *GetIdentityKeyRequest) String() string { func (*GetIdentityKeyRequest) ProtoMessage() {} func (x *GetIdentityKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[49] + mi := &file_global_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3498,7 +4087,7 @@ func (x *GetIdentityKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetIdentityKeyRequest.ProtoReflect.Descriptor instead. func (*GetIdentityKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{49} + return file_global_proto_rawDescGZIP(), []int{57} } func (x *GetIdentityKeyRequest) GetAddress() []byte { @@ -3520,7 +4109,7 @@ type GetIdentityKeyResponse struct { func (x *GetIdentityKeyResponse) Reset() { *x = GetIdentityKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[50] + mi := &file_global_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3533,7 +4122,7 @@ func (x *GetIdentityKeyResponse) String() string { func (*GetIdentityKeyResponse) ProtoMessage() {} func (x *GetIdentityKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[50] + mi := &file_global_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3546,7 +4135,7 @@ func (x *GetIdentityKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetIdentityKeyResponse.ProtoReflect.Descriptor instead. 
func (*GetIdentityKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{50} + return file_global_proto_rawDescGZIP(), []int{58} } func (x *GetIdentityKeyResponse) GetKey() *Ed448PublicKey { @@ -3574,7 +4163,7 @@ type GetProvingKeyRequest struct { func (x *GetProvingKeyRequest) Reset() { *x = GetProvingKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[51] + mi := &file_global_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3587,7 +4176,7 @@ func (x *GetProvingKeyRequest) String() string { func (*GetProvingKeyRequest) ProtoMessage() {} func (x *GetProvingKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[51] + mi := &file_global_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3600,7 +4189,7 @@ func (x *GetProvingKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvingKeyRequest.ProtoReflect.Descriptor instead. func (*GetProvingKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{51} + return file_global_proto_rawDescGZIP(), []int{59} } func (x *GetProvingKeyRequest) GetAddress() []byte { @@ -3622,7 +4211,7 @@ type GetProvingKeyResponse struct { func (x *GetProvingKeyResponse) Reset() { *x = GetProvingKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[52] + mi := &file_global_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3635,7 +4224,7 @@ func (x *GetProvingKeyResponse) String() string { func (*GetProvingKeyResponse) ProtoMessage() {} func (x *GetProvingKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[52] + mi := &file_global_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3648,7 +4237,7 @@ func (x *GetProvingKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvingKeyResponse.ProtoReflect.Descriptor instead. func (*GetProvingKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{52} + return file_global_proto_rawDescGZIP(), []int{60} } func (x *GetProvingKeyResponse) GetKey() *BLS48581SignatureWithProofOfPossession { @@ -3676,7 +4265,7 @@ type GetSignedKeyRequest struct { func (x *GetSignedKeyRequest) Reset() { *x = GetSignedKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[53] + mi := &file_global_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3689,7 +4278,7 @@ func (x *GetSignedKeyRequest) String() string { func (*GetSignedKeyRequest) ProtoMessage() {} func (x *GetSignedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[53] + mi := &file_global_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3702,7 +4291,7 @@ func (x *GetSignedKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeyRequest.ProtoReflect.Descriptor instead. 
func (*GetSignedKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{53} + return file_global_proto_rawDescGZIP(), []int{61} } func (x *GetSignedKeyRequest) GetAddress() []byte { @@ -3724,7 +4313,7 @@ type GetSignedKeyResponse struct { func (x *GetSignedKeyResponse) Reset() { *x = GetSignedKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[54] + mi := &file_global_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3737,7 +4326,7 @@ func (x *GetSignedKeyResponse) String() string { func (*GetSignedKeyResponse) ProtoMessage() {} func (x *GetSignedKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[54] + mi := &file_global_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3750,7 +4339,7 @@ func (x *GetSignedKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeyResponse.ProtoReflect.Descriptor instead. func (*GetSignedKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{54} + return file_global_proto_rawDescGZIP(), []int{62} } func (x *GetSignedKeyResponse) GetKey() *SignedX448Key { @@ -3779,7 +4368,7 @@ type GetSignedKeysByParentRequest struct { func (x *GetSignedKeysByParentRequest) Reset() { *x = GetSignedKeysByParentRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[55] + mi := &file_global_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3792,7 +4381,7 @@ func (x *GetSignedKeysByParentRequest) String() string { func (*GetSignedKeysByParentRequest) ProtoMessage() {} func (x *GetSignedKeysByParentRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[55] + mi := &file_global_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3805,7 +4394,7 @@ func (x *GetSignedKeysByParentRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeysByParentRequest.ProtoReflect.Descriptor instead. func (*GetSignedKeysByParentRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{55} + return file_global_proto_rawDescGZIP(), []int{63} } func (x *GetSignedKeysByParentRequest) GetParentKeyAddress() []byte { @@ -3834,7 +4423,7 @@ type GetSignedKeysByParentResponse struct { func (x *GetSignedKeysByParentResponse) Reset() { *x = GetSignedKeysByParentResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[56] + mi := &file_global_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3847,7 +4436,7 @@ func (x *GetSignedKeysByParentResponse) String() string { func (*GetSignedKeysByParentResponse) ProtoMessage() {} func (x *GetSignedKeysByParentResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[56] + mi := &file_global_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3860,7 +4449,7 @@ func (x *GetSignedKeysByParentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeysByParentResponse.ProtoReflect.Descriptor instead. 
func (*GetSignedKeysByParentResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{56} + return file_global_proto_rawDescGZIP(), []int{64} } func (x *GetSignedKeysByParentResponse) GetKeys() []*SignedX448Key { @@ -3886,7 +4475,7 @@ type RangeProvingKeysRequest struct { func (x *RangeProvingKeysRequest) Reset() { *x = RangeProvingKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[57] + mi := &file_global_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3899,7 +4488,7 @@ func (x *RangeProvingKeysRequest) String() string { func (*RangeProvingKeysRequest) ProtoMessage() {} func (x *RangeProvingKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[57] + mi := &file_global_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3912,7 +4501,7 @@ func (x *RangeProvingKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeProvingKeysRequest.ProtoReflect.Descriptor instead. func (*RangeProvingKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{57} + return file_global_proto_rawDescGZIP(), []int{65} } type RangeProvingKeysResponse struct { @@ -3927,7 +4516,7 @@ type RangeProvingKeysResponse struct { func (x *RangeProvingKeysResponse) Reset() { *x = RangeProvingKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[58] + mi := &file_global_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3940,7 +4529,7 @@ func (x *RangeProvingKeysResponse) String() string { func (*RangeProvingKeysResponse) ProtoMessage() {} func (x *RangeProvingKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[58] + mi := &file_global_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3953,7 +4542,7 @@ func (x *RangeProvingKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeProvingKeysResponse.ProtoReflect.Descriptor instead. func (*RangeProvingKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{58} + return file_global_proto_rawDescGZIP(), []int{66} } func (x *RangeProvingKeysResponse) GetKey() *BLS48581SignatureWithProofOfPossession { @@ -3979,7 +4568,7 @@ type RangeIdentityKeysRequest struct { func (x *RangeIdentityKeysRequest) Reset() { *x = RangeIdentityKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[59] + mi := &file_global_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3992,7 +4581,7 @@ func (x *RangeIdentityKeysRequest) String() string { func (*RangeIdentityKeysRequest) ProtoMessage() {} func (x *RangeIdentityKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[59] + mi := &file_global_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4005,7 +4594,7 @@ func (x *RangeIdentityKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeIdentityKeysRequest.ProtoReflect.Descriptor instead. 
func (*RangeIdentityKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{59} + return file_global_proto_rawDescGZIP(), []int{67} } type RangeIdentityKeysResponse struct { @@ -4020,7 +4609,7 @@ type RangeIdentityKeysResponse struct { func (x *RangeIdentityKeysResponse) Reset() { *x = RangeIdentityKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[60] + mi := &file_global_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4033,7 +4622,7 @@ func (x *RangeIdentityKeysResponse) String() string { func (*RangeIdentityKeysResponse) ProtoMessage() {} func (x *RangeIdentityKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[60] + mi := &file_global_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4046,7 +4635,7 @@ func (x *RangeIdentityKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeIdentityKeysResponse.ProtoReflect.Descriptor instead. func (*RangeIdentityKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{60} + return file_global_proto_rawDescGZIP(), []int{68} } func (x *RangeIdentityKeysResponse) GetKey() *Ed448PublicKey { @@ -4075,7 +4664,7 @@ type RangeSignedKeysRequest struct { func (x *RangeSignedKeysRequest) Reset() { *x = RangeSignedKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[61] + mi := &file_global_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4088,7 +4677,7 @@ func (x *RangeSignedKeysRequest) String() string { func (*RangeSignedKeysRequest) ProtoMessage() {} func (x *RangeSignedKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[61] + mi := &file_global_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4101,7 +4690,7 @@ func (x *RangeSignedKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeSignedKeysRequest.ProtoReflect.Descriptor instead. func (*RangeSignedKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{61} + return file_global_proto_rawDescGZIP(), []int{69} } func (x *RangeSignedKeysRequest) GetParentKeyAddress() []byte { @@ -4130,7 +4719,7 @@ type RangeSignedKeysResponse struct { func (x *RangeSignedKeysResponse) Reset() { *x = RangeSignedKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[62] + mi := &file_global_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4143,7 +4732,7 @@ func (x *RangeSignedKeysResponse) String() string { func (*RangeSignedKeysResponse) ProtoMessage() {} func (x *RangeSignedKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[62] + mi := &file_global_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4156,7 +4745,7 @@ func (x *RangeSignedKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeSignedKeysResponse.ProtoReflect.Descriptor instead. 
func (*RangeSignedKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{62} + return file_global_proto_rawDescGZIP(), []int{70} } func (x *RangeSignedKeysResponse) GetKey() *SignedX448Key { @@ -4185,7 +4774,7 @@ type MessageKeyShard struct { func (x *MessageKeyShard) Reset() { *x = MessageKeyShard{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[63] + mi := &file_global_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4198,7 +4787,7 @@ func (x *MessageKeyShard) String() string { func (*MessageKeyShard) ProtoMessage() {} func (x *MessageKeyShard) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[63] + mi := &file_global_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4211,7 +4800,7 @@ func (x *MessageKeyShard) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageKeyShard.ProtoReflect.Descriptor instead. func (*MessageKeyShard) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{63} + return file_global_proto_rawDescGZIP(), []int{71} } func (x *MessageKeyShard) GetPartyIdentifier() uint32 { @@ -4246,7 +4835,7 @@ type PutMessageRequest struct { func (x *PutMessageRequest) Reset() { *x = PutMessageRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[64] + mi := &file_global_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4259,7 +4848,7 @@ func (x *PutMessageRequest) String() string { func (*PutMessageRequest) ProtoMessage() {} func (x *PutMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[64] + mi := &file_global_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4272,7 +4861,7 @@ func (x *PutMessageRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutMessageRequest.ProtoReflect.Descriptor instead. func (*PutMessageRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{64} + return file_global_proto_rawDescGZIP(), []int{72} } func (x *PutMessageRequest) GetMessageShards() []*MessageKeyShard { @@ -4305,7 +4894,7 @@ type PutMessageResponse struct { func (x *PutMessageResponse) Reset() { *x = PutMessageResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[65] + mi := &file_global_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4318,7 +4907,7 @@ func (x *PutMessageResponse) String() string { func (*PutMessageResponse) ProtoMessage() {} func (x *PutMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[65] + mi := &file_global_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4331,7 +4920,7 @@ func (x *PutMessageResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutMessageResponse.ProtoReflect.Descriptor instead. 
func (*PutMessageResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{65} + return file_global_proto_rawDescGZIP(), []int{73} } var File_global_proto protoreflect.FileDescriptor @@ -4610,624 +5199,761 @@ var file_global_proto_rawDesc = []byte{ 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x92, 0x03, 0x0a, 0x11, 0x47, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xe3, 0x03, 0x0a, 0x11, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x11, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x14, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, - 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, - 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, - 0xcf, 0x03, 0x0a, 0x0b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, - 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, - 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, + 0x6c, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, + 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x11, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x65, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x72, 0x65, + 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x65, 0x65, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x6f, 0x74, 0x65, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x66, 0x65, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, - 0x70, 0x6c, 0x69, 0x65, 0x72, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 
0x61, 0x74, 0x75, 0x72, - 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, - 0x31, 0x22, 0x8f, 0x02, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x76, 0x65, - 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x76, 0x0a, 0x1d, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, - 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x22, 0x92, 0x02, 0x0a, 0x09, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x56, 0x6f, 0x74, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x70, 0x70, 0x72, 0x6f, - 0x76, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, - 0x31, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0xee, 0x01, 0x0a, 0x11, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, + 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, + 0x22, 0xe3, 0x03, 0x0a, 0x0b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, + 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x2e, + 0x0a, 0x13, 0x66, 0x65, 0x65, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x6f, 0x74, 
0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x66, 0x65, 0x65, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x76, + 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, + 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, + 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0xa3, 0x02, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x76, 0x65, + 0x72, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x13, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, - 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x47, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, - 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x45, 0x0a, - 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0x3a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, - 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x22, 0x69, 0x0a, 0x13, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x54, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, - 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0x6d, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x22, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 
0x68, 0x61, 0x72, 0x64, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x98, 0x01, 0x0a, - 0x0c, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, - 0x64, 0x61, 0x74, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x4b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3b, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x31, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x31, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x32, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x32, 0x22, 0x4d, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x63, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, - 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x4c, - 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, - 
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x22, 0x6e, 0x0a, 0x1a, 0x47, 0x65, - 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x65, - 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1c, 0x0a, 0x1a, 0x47, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf8, 0x01, 0x0a, 0x1f, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x07, - 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, - 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, - 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, - 0x12, 0x36, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x22, 0x73, 0x0a, 0x1b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 
0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x63, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, + 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, + 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0xed, 0x02, 0x0a, + 0x10, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, + 0x6c, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x68, 0x0a, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x72, + 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0x53, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x17, 0x0a, 0x07, 0x63, 0x69, 0x72, 0x63, 
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x06, 0x63, 0x69, 0x72, 0x63, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x63, 0x0a, - 0x0e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x1b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x52, 0x61, 0x6e, 0x6b, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x3b, 0x0a, 0x04, 0x76, 0x6f, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, + 0x61, 0x6c, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x04, 0x76, 0x6f, 0x74, 0x65, 0x22, 0xe9, 0x02, 0x0a, + 0x0e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, + 0x3c, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x68, 0x0a, + 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, + 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x17, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x1b, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x52, 0x61, 0x6e, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x76, + 0x6f, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x56, 0x6f, + 0x74, 0x65, 0x52, 0x04, 0x76, 0x6f, 0x74, 0x65, 0x22, 0x8f, 0x02, 0x0a, 0x0c, 0x50, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x16, 0x0a, 
0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, + 0x35, 0x38, 0x31, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, + 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0xea, 0x02, 0x0a, 0x0c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x68, 0x0a, 0x19, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, + 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x17, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x72, + 0x61, 0x6e, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x1b, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x52, 0x61, 0x6e, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x76, 0x6f, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x56, 0x6f, 0x74, 0x65, + 0x52, 0x04, 0x76, 0x6f, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x69, + 
0x6d, 0x65, 0x6f, 0x75, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x82, 0x02, 0x0a, 0x11, 0x51, 0x75, 0x6f, 0x72, + 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x13, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, + 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xd1, 0x02, 0x0a, + 0x12, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x61, 0x6e, + 0x6b, 0x73, 0x12, 0x68, 0x0a, 0x19, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x71, 0x75, 0x6f, + 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, + 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x52, 0x17, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, + 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x13, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, + 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x12, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, + 0x12, 0x44, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x95, 0x01, 0x0a, + 0x0d, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x44, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x22, 0x45, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x3a, 0x0a, 0x15, 0x47, + 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x69, 0x0a, 0x13, 0x47, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, + 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 
0x72, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x22, 0x3d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x22, 0x5f, 0x0a, 0x16, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, + 0x61, 0x6c, 0x22, 0x54, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x6d, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x57, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x41, 0x70, + 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x22, 0x63, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x70, + 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x22, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 
0x41, 0x70, 0x70, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x22, 0x98, 0x01, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, + 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, + 0x6f, 0x22, 0x38, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, + 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x31, 0x12, 0x0e, 0x0a, 0x02, 0x6c, + 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x32, 0x22, 0x4d, 0x0a, 0x17, 0x47, + 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x63, 0x0a, 0x19, 0x47, 0x65, + 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, + 0x9d, 0x01, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x22, + 0x6e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, + 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x1c, 0x0a, 0x1a, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf8, 0x01, + 0x0a, 0x1f, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x22, 0x73, 0x0a, 0x1b, 0x47, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0x53, 0x0a, + 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x07, + 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x69, 0x72, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x69, 0x72, 0x63, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, - 0x6c, 0x6c, 0x22, 0x49, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x70, 0x0a, - 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, - 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x4d, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x78, - 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 
0x73, 0x73, 0x12, 0x4a, 0x0a, 0x0c, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, - 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x22, 0x2e, 0x0a, 0x16, 0x50, 0x75, 0x74, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x78, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x60, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, - 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, - 0x79, 0x22, 0x2d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, - 0x14, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x4f, 0x0a, 0x25, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x20, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, - 0x12, 0x4f, 0x0a, 0x25, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 
0x79, 0x4b, 0x65, - 0x79, 0x22, 0x31, 0x0a, 0x19, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x69, 0x0a, 0x13, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, - 0x2c, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x31, 0x0a, - 0x15, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, + 0x6c, 0x6c, 0x22, 0x63, 0x0a, 0x0e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x50, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x69, + 0x72, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x69, 0x72, + 0x63, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x49, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x22, 0x70, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4d, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 
0x52, 0x10, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x22, 0x78, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, + 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7d, 0x0a, + 0x15, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x22, 0x69, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, + 0x12, 0x4a, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, + 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x22, 0x2e, 0x0a, 0x16, + 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x78, 0x0a, 0x14, + 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, + 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, + 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x22, 0x2d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, + 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x12, 0x69, 0x64, 0x65, 0x6e, 
0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x4f, 0x0a, 0x25, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, + 0x66, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x25, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, + 0x66, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x22, 0x31, 0x0a, 0x19, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, + 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x69, 0x0a, 0x13, 0x50, 0x75, 0x74, + 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x22, 0x2c, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x31, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x69, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x39, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, + 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x30, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, + 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, + 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, + 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x2f, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x66, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, + 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, + 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6d, + 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, + 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, 0x71, 0x0a, + 0x1d, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, + 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, + 0x38, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x19, 0x0a, 0x17, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 
0x6e, 0x67, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x18, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, + 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, + 0x19, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x80, 0x01, - 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, - 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x2f, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x22, 0x66, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x69, 
0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6d, 0x0a, 0x1c, 0x47, 0x65, 0x74, - 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, - 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, - 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, 0x71, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, - 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x19, 0x0a, 0x17, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x1a, 0x0a, 0x18, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, 0x19, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, - 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x16, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 
0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, - 0x69, 0x0a, 0x17, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x61, 0x0a, 0x0f, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x29, 0x0a, - 0x10, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0c, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x22, 0xb2, 0x01, - 0x0a, 0x11, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, - 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x30, 0x0a, 0x14, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, - 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf2, 0x04, 0x0a, 0x0d, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x0e, 0x47, 0x65, - 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x2e, 0x71, - 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, - 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x2e, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2e, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x78, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x47, 0x65, - 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x12, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, - 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x35, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8b, 0x01, - 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x78, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 
0x64, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x70, 0x0a, 0x0c, 0x4f, - 0x6e, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x60, 0x0a, 0x07, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x29, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, - 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0xdf, 0x01, - 0x0a, 0x0d, 0x4d, 0x69, 0x78, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x69, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0b, 0x52, 0x6f, - 0x75, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, - 0xd7, 0x0c, 0x0a, 0x12, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 
0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, - 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, - 0x0e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, - 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0d, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x11, 0x50, 0x75, 0x74, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x33, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, - 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, - 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x72, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, - 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 
0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7e, 0x0a, 0x11, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, - 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, - 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x78, 0x0a, 0x0f, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x16, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, + 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, + 0x70, 0x6f, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x17, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, + 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, + 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x61, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x61, + 0x72, 0x74, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 
0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4b, + 0x65, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x11, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0d, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xef, 0x05, + 0x0a, 0x0d, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x72, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x70, 0x70, 
0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x78, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x12, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, + 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7e, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0x8f, 0x02, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 
0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xec, 0x03, 0x0a, 0x0f, 0x44, 0x69, - 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x56, 0x0a, - 0x0f, 0x50, 0x75, 0x74, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x32, 0x70, 0x0a, 0x0c, 0x4f, 0x6e, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x60, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, + 0x01, 0x30, 0x01, 0x32, 0xdf, 0x01, 0x0a, 0x0d, 0x4d, 0x69, 0x78, 0x6e, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x63, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0xd7, 0x0c, 0x0a, 0x12, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x75, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x30, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 
0x47, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x38, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, + 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0d, 0x50, 0x75, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, + 0x0a, 0x11, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x6f, + 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, + 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, + 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x15, + 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, + 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, + 0x67, 0x6e, 0x65, 
0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x32, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x11, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x0f, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0xec, 0x03, 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x75, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, + 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, + 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, - 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x75, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x62, 0x6f, - 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x06, - 0x50, 0x75, 0x74, 0x48, 0x75, 0x62, 0x12, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x59, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x12, 0x26, 0x2e, 0x71, + 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x50, 0x75, 0x74, 0x48, 0x75, 0x62, 0x12, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, - 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, - 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x50, 0x75, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x59, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x48, + 0x75, 0x62, 0x12, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x48, 0x75, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2f, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, + 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -5242,7 +5968,7 @@ func file_global_proto_rawDescGZIP() []byte { return file_global_proto_rawDescData } -var file_global_proto_msgTypes = make([]protoimpl.MessageInfo, 66) +var file_global_proto_msgTypes = make([]protoimpl.MessageInfo, 74) var file_global_proto_goTypes = []interface{}{ (*LegacyProverRequest)(nil), // 0: quilibrium.node.global.pb.LegacyProverRequest (*SeniorityMerge)(nil), // 1: quilibrium.node.global.pb.SeniorityMerge @@ -5259,215 +5985,242 @@ var file_global_proto_goTypes = []interface{}{ (*GlobalFrameHeader)(nil), // 12: quilibrium.node.global.pb.GlobalFrameHeader (*FrameHeader)(nil), // 13: quilibrium.node.global.pb.FrameHeader (*ProverLivenessCheck)(nil), // 14: quilibrium.node.global.pb.ProverLivenessCheck - (*FrameVote)(nil), // 15: quilibrium.node.global.pb.FrameVote - (*FrameConfirmation)(nil), // 16: quilibrium.node.global.pb.FrameConfirmation - (*GlobalFrame)(nil), // 17: quilibrium.node.global.pb.GlobalFrame - (*AppShardFrame)(nil), // 18: quilibrium.node.global.pb.AppShardFrame - (*GlobalAlert)(nil), // 19: quilibrium.node.global.pb.GlobalAlert - (*GetGlobalFrameRequest)(nil), // 20: quilibrium.node.global.pb.GetGlobalFrameRequest - (*GlobalFrameResponse)(nil), // 21: quilibrium.node.global.pb.GlobalFrameResponse - (*GetAppShardFrameRequest)(nil), // 22: quilibrium.node.global.pb.GetAppShardFrameRequest - (*AppShardFrameResponse)(nil), // 23: quilibrium.node.global.pb.AppShardFrameResponse - (*GetAppShardsRequest)(nil), // 24: quilibrium.node.global.pb.GetAppShardsRequest - (*AppShardInfo)(nil), // 25: quilibrium.node.global.pb.AppShardInfo - (*GetAppShardsResponse)(nil), // 26: quilibrium.node.global.pb.GetAppShardsResponse - (*GetGlobalShardsRequest)(nil), // 27: quilibrium.node.global.pb.GetGlobalShardsRequest - (*GetGlobalShardsResponse)(nil), // 28: quilibrium.node.global.pb.GetGlobalShardsResponse 
- (*GetLockedAddressesRequest)(nil), // 29: quilibrium.node.global.pb.GetLockedAddressesRequest - (*LockedTransaction)(nil), // 30: quilibrium.node.global.pb.LockedTransaction - (*GetLockedAddressesResponse)(nil), // 31: quilibrium.node.global.pb.GetLockedAddressesResponse - (*GlobalGetWorkerInfoRequest)(nil), // 32: quilibrium.node.global.pb.GlobalGetWorkerInfoRequest - (*GlobalGetWorkerInfoResponseItem)(nil), // 33: quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem - (*GlobalGetWorkerInfoResponse)(nil), // 34: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse - (*SendMessage)(nil), // 35: quilibrium.node.global.pb.SendMessage - (*ReceiveMessage)(nil), // 36: quilibrium.node.global.pb.ReceiveMessage - (*GetKeyRegistryRequest)(nil), // 37: quilibrium.node.global.pb.GetKeyRegistryRequest - (*GetKeyRegistryResponse)(nil), // 38: quilibrium.node.global.pb.GetKeyRegistryResponse - (*GetKeyRegistryByProverRequest)(nil), // 39: quilibrium.node.global.pb.GetKeyRegistryByProverRequest - (*GetKeyRegistryByProverResponse)(nil), // 40: quilibrium.node.global.pb.GetKeyRegistryByProverResponse - (*PutIdentityKeyRequest)(nil), // 41: quilibrium.node.global.pb.PutIdentityKeyRequest - (*PutIdentityKeyResponse)(nil), // 42: quilibrium.node.global.pb.PutIdentityKeyResponse - (*PutProvingKeyRequest)(nil), // 43: quilibrium.node.global.pb.PutProvingKeyRequest - (*PutProvingKeyResponse)(nil), // 44: quilibrium.node.global.pb.PutProvingKeyResponse - (*PutCrossSignatureRequest)(nil), // 45: quilibrium.node.global.pb.PutCrossSignatureRequest - (*PutCrossSignatureResponse)(nil), // 46: quilibrium.node.global.pb.PutCrossSignatureResponse - (*PutSignedKeyRequest)(nil), // 47: quilibrium.node.global.pb.PutSignedKeyRequest - (*PutSignedKeyResponse)(nil), // 48: quilibrium.node.global.pb.PutSignedKeyResponse - (*GetIdentityKeyRequest)(nil), // 49: quilibrium.node.global.pb.GetIdentityKeyRequest - (*GetIdentityKeyResponse)(nil), // 50: quilibrium.node.global.pb.GetIdentityKeyResponse - (*GetProvingKeyRequest)(nil), // 51: quilibrium.node.global.pb.GetProvingKeyRequest - (*GetProvingKeyResponse)(nil), // 52: quilibrium.node.global.pb.GetProvingKeyResponse - (*GetSignedKeyRequest)(nil), // 53: quilibrium.node.global.pb.GetSignedKeyRequest - (*GetSignedKeyResponse)(nil), // 54: quilibrium.node.global.pb.GetSignedKeyResponse - (*GetSignedKeysByParentRequest)(nil), // 55: quilibrium.node.global.pb.GetSignedKeysByParentRequest - (*GetSignedKeysByParentResponse)(nil), // 56: quilibrium.node.global.pb.GetSignedKeysByParentResponse - (*RangeProvingKeysRequest)(nil), // 57: quilibrium.node.global.pb.RangeProvingKeysRequest - (*RangeProvingKeysResponse)(nil), // 58: quilibrium.node.global.pb.RangeProvingKeysResponse - (*RangeIdentityKeysRequest)(nil), // 59: quilibrium.node.global.pb.RangeIdentityKeysRequest - (*RangeIdentityKeysResponse)(nil), // 60: quilibrium.node.global.pb.RangeIdentityKeysResponse - (*RangeSignedKeysRequest)(nil), // 61: quilibrium.node.global.pb.RangeSignedKeysRequest - (*RangeSignedKeysResponse)(nil), // 62: quilibrium.node.global.pb.RangeSignedKeysResponse - (*MessageKeyShard)(nil), // 63: quilibrium.node.global.pb.MessageKeyShard - (*PutMessageRequest)(nil), // 64: quilibrium.node.global.pb.PutMessageRequest - (*PutMessageResponse)(nil), // 65: quilibrium.node.global.pb.PutMessageResponse - (*Ed448Signature)(nil), // 66: quilibrium.node.keys.pb.Ed448Signature - (*BLS48581SignatureWithProofOfPossession)(nil), // 67: quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 
(*BLS48581AddressedSignature)(nil), // 68: quilibrium.node.keys.pb.BLS48581AddressedSignature - (*TraversalProof)(nil), // 69: quilibrium.node.application.pb.TraversalProof - (*TokenDeploy)(nil), // 70: quilibrium.node.token.pb.TokenDeploy - (*TokenUpdate)(nil), // 71: quilibrium.node.token.pb.TokenUpdate - (*Transaction)(nil), // 72: quilibrium.node.token.pb.Transaction - (*PendingTransaction)(nil), // 73: quilibrium.node.token.pb.PendingTransaction - (*MintTransaction)(nil), // 74: quilibrium.node.token.pb.MintTransaction - (*HypergraphDeploy)(nil), // 75: quilibrium.node.hypergraph.pb.HypergraphDeploy - (*HypergraphUpdate)(nil), // 76: quilibrium.node.hypergraph.pb.HypergraphUpdate - (*VertexAdd)(nil), // 77: quilibrium.node.hypergraph.pb.VertexAdd - (*VertexRemove)(nil), // 78: quilibrium.node.hypergraph.pb.VertexRemove - (*HyperedgeAdd)(nil), // 79: quilibrium.node.hypergraph.pb.HyperedgeAdd - (*HyperedgeRemove)(nil), // 80: quilibrium.node.hypergraph.pb.HyperedgeRemove - (*ComputeDeploy)(nil), // 81: quilibrium.node.compute.pb.ComputeDeploy - (*ComputeUpdate)(nil), // 82: quilibrium.node.compute.pb.ComputeUpdate - (*CodeDeployment)(nil), // 83: quilibrium.node.compute.pb.CodeDeployment - (*CodeExecute)(nil), // 84: quilibrium.node.compute.pb.CodeExecute - (*CodeFinalize)(nil), // 85: quilibrium.node.compute.pb.CodeFinalize - (*BLS48581AggregateSignature)(nil), // 86: quilibrium.node.keys.pb.BLS48581AggregateSignature - (*KeyRegistry)(nil), // 87: quilibrium.node.keys.pb.KeyRegistry - (*Ed448PublicKey)(nil), // 88: quilibrium.node.keys.pb.Ed448PublicKey - (*SignedX448Key)(nil), // 89: quilibrium.node.keys.pb.SignedX448Key - (*Message)(nil), // 90: quilibrium.node.application.pb.Message - (*InboxMessagePut)(nil), // 91: quilibrium.node.channel.pb.InboxMessagePut - (*InboxMessageRequest)(nil), // 92: quilibrium.node.channel.pb.InboxMessageRequest - (*HubPut)(nil), // 93: quilibrium.node.channel.pb.HubPut - (*HubRequest)(nil), // 94: quilibrium.node.channel.pb.HubRequest - (*DispatchSyncRequest)(nil), // 95: quilibrium.node.channel.pb.DispatchSyncRequest - (*emptypb.Empty)(nil), // 96: google.protobuf.Empty - (*InboxMessageResponse)(nil), // 97: quilibrium.node.channel.pb.InboxMessageResponse - (*HubResponse)(nil), // 98: quilibrium.node.channel.pb.HubResponse - (*DispatchSyncResponse)(nil), // 99: quilibrium.node.channel.pb.DispatchSyncResponse + (*AppShardProposal)(nil), // 15: quilibrium.node.global.pb.AppShardProposal + (*GlobalProposal)(nil), // 16: quilibrium.node.global.pb.GlobalProposal + (*ProposalVote)(nil), // 17: quilibrium.node.global.pb.ProposalVote + (*TimeoutState)(nil), // 18: quilibrium.node.global.pb.TimeoutState + (*QuorumCertificate)(nil), // 19: quilibrium.node.global.pb.QuorumCertificate + (*TimeoutCertificate)(nil), // 20: quilibrium.node.global.pb.TimeoutCertificate + (*GlobalFrame)(nil), // 21: quilibrium.node.global.pb.GlobalFrame + (*AppShardFrame)(nil), // 22: quilibrium.node.global.pb.AppShardFrame + (*GlobalAlert)(nil), // 23: quilibrium.node.global.pb.GlobalAlert + (*GetGlobalFrameRequest)(nil), // 24: quilibrium.node.global.pb.GetGlobalFrameRequest + (*GlobalFrameResponse)(nil), // 25: quilibrium.node.global.pb.GlobalFrameResponse + (*GetGlobalProposalRequest)(nil), // 26: quilibrium.node.global.pb.GetGlobalProposalRequest + (*GlobalProposalResponse)(nil), // 27: quilibrium.node.global.pb.GlobalProposalResponse + (*GetAppShardFrameRequest)(nil), // 28: quilibrium.node.global.pb.GetAppShardFrameRequest + (*AppShardFrameResponse)(nil), // 29: 
quilibrium.node.global.pb.AppShardFrameResponse + (*GetAppShardProposalRequest)(nil), // 30: quilibrium.node.global.pb.GetAppShardProposalRequest + (*AppShardProposalResponse)(nil), // 31: quilibrium.node.global.pb.AppShardProposalResponse + (*GetAppShardsRequest)(nil), // 32: quilibrium.node.global.pb.GetAppShardsRequest + (*AppShardInfo)(nil), // 33: quilibrium.node.global.pb.AppShardInfo + (*GetAppShardsResponse)(nil), // 34: quilibrium.node.global.pb.GetAppShardsResponse + (*GetGlobalShardsRequest)(nil), // 35: quilibrium.node.global.pb.GetGlobalShardsRequest + (*GetGlobalShardsResponse)(nil), // 36: quilibrium.node.global.pb.GetGlobalShardsResponse + (*GetLockedAddressesRequest)(nil), // 37: quilibrium.node.global.pb.GetLockedAddressesRequest + (*LockedTransaction)(nil), // 38: quilibrium.node.global.pb.LockedTransaction + (*GetLockedAddressesResponse)(nil), // 39: quilibrium.node.global.pb.GetLockedAddressesResponse + (*GlobalGetWorkerInfoRequest)(nil), // 40: quilibrium.node.global.pb.GlobalGetWorkerInfoRequest + (*GlobalGetWorkerInfoResponseItem)(nil), // 41: quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem + (*GlobalGetWorkerInfoResponse)(nil), // 42: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse + (*SendMessage)(nil), // 43: quilibrium.node.global.pb.SendMessage + (*ReceiveMessage)(nil), // 44: quilibrium.node.global.pb.ReceiveMessage + (*GetKeyRegistryRequest)(nil), // 45: quilibrium.node.global.pb.GetKeyRegistryRequest + (*GetKeyRegistryResponse)(nil), // 46: quilibrium.node.global.pb.GetKeyRegistryResponse + (*GetKeyRegistryByProverRequest)(nil), // 47: quilibrium.node.global.pb.GetKeyRegistryByProverRequest + (*GetKeyRegistryByProverResponse)(nil), // 48: quilibrium.node.global.pb.GetKeyRegistryByProverResponse + (*PutIdentityKeyRequest)(nil), // 49: quilibrium.node.global.pb.PutIdentityKeyRequest + (*PutIdentityKeyResponse)(nil), // 50: quilibrium.node.global.pb.PutIdentityKeyResponse + (*PutProvingKeyRequest)(nil), // 51: quilibrium.node.global.pb.PutProvingKeyRequest + (*PutProvingKeyResponse)(nil), // 52: quilibrium.node.global.pb.PutProvingKeyResponse + (*PutCrossSignatureRequest)(nil), // 53: quilibrium.node.global.pb.PutCrossSignatureRequest + (*PutCrossSignatureResponse)(nil), // 54: quilibrium.node.global.pb.PutCrossSignatureResponse + (*PutSignedKeyRequest)(nil), // 55: quilibrium.node.global.pb.PutSignedKeyRequest + (*PutSignedKeyResponse)(nil), // 56: quilibrium.node.global.pb.PutSignedKeyResponse + (*GetIdentityKeyRequest)(nil), // 57: quilibrium.node.global.pb.GetIdentityKeyRequest + (*GetIdentityKeyResponse)(nil), // 58: quilibrium.node.global.pb.GetIdentityKeyResponse + (*GetProvingKeyRequest)(nil), // 59: quilibrium.node.global.pb.GetProvingKeyRequest + (*GetProvingKeyResponse)(nil), // 60: quilibrium.node.global.pb.GetProvingKeyResponse + (*GetSignedKeyRequest)(nil), // 61: quilibrium.node.global.pb.GetSignedKeyRequest + (*GetSignedKeyResponse)(nil), // 62: quilibrium.node.global.pb.GetSignedKeyResponse + (*GetSignedKeysByParentRequest)(nil), // 63: quilibrium.node.global.pb.GetSignedKeysByParentRequest + (*GetSignedKeysByParentResponse)(nil), // 64: quilibrium.node.global.pb.GetSignedKeysByParentResponse + (*RangeProvingKeysRequest)(nil), // 65: quilibrium.node.global.pb.RangeProvingKeysRequest + (*RangeProvingKeysResponse)(nil), // 66: quilibrium.node.global.pb.RangeProvingKeysResponse + (*RangeIdentityKeysRequest)(nil), // 67: quilibrium.node.global.pb.RangeIdentityKeysRequest + (*RangeIdentityKeysResponse)(nil), // 68: 
quilibrium.node.global.pb.RangeIdentityKeysResponse + (*RangeSignedKeysRequest)(nil), // 69: quilibrium.node.global.pb.RangeSignedKeysRequest + (*RangeSignedKeysResponse)(nil), // 70: quilibrium.node.global.pb.RangeSignedKeysResponse + (*MessageKeyShard)(nil), // 71: quilibrium.node.global.pb.MessageKeyShard + (*PutMessageRequest)(nil), // 72: quilibrium.node.global.pb.PutMessageRequest + (*PutMessageResponse)(nil), // 73: quilibrium.node.global.pb.PutMessageResponse + (*Ed448Signature)(nil), // 74: quilibrium.node.keys.pb.Ed448Signature + (*BLS48581SignatureWithProofOfPossession)(nil), // 75: quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + (*BLS48581AddressedSignature)(nil), // 76: quilibrium.node.keys.pb.BLS48581AddressedSignature + (*TraversalProof)(nil), // 77: quilibrium.node.application.pb.TraversalProof + (*TokenDeploy)(nil), // 78: quilibrium.node.token.pb.TokenDeploy + (*TokenUpdate)(nil), // 79: quilibrium.node.token.pb.TokenUpdate + (*Transaction)(nil), // 80: quilibrium.node.token.pb.Transaction + (*PendingTransaction)(nil), // 81: quilibrium.node.token.pb.PendingTransaction + (*MintTransaction)(nil), // 82: quilibrium.node.token.pb.MintTransaction + (*HypergraphDeploy)(nil), // 83: quilibrium.node.hypergraph.pb.HypergraphDeploy + (*HypergraphUpdate)(nil), // 84: quilibrium.node.hypergraph.pb.HypergraphUpdate + (*VertexAdd)(nil), // 85: quilibrium.node.hypergraph.pb.VertexAdd + (*VertexRemove)(nil), // 86: quilibrium.node.hypergraph.pb.VertexRemove + (*HyperedgeAdd)(nil), // 87: quilibrium.node.hypergraph.pb.HyperedgeAdd + (*HyperedgeRemove)(nil), // 88: quilibrium.node.hypergraph.pb.HyperedgeRemove + (*ComputeDeploy)(nil), // 89: quilibrium.node.compute.pb.ComputeDeploy + (*ComputeUpdate)(nil), // 90: quilibrium.node.compute.pb.ComputeUpdate + (*CodeDeployment)(nil), // 91: quilibrium.node.compute.pb.CodeDeployment + (*CodeExecute)(nil), // 92: quilibrium.node.compute.pb.CodeExecute + (*CodeFinalize)(nil), // 93: quilibrium.node.compute.pb.CodeFinalize + (*BLS48581AggregateSignature)(nil), // 94: quilibrium.node.keys.pb.BLS48581AggregateSignature + (*KeyRegistry)(nil), // 95: quilibrium.node.keys.pb.KeyRegistry + (*Ed448PublicKey)(nil), // 96: quilibrium.node.keys.pb.Ed448PublicKey + (*SignedX448Key)(nil), // 97: quilibrium.node.keys.pb.SignedX448Key + (*Message)(nil), // 98: quilibrium.node.application.pb.Message + (*InboxMessagePut)(nil), // 99: quilibrium.node.channel.pb.InboxMessagePut + (*InboxMessageRequest)(nil), // 100: quilibrium.node.channel.pb.InboxMessageRequest + (*HubPut)(nil), // 101: quilibrium.node.channel.pb.HubPut + (*HubRequest)(nil), // 102: quilibrium.node.channel.pb.HubRequest + (*DispatchSyncRequest)(nil), // 103: quilibrium.node.channel.pb.DispatchSyncRequest + (*emptypb.Empty)(nil), // 104: google.protobuf.Empty + (*InboxMessageResponse)(nil), // 105: quilibrium.node.channel.pb.InboxMessageResponse + (*HubResponse)(nil), // 106: quilibrium.node.channel.pb.HubResponse + (*DispatchSyncResponse)(nil), // 107: quilibrium.node.channel.pb.DispatchSyncResponse } var file_global_proto_depIdxs = []int32{ - 66, // 0: quilibrium.node.global.pb.LegacyProverRequest.public_key_signatures_ed448:type_name -> quilibrium.node.keys.pb.Ed448Signature - 67, // 1: quilibrium.node.global.pb.ProverJoin.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 1, // 2: quilibrium.node.global.pb.ProverJoin.merge_targets:type_name -> quilibrium.node.global.pb.SeniorityMerge - 68, // 3: 
quilibrium.node.global.pb.ProverLeave.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 4: quilibrium.node.global.pb.ProverPause.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 5: quilibrium.node.global.pb.ProverResume.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 6: quilibrium.node.global.pb.ProverConfirm.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 7: quilibrium.node.global.pb.ProverUpdate.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 69, // 8: quilibrium.node.global.pb.ProverKick.traversal_proof:type_name -> quilibrium.node.application.pb.TraversalProof - 68, // 9: quilibrium.node.global.pb.ProverReject.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 2, // 10: quilibrium.node.global.pb.MessageRequest.join:type_name -> quilibrium.node.global.pb.ProverJoin - 3, // 11: quilibrium.node.global.pb.MessageRequest.leave:type_name -> quilibrium.node.global.pb.ProverLeave - 4, // 12: quilibrium.node.global.pb.MessageRequest.pause:type_name -> quilibrium.node.global.pb.ProverPause - 5, // 13: quilibrium.node.global.pb.MessageRequest.resume:type_name -> quilibrium.node.global.pb.ProverResume - 6, // 14: quilibrium.node.global.pb.MessageRequest.confirm:type_name -> quilibrium.node.global.pb.ProverConfirm - 9, // 15: quilibrium.node.global.pb.MessageRequest.reject:type_name -> quilibrium.node.global.pb.ProverReject - 8, // 16: quilibrium.node.global.pb.MessageRequest.kick:type_name -> quilibrium.node.global.pb.ProverKick - 7, // 17: quilibrium.node.global.pb.MessageRequest.update:type_name -> quilibrium.node.global.pb.ProverUpdate - 70, // 18: quilibrium.node.global.pb.MessageRequest.token_deploy:type_name -> quilibrium.node.token.pb.TokenDeploy - 71, // 19: quilibrium.node.global.pb.MessageRequest.token_update:type_name -> quilibrium.node.token.pb.TokenUpdate - 72, // 20: quilibrium.node.global.pb.MessageRequest.transaction:type_name -> quilibrium.node.token.pb.Transaction - 73, // 21: quilibrium.node.global.pb.MessageRequest.pending_transaction:type_name -> quilibrium.node.token.pb.PendingTransaction - 74, // 22: quilibrium.node.global.pb.MessageRequest.mint_transaction:type_name -> quilibrium.node.token.pb.MintTransaction - 75, // 23: quilibrium.node.global.pb.MessageRequest.hypergraph_deploy:type_name -> quilibrium.node.hypergraph.pb.HypergraphDeploy - 76, // 24: quilibrium.node.global.pb.MessageRequest.hypergraph_update:type_name -> quilibrium.node.hypergraph.pb.HypergraphUpdate - 77, // 25: quilibrium.node.global.pb.MessageRequest.vertex_add:type_name -> quilibrium.node.hypergraph.pb.VertexAdd - 78, // 26: quilibrium.node.global.pb.MessageRequest.vertex_remove:type_name -> quilibrium.node.hypergraph.pb.VertexRemove - 79, // 27: quilibrium.node.global.pb.MessageRequest.hyperedge_add:type_name -> quilibrium.node.hypergraph.pb.HyperedgeAdd - 80, // 28: quilibrium.node.global.pb.MessageRequest.hyperedge_remove:type_name -> quilibrium.node.hypergraph.pb.HyperedgeRemove - 81, // 29: quilibrium.node.global.pb.MessageRequest.compute_deploy:type_name -> quilibrium.node.compute.pb.ComputeDeploy - 82, // 30: quilibrium.node.global.pb.MessageRequest.compute_update:type_name -> quilibrium.node.compute.pb.ComputeUpdate - 83, // 31: quilibrium.node.global.pb.MessageRequest.code_deploy:type_name 
-> quilibrium.node.compute.pb.CodeDeployment - 84, // 32: quilibrium.node.global.pb.MessageRequest.code_execute:type_name -> quilibrium.node.compute.pb.CodeExecute - 85, // 33: quilibrium.node.global.pb.MessageRequest.code_finalize:type_name -> quilibrium.node.compute.pb.CodeFinalize - 13, // 34: quilibrium.node.global.pb.MessageRequest.shard:type_name -> quilibrium.node.global.pb.FrameHeader - 10, // 35: quilibrium.node.global.pb.MessageBundle.requests:type_name -> quilibrium.node.global.pb.MessageRequest - 86, // 36: quilibrium.node.global.pb.GlobalFrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 86, // 37: quilibrium.node.global.pb.FrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 68, // 38: quilibrium.node.global.pb.ProverLivenessCheck.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 39: quilibrium.node.global.pb.FrameVote.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 86, // 40: quilibrium.node.global.pb.FrameConfirmation.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 12, // 41: quilibrium.node.global.pb.GlobalFrame.header:type_name -> quilibrium.node.global.pb.GlobalFrameHeader - 11, // 42: quilibrium.node.global.pb.GlobalFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle - 13, // 43: quilibrium.node.global.pb.AppShardFrame.header:type_name -> quilibrium.node.global.pb.FrameHeader - 11, // 44: quilibrium.node.global.pb.AppShardFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle - 17, // 45: quilibrium.node.global.pb.GlobalFrameResponse.frame:type_name -> quilibrium.node.global.pb.GlobalFrame - 18, // 46: quilibrium.node.global.pb.AppShardFrameResponse.frame:type_name -> quilibrium.node.global.pb.AppShardFrame - 25, // 47: quilibrium.node.global.pb.GetAppShardsResponse.info:type_name -> quilibrium.node.global.pb.AppShardInfo - 30, // 48: quilibrium.node.global.pb.GetLockedAddressesResponse.transactions:type_name -> quilibrium.node.global.pb.LockedTransaction - 33, // 49: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse.workers:type_name -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem - 87, // 50: quilibrium.node.global.pb.GetKeyRegistryResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry - 87, // 51: quilibrium.node.global.pb.GetKeyRegistryByProverResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry - 88, // 52: quilibrium.node.global.pb.PutIdentityKeyRequest.identity_key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 67, // 53: quilibrium.node.global.pb.PutProvingKeyRequest.proving_key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 89, // 54: quilibrium.node.global.pb.PutSignedKeyRequest.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 88, // 55: quilibrium.node.global.pb.GetIdentityKeyResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 67, // 56: quilibrium.node.global.pb.GetProvingKeyResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 89, // 57: quilibrium.node.global.pb.GetSignedKeyResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 89, // 58: quilibrium.node.global.pb.GetSignedKeysByParentResponse.keys:type_name -> quilibrium.node.keys.pb.SignedX448Key - 67, // 59: quilibrium.node.global.pb.RangeProvingKeysResponse.key:type_name -> 
quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 88, // 60: quilibrium.node.global.pb.RangeIdentityKeysResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 89, // 61: quilibrium.node.global.pb.RangeSignedKeysResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 63, // 62: quilibrium.node.global.pb.PutMessageRequest.message_shards:type_name -> quilibrium.node.global.pb.MessageKeyShard - 20, // 63: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:input_type -> quilibrium.node.global.pb.GetGlobalFrameRequest - 24, // 64: quilibrium.node.global.pb.GlobalService.GetAppShards:input_type -> quilibrium.node.global.pb.GetAppShardsRequest - 27, // 65: quilibrium.node.global.pb.GlobalService.GetGlobalShards:input_type -> quilibrium.node.global.pb.GetGlobalShardsRequest - 29, // 66: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:input_type -> quilibrium.node.global.pb.GetLockedAddressesRequest - 32, // 67: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:input_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoRequest - 22, // 68: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:input_type -> quilibrium.node.global.pb.GetAppShardFrameRequest - 35, // 69: quilibrium.node.global.pb.OnionService.Connect:input_type -> quilibrium.node.global.pb.SendMessage - 64, // 70: quilibrium.node.global.pb.MixnetService.PutMessage:input_type -> quilibrium.node.global.pb.PutMessageRequest - 90, // 71: quilibrium.node.global.pb.MixnetService.RoundStream:input_type -> quilibrium.node.application.pb.Message - 37, // 72: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:input_type -> quilibrium.node.global.pb.GetKeyRegistryRequest - 39, // 73: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:input_type -> quilibrium.node.global.pb.GetKeyRegistryByProverRequest - 41, // 74: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:input_type -> quilibrium.node.global.pb.PutIdentityKeyRequest - 43, // 75: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:input_type -> quilibrium.node.global.pb.PutProvingKeyRequest - 45, // 76: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:input_type -> quilibrium.node.global.pb.PutCrossSignatureRequest - 47, // 77: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:input_type -> quilibrium.node.global.pb.PutSignedKeyRequest - 49, // 78: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:input_type -> quilibrium.node.global.pb.GetIdentityKeyRequest - 51, // 79: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:input_type -> quilibrium.node.global.pb.GetProvingKeyRequest - 53, // 80: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:input_type -> quilibrium.node.global.pb.GetSignedKeyRequest - 55, // 81: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:input_type -> quilibrium.node.global.pb.GetSignedKeysByParentRequest - 57, // 82: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:input_type -> quilibrium.node.global.pb.RangeProvingKeysRequest - 59, // 83: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:input_type -> quilibrium.node.global.pb.RangeIdentityKeysRequest - 61, // 84: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:input_type -> quilibrium.node.global.pb.RangeSignedKeysRequest - 91, // 85: quilibrium.node.global.pb.DispatchService.PutInboxMessage:input_type -> quilibrium.node.channel.pb.InboxMessagePut - 92, // 86: 
quilibrium.node.global.pb.DispatchService.GetInboxMessages:input_type -> quilibrium.node.channel.pb.InboxMessageRequest - 93, // 87: quilibrium.node.global.pb.DispatchService.PutHub:input_type -> quilibrium.node.channel.pb.HubPut - 94, // 88: quilibrium.node.global.pb.DispatchService.GetHub:input_type -> quilibrium.node.channel.pb.HubRequest - 95, // 89: quilibrium.node.global.pb.DispatchService.Sync:input_type -> quilibrium.node.channel.pb.DispatchSyncRequest - 21, // 90: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:output_type -> quilibrium.node.global.pb.GlobalFrameResponse - 26, // 91: quilibrium.node.global.pb.GlobalService.GetAppShards:output_type -> quilibrium.node.global.pb.GetAppShardsResponse - 28, // 92: quilibrium.node.global.pb.GlobalService.GetGlobalShards:output_type -> quilibrium.node.global.pb.GetGlobalShardsResponse - 31, // 93: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:output_type -> quilibrium.node.global.pb.GetLockedAddressesResponse - 34, // 94: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:output_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponse - 23, // 95: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:output_type -> quilibrium.node.global.pb.AppShardFrameResponse - 36, // 96: quilibrium.node.global.pb.OnionService.Connect:output_type -> quilibrium.node.global.pb.ReceiveMessage - 65, // 97: quilibrium.node.global.pb.MixnetService.PutMessage:output_type -> quilibrium.node.global.pb.PutMessageResponse - 90, // 98: quilibrium.node.global.pb.MixnetService.RoundStream:output_type -> quilibrium.node.application.pb.Message - 38, // 99: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:output_type -> quilibrium.node.global.pb.GetKeyRegistryResponse - 40, // 100: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:output_type -> quilibrium.node.global.pb.GetKeyRegistryByProverResponse - 42, // 101: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:output_type -> quilibrium.node.global.pb.PutIdentityKeyResponse - 44, // 102: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:output_type -> quilibrium.node.global.pb.PutProvingKeyResponse - 46, // 103: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:output_type -> quilibrium.node.global.pb.PutCrossSignatureResponse - 48, // 104: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:output_type -> quilibrium.node.global.pb.PutSignedKeyResponse - 50, // 105: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:output_type -> quilibrium.node.global.pb.GetIdentityKeyResponse - 52, // 106: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:output_type -> quilibrium.node.global.pb.GetProvingKeyResponse - 54, // 107: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:output_type -> quilibrium.node.global.pb.GetSignedKeyResponse - 56, // 108: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:output_type -> quilibrium.node.global.pb.GetSignedKeysByParentResponse - 58, // 109: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:output_type -> quilibrium.node.global.pb.RangeProvingKeysResponse - 60, // 110: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:output_type -> quilibrium.node.global.pb.RangeIdentityKeysResponse - 62, // 111: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:output_type -> quilibrium.node.global.pb.RangeSignedKeysResponse - 96, // 112: quilibrium.node.global.pb.DispatchService.PutInboxMessage:output_type -> 
google.protobuf.Empty - 97, // 113: quilibrium.node.global.pb.DispatchService.GetInboxMessages:output_type -> quilibrium.node.channel.pb.InboxMessageResponse - 96, // 114: quilibrium.node.global.pb.DispatchService.PutHub:output_type -> google.protobuf.Empty - 98, // 115: quilibrium.node.global.pb.DispatchService.GetHub:output_type -> quilibrium.node.channel.pb.HubResponse - 99, // 116: quilibrium.node.global.pb.DispatchService.Sync:output_type -> quilibrium.node.channel.pb.DispatchSyncResponse - 90, // [90:117] is the sub-list for method output_type - 63, // [63:90] is the sub-list for method input_type - 63, // [63:63] is the sub-list for extension type_name - 63, // [63:63] is the sub-list for extension extendee - 0, // [0:63] is the sub-list for field type_name + 74, // 0: quilibrium.node.global.pb.LegacyProverRequest.public_key_signatures_ed448:type_name -> quilibrium.node.keys.pb.Ed448Signature + 75, // 1: quilibrium.node.global.pb.ProverJoin.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 1, // 2: quilibrium.node.global.pb.ProverJoin.merge_targets:type_name -> quilibrium.node.global.pb.SeniorityMerge + 76, // 3: quilibrium.node.global.pb.ProverLeave.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 76, // 4: quilibrium.node.global.pb.ProverPause.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 76, // 5: quilibrium.node.global.pb.ProverResume.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 76, // 6: quilibrium.node.global.pb.ProverConfirm.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 76, // 7: quilibrium.node.global.pb.ProverUpdate.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 77, // 8: quilibrium.node.global.pb.ProverKick.traversal_proof:type_name -> quilibrium.node.application.pb.TraversalProof + 76, // 9: quilibrium.node.global.pb.ProverReject.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 2, // 10: quilibrium.node.global.pb.MessageRequest.join:type_name -> quilibrium.node.global.pb.ProverJoin + 3, // 11: quilibrium.node.global.pb.MessageRequest.leave:type_name -> quilibrium.node.global.pb.ProverLeave + 4, // 12: quilibrium.node.global.pb.MessageRequest.pause:type_name -> quilibrium.node.global.pb.ProverPause + 5, // 13: quilibrium.node.global.pb.MessageRequest.resume:type_name -> quilibrium.node.global.pb.ProverResume + 6, // 14: quilibrium.node.global.pb.MessageRequest.confirm:type_name -> quilibrium.node.global.pb.ProverConfirm + 9, // 15: quilibrium.node.global.pb.MessageRequest.reject:type_name -> quilibrium.node.global.pb.ProverReject + 8, // 16: quilibrium.node.global.pb.MessageRequest.kick:type_name -> quilibrium.node.global.pb.ProverKick + 7, // 17: quilibrium.node.global.pb.MessageRequest.update:type_name -> quilibrium.node.global.pb.ProverUpdate + 78, // 18: quilibrium.node.global.pb.MessageRequest.token_deploy:type_name -> quilibrium.node.token.pb.TokenDeploy + 79, // 19: quilibrium.node.global.pb.MessageRequest.token_update:type_name -> quilibrium.node.token.pb.TokenUpdate + 80, // 20: quilibrium.node.global.pb.MessageRequest.transaction:type_name -> quilibrium.node.token.pb.Transaction + 81, // 21: quilibrium.node.global.pb.MessageRequest.pending_transaction:type_name -> quilibrium.node.token.pb.PendingTransaction + 82, // 
22: quilibrium.node.global.pb.MessageRequest.mint_transaction:type_name -> quilibrium.node.token.pb.MintTransaction + 83, // 23: quilibrium.node.global.pb.MessageRequest.hypergraph_deploy:type_name -> quilibrium.node.hypergraph.pb.HypergraphDeploy + 84, // 24: quilibrium.node.global.pb.MessageRequest.hypergraph_update:type_name -> quilibrium.node.hypergraph.pb.HypergraphUpdate + 85, // 25: quilibrium.node.global.pb.MessageRequest.vertex_add:type_name -> quilibrium.node.hypergraph.pb.VertexAdd + 86, // 26: quilibrium.node.global.pb.MessageRequest.vertex_remove:type_name -> quilibrium.node.hypergraph.pb.VertexRemove + 87, // 27: quilibrium.node.global.pb.MessageRequest.hyperedge_add:type_name -> quilibrium.node.hypergraph.pb.HyperedgeAdd + 88, // 28: quilibrium.node.global.pb.MessageRequest.hyperedge_remove:type_name -> quilibrium.node.hypergraph.pb.HyperedgeRemove + 89, // 29: quilibrium.node.global.pb.MessageRequest.compute_deploy:type_name -> quilibrium.node.compute.pb.ComputeDeploy + 90, // 30: quilibrium.node.global.pb.MessageRequest.compute_update:type_name -> quilibrium.node.compute.pb.ComputeUpdate + 91, // 31: quilibrium.node.global.pb.MessageRequest.code_deploy:type_name -> quilibrium.node.compute.pb.CodeDeployment + 92, // 32: quilibrium.node.global.pb.MessageRequest.code_execute:type_name -> quilibrium.node.compute.pb.CodeExecute + 93, // 33: quilibrium.node.global.pb.MessageRequest.code_finalize:type_name -> quilibrium.node.compute.pb.CodeFinalize + 13, // 34: quilibrium.node.global.pb.MessageRequest.shard:type_name -> quilibrium.node.global.pb.FrameHeader + 10, // 35: quilibrium.node.global.pb.MessageBundle.requests:type_name -> quilibrium.node.global.pb.MessageRequest + 94, // 36: quilibrium.node.global.pb.GlobalFrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 94, // 37: quilibrium.node.global.pb.FrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 76, // 38: quilibrium.node.global.pb.ProverLivenessCheck.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 22, // 39: quilibrium.node.global.pb.AppShardProposal.state:type_name -> quilibrium.node.global.pb.AppShardFrame + 19, // 40: quilibrium.node.global.pb.AppShardProposal.parent_quorum_certificate:type_name -> quilibrium.node.global.pb.QuorumCertificate + 20, // 41: quilibrium.node.global.pb.AppShardProposal.prior_rank_timeout_certificate:type_name -> quilibrium.node.global.pb.TimeoutCertificate + 17, // 42: quilibrium.node.global.pb.AppShardProposal.vote:type_name -> quilibrium.node.global.pb.ProposalVote + 21, // 43: quilibrium.node.global.pb.GlobalProposal.state:type_name -> quilibrium.node.global.pb.GlobalFrame + 19, // 44: quilibrium.node.global.pb.GlobalProposal.parent_quorum_certificate:type_name -> quilibrium.node.global.pb.QuorumCertificate + 20, // 45: quilibrium.node.global.pb.GlobalProposal.prior_rank_timeout_certificate:type_name -> quilibrium.node.global.pb.TimeoutCertificate + 17, // 46: quilibrium.node.global.pb.GlobalProposal.vote:type_name -> quilibrium.node.global.pb.ProposalVote + 76, // 47: quilibrium.node.global.pb.ProposalVote.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 19, // 48: quilibrium.node.global.pb.TimeoutState.latest_quorum_certificate:type_name -> quilibrium.node.global.pb.QuorumCertificate + 20, // 49: quilibrium.node.global.pb.TimeoutState.prior_rank_timeout_certificate:type_name -> 
quilibrium.node.global.pb.TimeoutCertificate + 17, // 50: quilibrium.node.global.pb.TimeoutState.vote:type_name -> quilibrium.node.global.pb.ProposalVote + 94, // 51: quilibrium.node.global.pb.QuorumCertificate.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 19, // 52: quilibrium.node.global.pb.TimeoutCertificate.latest_quorum_certificate:type_name -> quilibrium.node.global.pb.QuorumCertificate + 94, // 53: quilibrium.node.global.pb.TimeoutCertificate.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 12, // 54: quilibrium.node.global.pb.GlobalFrame.header:type_name -> quilibrium.node.global.pb.GlobalFrameHeader + 11, // 55: quilibrium.node.global.pb.GlobalFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle + 13, // 56: quilibrium.node.global.pb.AppShardFrame.header:type_name -> quilibrium.node.global.pb.FrameHeader + 11, // 57: quilibrium.node.global.pb.AppShardFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle + 21, // 58: quilibrium.node.global.pb.GlobalFrameResponse.frame:type_name -> quilibrium.node.global.pb.GlobalFrame + 16, // 59: quilibrium.node.global.pb.GlobalProposalResponse.proposal:type_name -> quilibrium.node.global.pb.GlobalProposal + 22, // 60: quilibrium.node.global.pb.AppShardFrameResponse.frame:type_name -> quilibrium.node.global.pb.AppShardFrame + 15, // 61: quilibrium.node.global.pb.AppShardProposalResponse.proposal:type_name -> quilibrium.node.global.pb.AppShardProposal + 33, // 62: quilibrium.node.global.pb.GetAppShardsResponse.info:type_name -> quilibrium.node.global.pb.AppShardInfo + 38, // 63: quilibrium.node.global.pb.GetLockedAddressesResponse.transactions:type_name -> quilibrium.node.global.pb.LockedTransaction + 41, // 64: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse.workers:type_name -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem + 95, // 65: quilibrium.node.global.pb.GetKeyRegistryResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry + 95, // 66: quilibrium.node.global.pb.GetKeyRegistryByProverResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry + 96, // 67: quilibrium.node.global.pb.PutIdentityKeyRequest.identity_key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 75, // 68: quilibrium.node.global.pb.PutProvingKeyRequest.proving_key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 97, // 69: quilibrium.node.global.pb.PutSignedKeyRequest.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 96, // 70: quilibrium.node.global.pb.GetIdentityKeyResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 75, // 71: quilibrium.node.global.pb.GetProvingKeyResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 97, // 72: quilibrium.node.global.pb.GetSignedKeyResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 97, // 73: quilibrium.node.global.pb.GetSignedKeysByParentResponse.keys:type_name -> quilibrium.node.keys.pb.SignedX448Key + 75, // 74: quilibrium.node.global.pb.RangeProvingKeysResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 96, // 75: quilibrium.node.global.pb.RangeIdentityKeysResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 97, // 76: quilibrium.node.global.pb.RangeSignedKeysResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 71, // 77: quilibrium.node.global.pb.PutMessageRequest.message_shards:type_name -> 
quilibrium.node.global.pb.MessageKeyShard + 24, // 78: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:input_type -> quilibrium.node.global.pb.GetGlobalFrameRequest + 26, // 79: quilibrium.node.global.pb.GlobalService.GetGlobalProposal:input_type -> quilibrium.node.global.pb.GetGlobalProposalRequest + 32, // 80: quilibrium.node.global.pb.GlobalService.GetAppShards:input_type -> quilibrium.node.global.pb.GetAppShardsRequest + 35, // 81: quilibrium.node.global.pb.GlobalService.GetGlobalShards:input_type -> quilibrium.node.global.pb.GetGlobalShardsRequest + 37, // 82: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:input_type -> quilibrium.node.global.pb.GetLockedAddressesRequest + 40, // 83: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:input_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoRequest + 28, // 84: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:input_type -> quilibrium.node.global.pb.GetAppShardFrameRequest + 30, // 85: quilibrium.node.global.pb.AppShardService.GetAppShardProposal:input_type -> quilibrium.node.global.pb.GetAppShardProposalRequest + 43, // 86: quilibrium.node.global.pb.OnionService.Connect:input_type -> quilibrium.node.global.pb.SendMessage + 72, // 87: quilibrium.node.global.pb.MixnetService.PutMessage:input_type -> quilibrium.node.global.pb.PutMessageRequest + 98, // 88: quilibrium.node.global.pb.MixnetService.RoundStream:input_type -> quilibrium.node.application.pb.Message + 45, // 89: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:input_type -> quilibrium.node.global.pb.GetKeyRegistryRequest + 47, // 90: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:input_type -> quilibrium.node.global.pb.GetKeyRegistryByProverRequest + 49, // 91: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:input_type -> quilibrium.node.global.pb.PutIdentityKeyRequest + 51, // 92: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:input_type -> quilibrium.node.global.pb.PutProvingKeyRequest + 53, // 93: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:input_type -> quilibrium.node.global.pb.PutCrossSignatureRequest + 55, // 94: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:input_type -> quilibrium.node.global.pb.PutSignedKeyRequest + 57, // 95: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:input_type -> quilibrium.node.global.pb.GetIdentityKeyRequest + 59, // 96: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:input_type -> quilibrium.node.global.pb.GetProvingKeyRequest + 61, // 97: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:input_type -> quilibrium.node.global.pb.GetSignedKeyRequest + 63, // 98: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:input_type -> quilibrium.node.global.pb.GetSignedKeysByParentRequest + 65, // 99: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:input_type -> quilibrium.node.global.pb.RangeProvingKeysRequest + 67, // 100: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:input_type -> quilibrium.node.global.pb.RangeIdentityKeysRequest + 69, // 101: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:input_type -> quilibrium.node.global.pb.RangeSignedKeysRequest + 99, // 102: quilibrium.node.global.pb.DispatchService.PutInboxMessage:input_type -> quilibrium.node.channel.pb.InboxMessagePut + 100, // 103: quilibrium.node.global.pb.DispatchService.GetInboxMessages:input_type -> quilibrium.node.channel.pb.InboxMessageRequest + 101, // 104: 
quilibrium.node.global.pb.DispatchService.PutHub:input_type -> quilibrium.node.channel.pb.HubPut + 102, // 105: quilibrium.node.global.pb.DispatchService.GetHub:input_type -> quilibrium.node.channel.pb.HubRequest + 103, // 106: quilibrium.node.global.pb.DispatchService.Sync:input_type -> quilibrium.node.channel.pb.DispatchSyncRequest + 25, // 107: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:output_type -> quilibrium.node.global.pb.GlobalFrameResponse + 27, // 108: quilibrium.node.global.pb.GlobalService.GetGlobalProposal:output_type -> quilibrium.node.global.pb.GlobalProposalResponse + 34, // 109: quilibrium.node.global.pb.GlobalService.GetAppShards:output_type -> quilibrium.node.global.pb.GetAppShardsResponse + 36, // 110: quilibrium.node.global.pb.GlobalService.GetGlobalShards:output_type -> quilibrium.node.global.pb.GetGlobalShardsResponse + 39, // 111: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:output_type -> quilibrium.node.global.pb.GetLockedAddressesResponse + 42, // 112: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:output_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponse + 29, // 113: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:output_type -> quilibrium.node.global.pb.AppShardFrameResponse + 31, // 114: quilibrium.node.global.pb.AppShardService.GetAppShardProposal:output_type -> quilibrium.node.global.pb.AppShardProposalResponse + 44, // 115: quilibrium.node.global.pb.OnionService.Connect:output_type -> quilibrium.node.global.pb.ReceiveMessage + 73, // 116: quilibrium.node.global.pb.MixnetService.PutMessage:output_type -> quilibrium.node.global.pb.PutMessageResponse + 98, // 117: quilibrium.node.global.pb.MixnetService.RoundStream:output_type -> quilibrium.node.application.pb.Message + 46, // 118: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:output_type -> quilibrium.node.global.pb.GetKeyRegistryResponse + 48, // 119: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:output_type -> quilibrium.node.global.pb.GetKeyRegistryByProverResponse + 50, // 120: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:output_type -> quilibrium.node.global.pb.PutIdentityKeyResponse + 52, // 121: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:output_type -> quilibrium.node.global.pb.PutProvingKeyResponse + 54, // 122: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:output_type -> quilibrium.node.global.pb.PutCrossSignatureResponse + 56, // 123: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:output_type -> quilibrium.node.global.pb.PutSignedKeyResponse + 58, // 124: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:output_type -> quilibrium.node.global.pb.GetIdentityKeyResponse + 60, // 125: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:output_type -> quilibrium.node.global.pb.GetProvingKeyResponse + 62, // 126: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:output_type -> quilibrium.node.global.pb.GetSignedKeyResponse + 64, // 127: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:output_type -> quilibrium.node.global.pb.GetSignedKeysByParentResponse + 66, // 128: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:output_type -> quilibrium.node.global.pb.RangeProvingKeysResponse + 68, // 129: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:output_type -> quilibrium.node.global.pb.RangeIdentityKeysResponse + 70, // 130: 
quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:output_type -> quilibrium.node.global.pb.RangeSignedKeysResponse + 104, // 131: quilibrium.node.global.pb.DispatchService.PutInboxMessage:output_type -> google.protobuf.Empty + 105, // 132: quilibrium.node.global.pb.DispatchService.GetInboxMessages:output_type -> quilibrium.node.channel.pb.InboxMessageResponse + 104, // 133: quilibrium.node.global.pb.DispatchService.PutHub:output_type -> google.protobuf.Empty + 106, // 134: quilibrium.node.global.pb.DispatchService.GetHub:output_type -> quilibrium.node.channel.pb.HubResponse + 107, // 135: quilibrium.node.global.pb.DispatchService.Sync:output_type -> quilibrium.node.channel.pb.DispatchSyncResponse + 107, // [107:136] is the sub-list for method output_type + 78, // [78:107] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name } func init() { file_global_proto_init() } @@ -5663,7 +6416,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FrameVote); i { + switch v := v.(*AppShardProposal); i { case 0: return &v.state case 1: @@ -5675,7 +6428,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FrameConfirmation); i { + switch v := v.(*GlobalProposal); i { case 0: return &v.state case 1: @@ -5687,7 +6440,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalFrame); i { + switch v := v.(*ProposalVote); i { case 0: return &v.state case 1: @@ -5699,7 +6452,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardFrame); i { + switch v := v.(*TimeoutState); i { case 0: return &v.state case 1: @@ -5711,7 +6464,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalAlert); i { + switch v := v.(*QuorumCertificate); i { case 0: return &v.state case 1: @@ -5723,7 +6476,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalFrameRequest); i { + switch v := v.(*TimeoutCertificate); i { case 0: return &v.state case 1: @@ -5735,7 +6488,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalFrameResponse); i { + switch v := v.(*GlobalFrame); i { case 0: return &v.state case 1: @@ -5747,7 +6500,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardFrameRequest); i { + switch v := v.(*AppShardFrame); i { case 0: return &v.state case 1: @@ -5759,7 +6512,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardFrameResponse); i { + switch v := v.(*GlobalAlert); i { case 0: return &v.state case 1: @@ -5771,7 +6524,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardsRequest); i { + switch v := v.(*GetGlobalFrameRequest); i { case 0: 
return &v.state case 1: @@ -5783,7 +6536,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardInfo); i { + switch v := v.(*GlobalFrameResponse); i { case 0: return &v.state case 1: @@ -5795,7 +6548,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardsResponse); i { + switch v := v.(*GetGlobalProposalRequest); i { case 0: return &v.state case 1: @@ -5807,7 +6560,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalShardsRequest); i { + switch v := v.(*GlobalProposalResponse); i { case 0: return &v.state case 1: @@ -5819,7 +6572,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalShardsResponse); i { + switch v := v.(*GetAppShardFrameRequest); i { case 0: return &v.state case 1: @@ -5831,7 +6584,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLockedAddressesRequest); i { + switch v := v.(*AppShardFrameResponse); i { case 0: return &v.state case 1: @@ -5843,7 +6596,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LockedTransaction); i { + switch v := v.(*GetAppShardProposalRequest); i { case 0: return &v.state case 1: @@ -5855,7 +6608,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLockedAddressesResponse); i { + switch v := v.(*AppShardProposalResponse); i { case 0: return &v.state case 1: @@ -5867,7 +6620,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoRequest); i { + switch v := v.(*GetAppShardsRequest); i { case 0: return &v.state case 1: @@ -5879,7 +6632,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoResponseItem); i { + switch v := v.(*AppShardInfo); i { case 0: return &v.state case 1: @@ -5891,7 +6644,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoResponse); i { + switch v := v.(*GetAppShardsResponse); i { case 0: return &v.state case 1: @@ -5903,7 +6656,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendMessage); i { + switch v := v.(*GetGlobalShardsRequest); i { case 0: return &v.state case 1: @@ -5915,7 +6668,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReceiveMessage); i { + switch v := v.(*GetGlobalShardsResponse); i { case 0: return &v.state case 1: @@ -5927,7 +6680,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryRequest); i { + switch v := v.(*GetLockedAddressesRequest); i { case 0: return &v.state case 1: @@ -5939,7 +6692,7 @@ func file_global_proto_init() { } } 
file_global_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryResponse); i { + switch v := v.(*LockedTransaction); i { case 0: return &v.state case 1: @@ -5951,7 +6704,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryByProverRequest); i { + switch v := v.(*GetLockedAddressesResponse); i { case 0: return &v.state case 1: @@ -5963,7 +6716,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryByProverResponse); i { + switch v := v.(*GlobalGetWorkerInfoRequest); i { case 0: return &v.state case 1: @@ -5975,7 +6728,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutIdentityKeyRequest); i { + switch v := v.(*GlobalGetWorkerInfoResponseItem); i { case 0: return &v.state case 1: @@ -5987,7 +6740,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutIdentityKeyResponse); i { + switch v := v.(*GlobalGetWorkerInfoResponse); i { case 0: return &v.state case 1: @@ -5999,7 +6752,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutProvingKeyRequest); i { + switch v := v.(*SendMessage); i { case 0: return &v.state case 1: @@ -6011,7 +6764,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutProvingKeyResponse); i { + switch v := v.(*ReceiveMessage); i { case 0: return &v.state case 1: @@ -6023,7 +6776,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutCrossSignatureRequest); i { + switch v := v.(*GetKeyRegistryRequest); i { case 0: return &v.state case 1: @@ -6035,7 +6788,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutCrossSignatureResponse); i { + switch v := v.(*GetKeyRegistryResponse); i { case 0: return &v.state case 1: @@ -6047,7 +6800,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutSignedKeyRequest); i { + switch v := v.(*GetKeyRegistryByProverRequest); i { case 0: return &v.state case 1: @@ -6059,7 +6812,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutSignedKeyResponse); i { + switch v := v.(*GetKeyRegistryByProverResponse); i { case 0: return &v.state case 1: @@ -6071,7 +6824,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIdentityKeyRequest); i { + switch v := v.(*PutIdentityKeyRequest); i { case 0: return &v.state case 1: @@ -6083,7 +6836,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIdentityKeyResponse); i { + switch v := v.(*PutIdentityKeyResponse); i { case 0: return &v.state case 1: @@ -6095,7 +6848,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[51].Exporter = func(v interface{}, 
i int) interface{} { - switch v := v.(*GetProvingKeyRequest); i { + switch v := v.(*PutProvingKeyRequest); i { case 0: return &v.state case 1: @@ -6107,7 +6860,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvingKeyResponse); i { + switch v := v.(*PutProvingKeyResponse); i { case 0: return &v.state case 1: @@ -6119,7 +6872,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeyRequest); i { + switch v := v.(*PutCrossSignatureRequest); i { case 0: return &v.state case 1: @@ -6131,7 +6884,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeyResponse); i { + switch v := v.(*PutCrossSignatureResponse); i { case 0: return &v.state case 1: @@ -6143,7 +6896,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeysByParentRequest); i { + switch v := v.(*PutSignedKeyRequest); i { case 0: return &v.state case 1: @@ -6155,7 +6908,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeysByParentResponse); i { + switch v := v.(*PutSignedKeyResponse); i { case 0: return &v.state case 1: @@ -6167,7 +6920,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeProvingKeysRequest); i { + switch v := v.(*GetIdentityKeyRequest); i { case 0: return &v.state case 1: @@ -6179,7 +6932,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeProvingKeysResponse); i { + switch v := v.(*GetIdentityKeyResponse); i { case 0: return &v.state case 1: @@ -6191,7 +6944,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeIdentityKeysRequest); i { + switch v := v.(*GetProvingKeyRequest); i { case 0: return &v.state case 1: @@ -6203,7 +6956,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeIdentityKeysResponse); i { + switch v := v.(*GetProvingKeyResponse); i { case 0: return &v.state case 1: @@ -6215,7 +6968,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeSignedKeysRequest); i { + switch v := v.(*GetSignedKeyRequest); i { case 0: return &v.state case 1: @@ -6227,7 +6980,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeSignedKeysResponse); i { + switch v := v.(*GetSignedKeyResponse); i { case 0: return &v.state case 1: @@ -6239,7 +6992,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageKeyShard); i { + switch v := v.(*GetSignedKeysByParentRequest); i { case 0: return &v.state case 1: @@ -6251,7 +7004,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutMessageRequest); i { + switch v := 
v.(*GetSignedKeysByParentResponse); i { case 0: return &v.state case 1: @@ -6263,6 +7016,102 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeProvingKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeProvingKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeIdentityKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeIdentityKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeSignedKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeSignedKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageKeyShard); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutMessageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PutMessageResponse); i { case 0: return &v.state @@ -6308,7 +7157,7 @@ func file_global_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_global_proto_rawDesc, NumEnums: 0, - NumMessages: 66, + NumMessages: 74, NumExtensions: 0, NumServices: 6, }, diff --git a/protobufs/global.pb.gw.go b/protobufs/global.pb.gw.go index 0626f76..98e0711 100644 --- a/protobufs/global.pb.gw.go +++ b/protobufs/global.pb.gw.go @@ -65,6 +65,40 @@ func local_request_GlobalService_GetGlobalFrame_0(ctx context.Context, marshaler } +func request_GlobalService_GetGlobalProposal_0(ctx context.Context, marshaler runtime.Marshaler, client GlobalServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetGlobalProposalRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetGlobalProposal(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_GlobalService_GetGlobalProposal_0(ctx context.Context, marshaler runtime.Marshaler, server GlobalServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetGlobalProposalRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetGlobalProposal(ctx, &protoReq) + return msg, metadata, err + +} + func request_GlobalService_GetAppShards_0(ctx context.Context, marshaler runtime.Marshaler, client GlobalServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetAppShardsRequest var metadata runtime.ServerMetadata @@ -235,6 +269,40 @@ func local_request_AppShardService_GetAppShardFrame_0(ctx context.Context, marsh } +func request_AppShardService_GetAppShardProposal_0(ctx context.Context, marshaler runtime.Marshaler, client AppShardServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetAppShardProposalRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetAppShardProposal(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_AppShardService_GetAppShardProposal_0(ctx context.Context, marshaler runtime.Marshaler, server AppShardServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetAppShardProposalRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetAppShardProposal(ctx, &protoReq) + return msg, metadata, err + +} + func request_OnionService_Connect_0(ctx context.Context, marshaler runtime.Marshaler, client OnionServiceClient, req *http.Request, pathParams map[string]string) (OnionService_ConnectClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Connect(ctx) @@ -998,6 +1066,31 @@ func RegisterGlobalServiceHandlerServer(ctx context.Context, mux *runtime.ServeM }) + mux.Handle("POST", pattern_GlobalService_GetGlobalProposal_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + 
var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/quilibrium.node.global.pb.GlobalService/GetGlobalProposal", runtime.WithHTTPPathPattern("/quilibrium.node.global.pb.GlobalService/GetGlobalProposal")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_GlobalService_GetGlobalProposal_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_GlobalService_GetGlobalProposal_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("POST", pattern_GlobalService_GetAppShards_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1132,6 +1225,31 @@ func RegisterAppShardServiceHandlerServer(ctx context.Context, mux *runtime.Serv }) + mux.Handle("POST", pattern_AppShardService_GetAppShardProposal_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/quilibrium.node.global.pb.AppShardService/GetAppShardProposal", runtime.WithHTTPPathPattern("/quilibrium.node.global.pb.AppShardService/GetAppShardProposal")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_AppShardService_GetAppShardProposal_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_AppShardService_GetAppShardProposal_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -1720,6 +1838,28 @@ func RegisterGlobalServiceHandlerClient(ctx context.Context, mux *runtime.ServeM }) + mux.Handle("POST", pattern_GlobalService_GetGlobalProposal_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/quilibrium.node.global.pb.GlobalService/GetGlobalProposal", runtime.WithHTTPPathPattern("/quilibrium.node.global.pb.GlobalService/GetGlobalProposal")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_GlobalService_GetGlobalProposal_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_GlobalService_GetGlobalProposal_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("POST", pattern_GlobalService_GetAppShards_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1814,6 +1954,8 @@ func RegisterGlobalServiceHandlerClient(ctx context.Context, mux *runtime.ServeM var ( pattern_GlobalService_GetGlobalFrame_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.GlobalService", "GetGlobalFrame"}, "")) + pattern_GlobalService_GetGlobalProposal_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.GlobalService", "GetGlobalProposal"}, "")) + pattern_GlobalService_GetAppShards_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.GlobalService", "GetAppShards"}, "")) pattern_GlobalService_GetGlobalShards_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.GlobalService", "GetGlobalShards"}, "")) @@ -1826,6 +1968,8 @@ var ( var ( forward_GlobalService_GetGlobalFrame_0 = runtime.ForwardResponseMessage + forward_GlobalService_GetGlobalProposal_0 = runtime.ForwardResponseMessage + forward_GlobalService_GetAppShards_0 = runtime.ForwardResponseMessage forward_GlobalService_GetGlobalShards_0 = runtime.ForwardResponseMessage @@ -1895,15 +2039,41 @@ func RegisterAppShardServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) + mux.Handle("POST", pattern_AppShardService_GetAppShardProposal_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/quilibrium.node.global.pb.AppShardService/GetAppShardProposal", runtime.WithHTTPPathPattern("/quilibrium.node.global.pb.AppShardService/GetAppShardProposal")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_AppShardService_GetAppShardProposal_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = 
runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_AppShardService_GetAppShardProposal_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } var ( pattern_AppShardService_GetAppShardFrame_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.AppShardService", "GetAppShardFrame"}, "")) + + pattern_AppShardService_GetAppShardProposal_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.global.pb.AppShardService", "GetAppShardProposal"}, "")) ) var ( forward_AppShardService_GetAppShardFrame_0 = runtime.ForwardResponseMessage + + forward_AppShardService_GetAppShardProposal_0 = runtime.ForwardResponseMessage ) // RegisterOnionServiceHandlerFromEndpoint is same as RegisterOnionServiceHandler but diff --git a/protobufs/global.proto b/protobufs/global.proto index bb98e38..0602ded 100644 --- a/protobufs/global.proto +++ b/protobufs/global.proto @@ -117,44 +117,10 @@ message GlobalFrameHeader { // A strictly monotonically-increasing frame number. Used for culling old // frames past a configurable cutoff point. uint64 frame_number = 1; - // The self-reported timestamp from the proof publisher, encoded as an int64 - // of the Unix epoch in milliseconds. Should be good until - // 292278994-08-17 07:12:55.807, at which point, this is someone else's - // problem. Timestamps are imperfect, but smoothed in a rolling window to - // ensure a network and quorum-stable difficulty adjustment. Anomalies are - // bounded such that a timestamp beyond ten times the average issuance rate - // is discarded in preference to the runner up electees, unless there is - // simply no alternative available (for example, if a network outage occurred - // from an upgrade or bug). - int64 timestamp = 2; - // The difficulty level used for the frame. Difficulty is calculated based on - // the previous 60 timestamps correlated with difficulties, such that the - // interval smooths out to align to the type-defined rate. This is expected to - // increase subtly with clock speed and future hardware implementations, but - // due to incentive alignment associated with global proofs, not fastest clock - // in the west, should be gradual. - uint32 difficulty = 3; - // The output data from the VDF, serialized as bytes. For Wesolowski, this is - // an encoding of the 258 byte Y value concatenated with the 258 byte proof - // value. - bytes output = 4; - // The selector value of the previous frame's output, produced as a Poseidon - // hash of the output. - bytes parent_selector = 5; - // The 256 global commitment values - repeated bytes global_commitments = 6; - // The prover tree root commitment - bytes prover_tree_commitment = 7; - // The confirmation signatures of the frame - quilibrium.node.keys.pb.BLS48581AggregateSignature public_key_signature_bls48581 = 8; -} - -message FrameHeader { - // The address dictates the depth of the shard, at a minimum, the app domain. - bytes address = 1; - // A strictly monotonically-increasing frame number. Used for culling old - // frames past a configurable cutoff point. - uint64 frame_number = 2; + // A strictly monotonically-increasing rank number. Disambiguates timeouts + // and allows for consistent determination of leader, without having to rely + // on parsing internal state. 
+ uint64 rank = 2; // The self-reported timestamp from the proof publisher, encoded as an int64 // of the Unix epoch in milliseconds. Should be good until // 292278994-08-17 07:12:55.807, at which point, this is someone else's @@ -179,58 +145,166 @@ message FrameHeader { // The selector value of the previous frame's output, produced as a Poseidon // hash of the output. bytes parent_selector = 6; - // The root commitment to the set of requests for the frame. - bytes requests_root = 7; - // The root commitments to to the hypergraph state at the address. - repeated bytes state_roots = 8; - // The prover of the frame, incorporated into the input to the VDF. - bytes prover = 9; - // The prover's proposed fee multiplier, incorporated into sliding window - // averaging. - uint64 fee_multiplier_vote = 10; + // The 256 global commitment values + repeated bytes global_commitments = 7; + // The prover tree root commitment + bytes prover_tree_commitment = 8; + // The request root commitment + bytes requests_root = 9; + // The prover of the frame + bytes prover = 10; // The confirmation signatures of the frame quilibrium.node.keys.pb.BLS48581AggregateSignature public_key_signature_bls48581 = 11; } +message FrameHeader { + // The address dictates the depth of the shard, at a minimum, the app domain. + bytes address = 1; + // A strictly monotonically-increasing frame number. Used for culling old + // frames past a configurable cutoff point. + uint64 frame_number = 2; + // A strictly monotonically-increasing rank number. Disambiguates timeouts + // and allows for consistent determination of leader, without having to rely + // on parsing internal state. + uint64 rank = 3; + // The self-reported timestamp from the proof publisher, encoded as an int64 + // of the Unix epoch in milliseconds. Should be good until + // 292278994-08-17 07:12:55.807, at which point, this is someone else's + // problem. Timestamps are imperfect, but smoothed in a rolling window to + // ensure a network and quorum-stable difficulty adjustment. Anomalies are + // bounded such that a timestamp beyond ten times the average issuance rate + // is discarded in preference to the runner up electees, unless there is + // simply no alternative available (for example, if a network outage occurred + // from an upgrade or bug). + int64 timestamp = 4; + // The difficulty level used for the frame. Difficulty is calculated based on + // the previous 60 timestamps correlated with difficulties, such that the + // interval smooths out to align to the type-defined rate. This is expected to + // increase subtly with clock speed and future hardware implementations, but + // due to incentive alignment associated with global proofs, not fastest clock + // in the west, should be gradual. + uint32 difficulty = 5; + // The output data from the VDF, serialized as bytes. For Wesolowski, this is + // an encoding of the 258 byte Y value concatenated with the 258 byte proof + // value. + bytes output = 6; + // The selector value of the previous frame's output, produced as a Poseidon + // hash of the output. + bytes parent_selector = 7; + // The root commitment to the set of requests for the frame. + bytes requests_root = 8; + // The root commitments to to the hypergraph state at the address. + repeated bytes state_roots = 9; + // The prover of the frame, incorporated into the input to the VDF. + bytes prover = 10; + // The prover's proposed fee multiplier, incorporated into sliding window + // averaging. 
+ uint64 fee_multiplier_vote = 11; + // The confirmation signatures of the frame + quilibrium.node.keys.pb.BLS48581AggregateSignature public_key_signature_bls48581 = 12; +} + message ProverLivenessCheck { // The filter for the prover's commitment in the trie bytes filter = 1; + // The rank of the consensus clique + uint64 rank = 2; // The frame number for which this liveness check is being sent - uint64 frame_number = 2; + uint64 frame_number = 3; // The timestamp when the liveness check was created - int64 timestamp = 3; + int64 timestamp = 4; // The hash of the shard commitments and prover root - bytes commitment_hash = 4; + bytes commitment_hash = 5; // The BLS signature with the prover's address - quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 5; + quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 6; } -message FrameVote { +message AppShardProposal { + // The associated state for the proposal + AppShardFrame state = 1; + // The parent quorum certificate to this state + QuorumCertificate parent_quorum_certificate = 2; + // The previous rank's timeout certificate, if applicable + TimeoutCertificate prior_rank_timeout_certificate = 3; + // The proposer's vote + ProposalVote vote = 4; +} + +message GlobalProposal { + // The associated state for the proposal + GlobalFrame state = 1; + // The parent quorum certificate to this state + QuorumCertificate parent_quorum_certificate = 2; + // The previous rank's timeout certificate, if applicable + TimeoutCertificate prior_rank_timeout_certificate = 3; + // The proposer's vote + ProposalVote vote = 4; +} + +message ProposalVote { // The filter for the prover's commitment in the trie bytes filter = 1; - // The frame number being voted on - uint64 frame_number = 2; - // The proposer of the frame - bytes proposer = 3; - // Whether the voter approves the frame - bool approve = 4; + // The rank of the consensus clique + uint64 rank = 2; + // The frame number for which this proposal applies + uint64 frame_number = 3; + // The selector being voted for + bytes selector = 4; // The timestamp when the vote was created - int64 timestamp = 5; + uint64 timestamp = 5; // The BLS signature with the voter's address quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 6; } -message FrameConfirmation { +message TimeoutState { + // The latest quorum certificate seen by the pacemaker. + QuorumCertificate latest_quorum_certificate = 1; + // The previous rank's timeout certificate, if applicable. + TimeoutCertificate prior_rank_timeout_certificate = 2; + // The signed payload which will become part of the new timeout certificate. + ProposalVote vote = 3; + // TimeoutTick is the number of times the `timeout.Controller` has + // (re-)emitted the timeout for this rank. When the timer for the rank's + // original duration expires, a `TimeoutState` with `TimeoutTick = 0` is + // broadcast. Subsequently, `timeout.Controller` re-broadcasts the + // `TimeoutState` periodically based on some internal heuristic. Each time + // we attempt a re-broadcast, the `TimeoutTick` is incremented. Incrementing + // the field prevents de-duplicated within the network layer, which in turn + // guarantees quick delivery of the `TimeoutState` after GST and facilitates + // recovery. 
+ uint64 timeout_tick = 4; + // The timestamp of the message (not the timeout state) + uint64 timestamp = 5; +} + +message QuorumCertificate { // The filter for the prover's commitment in the trie bytes filter = 1; - // The frame number that was confirmed - uint64 frame_number = 2; + // The rank of the consensus clique + uint64 rank = 2; + // The frame number for which this certificate applies + uint64 frame_number = 3; // The selector (hash) of the confirmed frame - bytes selector = 3; - // The timestamp when the vote was created - int64 timestamp = 4; + bytes selector = 4; + // The timestamp of the message (not the certificate) + uint64 timestamp = 5; // The aggregated BLS signature from all voters - quilibrium.node.keys.pb.BLS48581AggregateSignature aggregate_signature = 5; + quilibrium.node.keys.pb.BLS48581AggregateSignature aggregate_signature = 6; +} + +message TimeoutCertificate { + // The filter for the prover's commitment in the trie + bytes filter = 1; + // The rank of the consensus clique + uint64 rank = 2; + // The latest ranks in signer order + repeated uint64 latest_ranks = 3; + // The latest quorum certificate from all timeouts + QuorumCertificate latest_quorum_certificate = 4; + // The timestamp of the message (not the certificate) + uint64 timestamp = 5; + // The aggregated BLS signature from all voters + quilibrium.node.keys.pb.BLS48581AggregateSignature aggregate_signature = 6; } message GlobalFrame { @@ -257,6 +331,14 @@ message GlobalFrameResponse { bytes proof = 2; } +message GetGlobalProposalRequest { + uint64 frame_number = 1; +} + +message GlobalProposalResponse { + GlobalProposal proposal = 1; +} + message GetAppShardFrameRequest { bytes filter = 1; uint64 frame_number = 2; @@ -267,6 +349,16 @@ message AppShardFrameResponse { bytes proof = 2; } +message GetAppShardProposalRequest { + bytes filter = 1; + uint64 frame_number = 2; +} + +message AppShardProposalResponse { + AppShardProposal proposal = 1; +} + + message GetAppShardsRequest { bytes shard_key = 1; repeated uint32 prefix = 2; @@ -334,6 +426,7 @@ message GlobalGetWorkerInfoResponse { service GlobalService { rpc GetGlobalFrame (GetGlobalFrameRequest) returns (GlobalFrameResponse); + rpc GetGlobalProposal (GetGlobalProposalRequest) returns (GlobalProposalResponse); rpc GetAppShards(GetAppShardsRequest) returns (GetAppShardsResponse); rpc GetGlobalShards(GetGlobalShardsRequest) returns (GetGlobalShardsResponse); rpc GetLockedAddresses(GetLockedAddressesRequest) returns (GetLockedAddressesResponse); @@ -342,6 +435,7 @@ service GlobalService { service AppShardService { rpc GetAppShardFrame (GetAppShardFrameRequest) returns (AppShardFrameResponse); + rpc GetAppShardProposal (GetAppShardProposalRequest) returns (AppShardProposalResponse); } message SendMessage { diff --git a/protobufs/global_grpc.pb.go b/protobufs/global_grpc.pb.go index 5329ff9..fa1a965 100644 --- a/protobufs/global_grpc.pb.go +++ b/protobufs/global_grpc.pb.go @@ -21,6 +21,7 @@ const _ = grpc.SupportPackageIsVersion7 const ( GlobalService_GetGlobalFrame_FullMethodName = "/quilibrium.node.global.pb.GlobalService/GetGlobalFrame" + GlobalService_GetGlobalProposal_FullMethodName = "/quilibrium.node.global.pb.GlobalService/GetGlobalProposal" GlobalService_GetAppShards_FullMethodName = "/quilibrium.node.global.pb.GlobalService/GetAppShards" GlobalService_GetGlobalShards_FullMethodName = "/quilibrium.node.global.pb.GlobalService/GetGlobalShards" GlobalService_GetLockedAddresses_FullMethodName = 
"/quilibrium.node.global.pb.GlobalService/GetLockedAddresses" @@ -32,6 +33,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type GlobalServiceClient interface { GetGlobalFrame(ctx context.Context, in *GetGlobalFrameRequest, opts ...grpc.CallOption) (*GlobalFrameResponse, error) + GetGlobalProposal(ctx context.Context, in *GetGlobalProposalRequest, opts ...grpc.CallOption) (*GlobalProposalResponse, error) GetAppShards(ctx context.Context, in *GetAppShardsRequest, opts ...grpc.CallOption) (*GetAppShardsResponse, error) GetGlobalShards(ctx context.Context, in *GetGlobalShardsRequest, opts ...grpc.CallOption) (*GetGlobalShardsResponse, error) GetLockedAddresses(ctx context.Context, in *GetLockedAddressesRequest, opts ...grpc.CallOption) (*GetLockedAddressesResponse, error) @@ -55,6 +57,15 @@ func (c *globalServiceClient) GetGlobalFrame(ctx context.Context, in *GetGlobalF return out, nil } +func (c *globalServiceClient) GetGlobalProposal(ctx context.Context, in *GetGlobalProposalRequest, opts ...grpc.CallOption) (*GlobalProposalResponse, error) { + out := new(GlobalProposalResponse) + err := c.cc.Invoke(ctx, GlobalService_GetGlobalProposal_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *globalServiceClient) GetAppShards(ctx context.Context, in *GetAppShardsRequest, opts ...grpc.CallOption) (*GetAppShardsResponse, error) { out := new(GetAppShardsResponse) err := c.cc.Invoke(ctx, GlobalService_GetAppShards_FullMethodName, in, out, opts...) @@ -96,6 +107,7 @@ func (c *globalServiceClient) GetWorkerInfo(ctx context.Context, in *GlobalGetWo // for forward compatibility type GlobalServiceServer interface { GetGlobalFrame(context.Context, *GetGlobalFrameRequest) (*GlobalFrameResponse, error) + GetGlobalProposal(context.Context, *GetGlobalProposalRequest) (*GlobalProposalResponse, error) GetAppShards(context.Context, *GetAppShardsRequest) (*GetAppShardsResponse, error) GetGlobalShards(context.Context, *GetGlobalShardsRequest) (*GetGlobalShardsResponse, error) GetLockedAddresses(context.Context, *GetLockedAddressesRequest) (*GetLockedAddressesResponse, error) @@ -110,6 +122,9 @@ type UnimplementedGlobalServiceServer struct { func (UnimplementedGlobalServiceServer) GetGlobalFrame(context.Context, *GetGlobalFrameRequest) (*GlobalFrameResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetGlobalFrame not implemented") } +func (UnimplementedGlobalServiceServer) GetGlobalProposal(context.Context, *GetGlobalProposalRequest) (*GlobalProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGlobalProposal not implemented") +} func (UnimplementedGlobalServiceServer) GetAppShards(context.Context, *GetAppShardsRequest) (*GetAppShardsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetAppShards not implemented") } @@ -153,6 +168,24 @@ func _GlobalService_GetGlobalFrame_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _GlobalService_GetGlobalProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGlobalProposalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GlobalServiceServer).GetGlobalProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: GlobalService_GetGlobalProposal_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GlobalServiceServer).GetGlobalProposal(ctx, req.(*GetGlobalProposalRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _GlobalService_GetAppShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetAppShardsRequest) if err := dec(in); err != nil { @@ -236,6 +269,10 @@ var GlobalService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetGlobalFrame", Handler: _GlobalService_GetGlobalFrame_Handler, }, + { + MethodName: "GetGlobalProposal", + Handler: _GlobalService_GetGlobalProposal_Handler, + }, { MethodName: "GetAppShards", Handler: _GlobalService_GetAppShards_Handler, @@ -258,7 +295,8 @@ var GlobalService_ServiceDesc = grpc.ServiceDesc{ } const ( - AppShardService_GetAppShardFrame_FullMethodName = "/quilibrium.node.global.pb.AppShardService/GetAppShardFrame" + AppShardService_GetAppShardFrame_FullMethodName = "/quilibrium.node.global.pb.AppShardService/GetAppShardFrame" + AppShardService_GetAppShardProposal_FullMethodName = "/quilibrium.node.global.pb.AppShardService/GetAppShardProposal" ) // AppShardServiceClient is the client API for AppShardService service. @@ -266,6 +304,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type AppShardServiceClient interface { GetAppShardFrame(ctx context.Context, in *GetAppShardFrameRequest, opts ...grpc.CallOption) (*AppShardFrameResponse, error) + GetAppShardProposal(ctx context.Context, in *GetAppShardProposalRequest, opts ...grpc.CallOption) (*AppShardProposalResponse, error) } type appShardServiceClient struct { @@ -285,11 +324,21 @@ func (c *appShardServiceClient) GetAppShardFrame(ctx context.Context, in *GetApp return out, nil } +func (c *appShardServiceClient) GetAppShardProposal(ctx context.Context, in *GetAppShardProposalRequest, opts ...grpc.CallOption) (*AppShardProposalResponse, error) { + out := new(AppShardProposalResponse) + err := c.cc.Invoke(ctx, AppShardService_GetAppShardProposal_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AppShardServiceServer is the server API for AppShardService service. // All implementations must embed UnimplementedAppShardServiceServer // for forward compatibility type AppShardServiceServer interface { GetAppShardFrame(context.Context, *GetAppShardFrameRequest) (*AppShardFrameResponse, error) + GetAppShardProposal(context.Context, *GetAppShardProposalRequest) (*AppShardProposalResponse, error) mustEmbedUnimplementedAppShardServiceServer() } @@ -300,6 +349,9 @@ type UnimplementedAppShardServiceServer struct { func (UnimplementedAppShardServiceServer) GetAppShardFrame(context.Context, *GetAppShardFrameRequest) (*AppShardFrameResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetAppShardFrame not implemented") } +func (UnimplementedAppShardServiceServer) GetAppShardProposal(context.Context, *GetAppShardProposalRequest) (*AppShardProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAppShardProposal not implemented") +} func (UnimplementedAppShardServiceServer) mustEmbedUnimplementedAppShardServiceServer() {} // UnsafeAppShardServiceServer may be embedded to opt out of forward compatibility for this service. 
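
The hunks above and below register two new unary RPCs, GlobalService.GetGlobalProposal and AppShardService.GetAppShardProposal, alongside the existing frame getters. A minimal client-side sketch of how the generated stubs might be exercised follows; the endpoint address, frame number, and filter value are placeholder assumptions for illustration, while the request/response types and method signatures are taken from the generated code in this patch.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"source.quilibrium.com/quilibrium/monorepo/protobufs"
)

func main() {
	// Hypothetical node endpoint; replace with a real listener address.
	conn, err := grpc.Dial(
		"localhost:8337",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Fetch the global proposal for a given (placeholder) frame number.
	global := protobufs.NewGlobalServiceClient(conn)
	gp, err := global.GetGlobalProposal(ctx, &protobufs.GetGlobalProposalRequest{
		FrameNumber: 100,
	})
	if err != nil {
		log.Fatal(err)
	}
	if p := gp.Proposal; p != nil && p.ParentQuorumCertificate != nil {
		log.Printf(
			"global proposal: parent QC rank %d, frame %d",
			p.ParentQuorumCertificate.Rank,
			p.ParentQuorumCertificate.FrameNumber,
		)
	}

	// Fetch an app shard proposal for the same frame under a placeholder filter.
	appShard := protobufs.NewAppShardServiceClient(conn)
	ap, err := appShard.GetAppShardProposal(ctx, &protobufs.GetAppShardProposalRequest{
		Filter:      []byte{0x01}, // placeholder shard filter
		FrameNumber: 100,
	})
	if err != nil {
		log.Fatal(err)
	}
	if p := ap.Proposal; p != nil && p.Vote != nil {
		log.Printf(
			"app shard proposal vote: rank %d, selector %x",
			p.Vote.Rank,
			p.Vote.Selector,
		)
	}
}

As wired in the gateway hunks earlier in this patch, the same two methods are also reachable through grpc-gateway as HTTP POST routes at /quilibrium.node.global.pb.GlobalService/GetGlobalProposal and /quilibrium.node.global.pb.AppShardService/GetAppShardProposal, each accepting a JSON-encoded request body decoded into the corresponding request message.
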
@@ -331,6 +383,24 @@ func _AppShardService_GetAppShardFrame_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _AppShardService_GetAppShardProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAppShardProposalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AppShardServiceServer).GetAppShardProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AppShardService_GetAppShardProposal_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AppShardServiceServer).GetAppShardProposal(ctx, req.(*GetAppShardProposalRequest)) + } + return interceptor(ctx, in, info, handler) +} + // AppShardService_ServiceDesc is the grpc.ServiceDesc for AppShardService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -342,6 +412,10 @@ var AppShardService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetAppShardFrame", Handler: _AppShardService_GetAppShardFrame_Handler, }, + { + MethodName: "GetAppShardProposal", + Handler: _AppShardService_GetAppShardProposal_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "global.proto", diff --git a/protobufs/global_test.go b/protobufs/global_test.go index 1957959..7d2b6b1 100644 --- a/protobufs/global_test.go +++ b/protobufs/global_test.go @@ -732,35 +732,23 @@ func TestProverLivenessCheck_Serialization(t *testing.T) { } } -func TestFrameVote_Serialization(t *testing.T) { +func TestProposalVote_Serialization(t *testing.T) { tests := []struct { name string - vote *FrameVote + vote *ProposalVote }{ { name: "complete frame vote approve", - vote: &FrameVote{ + vote: &ProposalVote{ FrameNumber: 77777, - Proposer: make([]byte, 32), - Approve: true, + Rank: 77777, + Selector: make([]byte, 32), PublicKeySignatureBls48581: &BLS48581AddressedSignature{ Signature: make([]byte, 74), Address: make([]byte, 32), }, }, }, - { - name: "frame vote reject", - vote: &FrameVote{ - FrameNumber: 88888, - Proposer: append([]byte{0xFF}, make([]byte, 31)...), - Approve: false, - PublicKeySignatureBls48581: &BLS48581AddressedSignature{ - Signature: append([]byte{0xAA}, make([]byte, 73)...), - Address: append([]byte{0xCC}, make([]byte, 31)...), - }, - }, - }, } for _, tt := range tests { @@ -769,13 +757,13 @@ func TestFrameVote_Serialization(t *testing.T) { require.NoError(t, err) require.NotNil(t, data) - vote2 := &FrameVote{} + vote2 := &ProposalVote{} err = vote2.FromCanonicalBytes(data) require.NoError(t, err) assert.Equal(t, tt.vote.FrameNumber, vote2.FrameNumber) - assert.Equal(t, tt.vote.Proposer, vote2.Proposer) - assert.Equal(t, tt.vote.Approve, vote2.Approve) + assert.Equal(t, tt.vote.Rank, vote2.Rank) + assert.Equal(t, tt.vote.Selector, vote2.Selector) assert.NotNil(t, vote2.PublicKeySignatureBls48581) assert.Equal(t, tt.vote.PublicKeySignatureBls48581.Signature, vote2.PublicKeySignatureBls48581.Signature) assert.Equal(t, tt.vote.PublicKeySignatureBls48581.Address, vote2.PublicKeySignatureBls48581.Address) @@ -783,15 +771,16 @@ func TestFrameVote_Serialization(t *testing.T) { } } -func TestFrameConfirmation_Serialization(t *testing.T) { +func TestQuorumCertificate_Serialization(t *testing.T) { tests := []struct { name string - conf *FrameConfirmation + conf *QuorumCertificate }{ { - name: "complete frame 
confirmation", - conf: &FrameConfirmation{ + name: "complete confirmation", + conf: &QuorumCertificate{ FrameNumber: 12345, + Rank: 12345, Selector: make([]byte, 32), AggregateSignature: &BLS48581AggregateSignature{ Signature: make([]byte, 74), @@ -803,9 +792,10 @@ func TestFrameConfirmation_Serialization(t *testing.T) { }, }, { - name: "minimal frame confirmation", - conf: &FrameConfirmation{ + name: "minimal confirmation", + conf: &QuorumCertificate{ FrameNumber: 0, + Rank: 0, Selector: []byte{}, AggregateSignature: nil, }, @@ -818,11 +808,12 @@ func TestFrameConfirmation_Serialization(t *testing.T) { require.NoError(t, err) require.NotNil(t, data) - conf2 := &FrameConfirmation{} + conf2 := &QuorumCertificate{} err = conf2.FromCanonicalBytes(data) require.NoError(t, err) assert.Equal(t, tt.conf.FrameNumber, conf2.FrameNumber) + assert.Equal(t, tt.conf.Rank, conf2.Rank) assert.Equal(t, tt.conf.Selector, conf2.Selector) if tt.conf.AggregateSignature != nil { assert.NotNil(t, conf2.AggregateSignature) diff --git a/protobufs/go.mod b/protobufs/go.mod index 9a46a97..f83756c 100644 --- a/protobufs/go.mod +++ b/protobufs/go.mod @@ -1,6 +1,6 @@ module source.quilibrium.com/quilibrium/monorepo/protobufs -go 1.23.0 +go 1.23.2 toolchain go1.23.4 @@ -14,6 +14,7 @@ require ( github.com/cloudflare/circl v1.6.1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 github.com/iden3/go-iden3-crypto v0.0.17 + github.com/libp2p/go-libp2p v0.0.0-00010101000000-000000000000 github.com/multiformats/go-multiaddr v0.16.1 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.10.0 @@ -24,26 +25,29 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ipfs/go-cid v0.0.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.1 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/exp v0.0.0-20230725012225-302865e7556b // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/protobufs/go.sum b/protobufs/go.sum index af78912..f9c28cb 100644 --- a/protobufs/go.sum +++ b/protobufs/go.sum @@ -3,6 +3,10 @@ github.com/cloudflare/circl v1.6.1/go.mod 
h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -17,37 +21,32 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5uk github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= -github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= -github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= 
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= +github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -72,23 +71,16 @@ go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/crypto 
v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= @@ -102,5 +94,5 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= diff --git a/protobufs/keys.go b/protobufs/keys.go index 45c9a46..a72a9ff 100644 --- a/protobufs/keys.go +++ b/protobufs/keys.go @@ -103,10 +103,10 @@ func (s *SignedX448Key) Validate() error { "validate", ) } - // Ed448Signature has its own Validate method - if err := sig.Ed448Signature.Validate(); err != nil { + + if len(sig.Ed448Signature.Signature) != 114 { return errors.Wrap( - errors.Wrap(err, "ed448 signature"), + errors.New("invalid ed448 signature"), "validate", ) } @@ -124,13 +124,6 @@ func (s *SignedX448Key) Validate() error { "validate", ) } - if sig.BlsSignature.PublicKey != nil && - len(sig.BlsSignature.PublicKey.KeyValue) != 585 { - return errors.Wrap( - errors.New("invalid bls public key length"), - "validate", - ) - } case *SignedX448Key_DecafSignature: if sig.DecafSignature == nil { return errors.Wrap( @@ -145,13 +138,6 @@ func (s *SignedX448Key) Validate() error { "validate", ) } - if sig.DecafSignature.PublicKey != nil && - len(sig.DecafSignature.PublicKey.KeyValue) != 56 { - return errors.Wrap( - errors.New("invalid decaf public key length"), - "validate", - ) - } case nil: return errors.Wrap( errors.New("no signature specified"), @@ -203,10 +189,10 @@ func (s *SignedDecaf448Key) Validate() error { "validate", ) } - // Ed448Signature has its own Validate method - if err := sig.Ed448Signature.Validate(); err != nil { + + if len(sig.Ed448Signature.Signature) != 114 { return errors.Wrap( - errors.Wrap(err, "ed448 signature"), + errors.New("invalid ed448 signature"), "validate", ) } @@ -224,13 +210,6 @@ func (s *SignedDecaf448Key) Validate() error { "validate", ) } - if sig.BlsSignature.PublicKey != nil && - 
len(sig.BlsSignature.PublicKey.KeyValue) != 585 { - return errors.Wrap( - errors.New("invalid bls public key length"), - "validate", - ) - } case *SignedDecaf448Key_DecafSignature: if sig.DecafSignature == nil { return errors.Wrap( @@ -245,13 +224,6 @@ func (s *SignedDecaf448Key) Validate() error { "validate", ) } - if sig.DecafSignature.PublicKey != nil && - len(sig.DecafSignature.PublicKey.KeyValue) != 56 { - return errors.Wrap( - errors.New("invalid decaf public key length"), - "validate", - ) - } case nil: return errors.Wrap( errors.New("no signature specified"), @@ -319,6 +291,10 @@ func (s *BLS48581AggregateSignature) Identity() string { return string(s.GetPublicKey().GetKeyValue()) } +func (s *BLS48581AggregateSignature) GetPubKey() []byte { + return s.PublicKey.KeyValue +} + func (s *BLS48581Signature) Verify( msg, context []byte, blsVerifier BlsVerifier, diff --git a/types/consensus/distributor.go b/types/consensus/distributor.go index 075ebe3..a5e51c5 100644 --- a/types/consensus/distributor.go +++ b/types/consensus/distributor.go @@ -1,7 +1,7 @@ package consensus import ( - "context" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" ) type ControlEventType int @@ -126,10 +126,7 @@ func (s *ShardSplitEventData) ControlEventData() {} // EventDistributor defines the interface for event distribution systems type EventDistributor interface { // Start begins the event processing loop with a cancelable context - Start(ctx context.Context) error - - // Stop gracefully shuts down the event distributor - Stop() error + Start(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) // Subscribe registers a new subscriber with a unique ID and returns their // control event channel diff --git a/types/crypto/frame_prover.go b/types/crypto/frame_prover.go index beb298d..858f3c5 100644 --- a/types/crypto/frame_prover.go +++ b/types/crypto/frame_prover.go @@ -38,6 +38,7 @@ type FrameProver interface { previousFrame *protobufs.GlobalFrameHeader, commitments [][]byte, proverRoot []byte, + requestRoot []byte, provingKey Signer, timestamp int64, difficulty uint32, diff --git a/types/execution/execution_engine.go b/types/execution/execution_engine.go index fed1234..103d5bb 100644 --- a/types/execution/execution_engine.go +++ b/types/execution/execution_engine.go @@ -3,6 +3,7 @@ package execution import ( "math/big" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/execution/state" ) @@ -14,8 +15,7 @@ type ProcessMessageResult struct { type ShardExecutionEngine interface { GetName() string - Start() <-chan error - Stop(force bool) <-chan error + Start(ctx lifecycle.SignalerContext, ready lifecycle.ReadyFunc) ValidateMessage(frameNumber uint64, address []byte, message []byte) error ProcessMessage( frameNumber uint64, diff --git a/types/go.mod b/types/go.mod index 1cd647a..22936ea 100644 --- a/types/go.mod +++ b/types/go.mod @@ -12,6 +12,8 @@ replace source.quilibrium.com/quilibrium/monorepo/config => ../config replace source.quilibrium.com/quilibrium/monorepo/utils => ../utils +replace source.quilibrium.com/quilibrium/monorepo/lifecycle => ../lifecycle + replace github.com/multiformats/go-multiaddr => ../go-multiaddr replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns diff --git a/types/mocks/clock_store.go b/types/mocks/clock_store.go index a1db6ab..17b6544 100644 --- a/types/mocks/clock_store.go +++ b/types/mocks/clock_store.go @@ -16,6 +16,318 @@ type MockClockStore 
struct { mock.Mock } +// GetProposalVote implements store.ClockStore. +func (m *MockClockStore) GetProposalVote( + filter []byte, + rank uint64, + identity []byte, +) (*protobufs.ProposalVote, error) { + args := m.Called( + filter, + rank, + identity, + ) + return args.Get(0).(*protobufs.ProposalVote), args.Error(1) +} + +// GetProposalVotes implements store.ClockStore. +func (m *MockClockStore) GetProposalVotes( + filter []byte, + rank uint64, +) ([]*protobufs.ProposalVote, error) { + args := m.Called( + filter, + rank, + ) + return args.Get(0).([]*protobufs.ProposalVote), args.Error(1) +} + +// GetTimeoutVote implements store.ClockStore. +func (m *MockClockStore) GetTimeoutVote( + filter []byte, + rank uint64, + identity []byte, +) (*protobufs.TimeoutState, error) { + args := m.Called( + filter, + rank, + identity, + ) + return args.Get(0).(*protobufs.TimeoutState), args.Error(1) +} + +// GetTimeoutVotes implements store.ClockStore. +func (m *MockClockStore) GetTimeoutVotes( + filter []byte, + rank uint64, +) ([]*protobufs.TimeoutState, error) { + args := m.Called( + filter, + rank, + ) + return args.Get(0).([]*protobufs.TimeoutState), args.Error(1) +} + +// PutProposalVote implements store.ClockStore. +func (m *MockClockStore) PutProposalVote( + txn store.Transaction, + vote *protobufs.ProposalVote, +) error { + args := m.Called( + txn, + vote, + ) + return args.Error(0) +} + +// PutTimeoutVote implements store.ClockStore. +func (m *MockClockStore) PutTimeoutVote( + txn store.Transaction, + vote *protobufs.TimeoutState, +) error { + args := m.Called( + txn, + vote, + ) + return args.Error(0) +} + +// GetCertifiedAppShardState implements store.ClockStore. +func (m *MockClockStore) GetCertifiedAppShardState( + filter []byte, + rank uint64, +) (*protobufs.AppShardProposal, error) { + args := m.Called( + filter, + rank, + ) + return args.Get(0).(*protobufs.AppShardProposal), args.Error(1) +} + +// GetCertifiedGlobalState implements store.ClockStore. +func (m *MockClockStore) GetCertifiedGlobalState(rank uint64) ( + *protobufs.GlobalProposal, + error, +) { + args := m.Called( + rank, + ) + return args.Get(0).(*protobufs.GlobalProposal), args.Error(1) +} + +// GetEarliestCertifiedAppShardState implements store.ClockStore. +func (m *MockClockStore) GetEarliestCertifiedAppShardState( + filter []byte, +) (*protobufs.AppShardProposal, error) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.AppShardProposal), args.Error(1) +} + +// GetEarliestCertifiedGlobalState implements store.ClockStore. +func (m *MockClockStore) GetEarliestCertifiedGlobalState() ( + *protobufs.GlobalProposal, + error, +) { + args := m.Called() + return args.Get(0).(*protobufs.GlobalProposal), args.Error(1) +} + +// GetEarliestQuorumCertificate implements store.ClockStore. +func (m *MockClockStore) GetEarliestQuorumCertificate(filter []byte) ( + *protobufs.QuorumCertificate, + error, +) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.QuorumCertificate), args.Error(1) +} + +// GetEarliestTimeoutCertificate implements store.ClockStore. +func (m *MockClockStore) GetEarliestTimeoutCertificate(filter []byte) ( + *protobufs.TimeoutCertificate, + error, +) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.TimeoutCertificate), args.Error(1) +} + +// GetLatestCertifiedAppShardState implements store.ClockStore. 
+func (m *MockClockStore) GetLatestCertifiedAppShardState(filter []byte) ( + *protobufs.AppShardProposal, + error, +) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.AppShardProposal), args.Error(1) +} + +// GetLatestCertifiedGlobalState implements store.ClockStore. +func (m *MockClockStore) GetLatestCertifiedGlobalState() ( + *protobufs.GlobalProposal, + error, +) { + args := m.Called() + return args.Get(0).(*protobufs.GlobalProposal), args.Error(1) +} + +// GetLatestQuorumCertificate implements store.ClockStore. +func (m *MockClockStore) GetLatestQuorumCertificate(filter []byte) ( + *protobufs.QuorumCertificate, + error, +) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.QuorumCertificate), args.Error(1) +} + +// GetLatestTimeoutCertificate implements store.ClockStore. +func (m *MockClockStore) GetLatestTimeoutCertificate(filter []byte) ( + *protobufs.TimeoutCertificate, + error, +) { + args := m.Called( + filter, + ) + return args.Get(0).(*protobufs.TimeoutCertificate), args.Error(1) +} + +// GetQuorumCertificate implements store.ClockStore. +func (m *MockClockStore) GetQuorumCertificate(filter []byte, rank uint64) ( + *protobufs.QuorumCertificate, + error, +) { + args := m.Called( + filter, + rank, + ) + return args.Get(0).(*protobufs.QuorumCertificate), args.Error(1) +} + +// GetTimeoutCertificate implements store.ClockStore. +func (m *MockClockStore) GetTimeoutCertificate(filter []byte, rank uint64) ( + *protobufs.TimeoutCertificate, + error, +) { + args := m.Called( + filter, + rank, + ) + return args.Get(0).(*protobufs.TimeoutCertificate), args.Error(1) +} + +// PutCertifiedAppShardState implements store.ClockStore. +func (m *MockClockStore) PutCertifiedAppShardState( + state *protobufs.AppShardProposal, + txn store.Transaction, +) error { + args := m.Called( + state, + txn, + ) + return args.Error(0) +} + +// PutCertifiedGlobalState implements store.ClockStore. +func (m *MockClockStore) PutCertifiedGlobalState( + state *protobufs.GlobalProposal, + txn store.Transaction, +) error { + args := m.Called( + state, + txn, + ) + return args.Error(0) +} + +// PutQuorumCertificate implements store.ClockStore. +func (m *MockClockStore) PutQuorumCertificate( + qc *protobufs.QuorumCertificate, + txn store.Transaction, +) error { + args := m.Called( + qc, + txn, + ) + return args.Error(0) +} + +// PutTimeoutCertificate implements store.ClockStore. +func (m *MockClockStore) PutTimeoutCertificate( + timeoutCertificate *protobufs.TimeoutCertificate, + txn store.Transaction, +) error { + args := m.Called( + timeoutCertificate, + txn, + ) + return args.Error(0) +} + +// RangeCertifiedAppShardStates implements store.ClockStore. +func (m *MockClockStore) RangeCertifiedAppShardStates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.AppShardProposal], error) { + args := m.Called( + filter, + startRank, + endRank, + ) + return args.Get(0).(store.TypedIterator[*protobufs.AppShardProposal]), + args.Error(1) +} + +// RangeCertifiedGlobalStates implements store.ClockStore. +func (m *MockClockStore) RangeCertifiedGlobalStates( + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.GlobalProposal], error) { + args := m.Called( + startRank, + endRank, + ) + return args.Get(0).(store.TypedIterator[*protobufs.GlobalProposal]), + args.Error(1) +} + +// RangeQuorumCertificates implements store.ClockStore. 
+func (m *MockClockStore) RangeQuorumCertificates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.QuorumCertificate], error) { + args := m.Called( + filter, + startRank, + endRank, + ) + return args.Get(0).(store.TypedIterator[*protobufs.QuorumCertificate]), + args.Error(1) +} + +// RangeTimeoutCertificates implements store.ClockStore. +func (m *MockClockStore) RangeTimeoutCertificates( + filter []byte, + startRank uint64, + endRank uint64, +) (store.TypedIterator[*protobufs.TimeoutCertificate], error) { + args := m.Called( + filter, + startRank, + endRank, + ) + return args.Get(0).(store.TypedIterator[*protobufs.TimeoutCertificate]), + args.Error(1) +} + // CommitShardClockFrame implements store.ClockStore. func (m *MockClockStore) CommitShardClockFrame( filter []byte, diff --git a/types/mocks/event_distributor.go b/types/mocks/event_distributor.go index 3b85b9d..69c0c8b 100644 --- a/types/mocks/event_distributor.go +++ b/types/mocks/event_distributor.go @@ -1,9 +1,8 @@ package mocks import ( - "context" - "github.com/stretchr/testify/mock" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/types/consensus" ) @@ -11,14 +10,11 @@ type MockEventDistributor struct { mock.Mock } -func (m *MockEventDistributor) Start(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - -func (m *MockEventDistributor) Stop() error { - args := m.Called() - return args.Error(0) +func (m *MockEventDistributor) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + m.Called(ctx, ready) } func (m *MockEventDistributor) Subscribe( @@ -35,3 +31,5 @@ func (m *MockEventDistributor) Publish(event consensus.ControlEvent) { func (m *MockEventDistributor) Unsubscribe(id string) { m.Called(id) } + +var _ consensus.EventDistributor = (*MockEventDistributor)(nil) diff --git a/types/mocks/frame_prover.go b/types/mocks/frame_prover.go index d82bb08..bbee27d 100644 --- a/types/mocks/frame_prover.go +++ b/types/mocks/frame_prover.go @@ -118,6 +118,7 @@ func (m *MockFrameProver) ProveGlobalFrameHeader( previousFrame *protobufs.GlobalFrameHeader, commitments [][]byte, proverRoot []byte, + requestsRoot []byte, provingKey crypto.Signer, timestamp int64, difficulty uint32, @@ -127,6 +128,7 @@ func (m *MockFrameProver) ProveGlobalFrameHeader( previousFrame, commitments, proverRoot, + requestsRoot, provingKey, timestamp, difficulty, diff --git a/types/mocks/peer_info_manager.go b/types/mocks/peer_info_manager.go index a12fda5..67cef21 100644 --- a/types/mocks/peer_info_manager.go +++ b/types/mocks/peer_info_manager.go @@ -2,6 +2,7 @@ package mocks import ( "github.com/stretchr/testify/mock" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/p2p" ) @@ -36,11 +37,9 @@ func (m *MockPeerInfoManager) GetPeersBySpeed() [][]byte { } // Start implements p2p.PeerInfoManager. -func (m *MockPeerInfoManager) Start() { - m.Called() -} - -// Stop implements p2p.PeerInfoManager. 
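The mock methods above (MockClockStore and the other updated mocks) all follow the same testify pattern: record the call with m.Called(...) and hand back whatever the test registered. A short usage sketch is below; QuorumCertificate and fakeClockStore are simplified stand-ins for the real protobufs and store types.

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Simplified stand-ins; the real types come from the protobufs and store packages.
type QuorumCertificate struct{ Rank uint64 }

type fakeClockStore struct{ mock.Mock }

// GetQuorumCertificate records the call and returns the canned values the test set up.
func (m *fakeClockStore) GetQuorumCertificate(filter []byte, rank uint64) (*QuorumCertificate, error) {
	args := m.Called(filter, rank)
	return args.Get(0).(*QuorumCertificate), args.Error(1)
}

func main() {
	s := &fakeClockStore{}
	filter := []byte{0x01}
	// Register the canned return, then call the mock as production code would.
	s.On("GetQuorumCertificate", filter, uint64(7)).Return(&QuorumCertificate{Rank: 7}, nil)

	qc, err := s.GetQuorumCertificate(filter, 7)
	fmt.Println(qc.Rank, err)
}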
-func (m *MockPeerInfoManager) Stop() { - m.Called() +func (m *MockPeerInfoManager) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + m.Called(ctx, ready) } diff --git a/types/mocks/shard_execution.go b/types/mocks/shard_execution.go index 40421fc..f6b40ce 100644 --- a/types/mocks/shard_execution.go +++ b/types/mocks/shard_execution.go @@ -4,6 +4,7 @@ import ( "math/big" "github.com/stretchr/testify/mock" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/crypto" "source.quilibrium.com/quilibrium/monorepo/types/execution" @@ -117,15 +118,11 @@ func (m *MockShardExecutionEngine) ProcessMessage( } // Start implements execution.ShardExecutionEngine. -func (m *MockShardExecutionEngine) Start() <-chan error { - args := m.Called() - return args.Get(0).(chan error) -} - -// Stop implements execution.ShardExecutionEngine. -func (m *MockShardExecutionEngine) Stop(force bool) <-chan error { - args := m.Called(force) - return args.Get(0).(chan error) +func (m *MockShardExecutionEngine) Start( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + m.Called(ctx, ready) } var _ execution.ShardExecutionEngine = (*MockShardExecutionEngine)(nil) diff --git a/types/p2p/peer_info_manager.go b/types/p2p/peer_info_manager.go index bdeeb7d..aaf617a 100644 --- a/types/p2p/peer_info_manager.go +++ b/types/p2p/peer_info_manager.go @@ -1,10 +1,12 @@ package p2p -import "source.quilibrium.com/quilibrium/monorepo/protobufs" +import ( + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/protobufs" +) type PeerInfoManager interface { - Start() - Stop() + Start(context lifecycle.SignalerContext, ready lifecycle.ReadyFunc) AddPeerInfo(info *protobufs.PeerInfo) GetPeerInfo(peerId []byte) *PeerInfo GetPeerMap() map[string]*PeerInfo diff --git a/types/store/clock.go b/types/store/clock.go index 63ba542..e303ab8 100644 --- a/types/store/clock.go +++ b/types/store/clock.go @@ -17,6 +17,55 @@ type ClockStore interface { endFrameNumber uint64, ) (TypedIterator[*protobufs.GlobalFrame], error) PutGlobalClockFrame(frame *protobufs.GlobalFrame, txn Transaction) error + GetLatestCertifiedGlobalState() (*protobufs.GlobalProposal, error) + GetEarliestCertifiedGlobalState() (*protobufs.GlobalProposal, error) + GetCertifiedGlobalState(rank uint64) (*protobufs.GlobalProposal, error) + RangeCertifiedGlobalStates( + startRank uint64, + endRank uint64, + ) (TypedIterator[*protobufs.GlobalProposal], error) + PutCertifiedGlobalState( + state *protobufs.GlobalProposal, + txn Transaction, + ) error + GetLatestQuorumCertificate( + filter []byte, + ) (*protobufs.QuorumCertificate, error) + GetEarliestQuorumCertificate( + filter []byte, + ) (*protobufs.QuorumCertificate, error) + GetQuorumCertificate( + filter []byte, + rank uint64, + ) (*protobufs.QuorumCertificate, error) + RangeQuorumCertificates( + filter []byte, + startRank uint64, + endRank uint64, + ) (TypedIterator[*protobufs.QuorumCertificate], error) + PutQuorumCertificate( + qc *protobufs.QuorumCertificate, + txn Transaction, + ) error + GetLatestTimeoutCertificate( + filter []byte, + ) (*protobufs.TimeoutCertificate, error) + GetEarliestTimeoutCertificate( + filter []byte, + ) (*protobufs.TimeoutCertificate, error) + GetTimeoutCertificate( + filter []byte, + rank uint64, + ) (*protobufs.TimeoutCertificate, error) + RangeTimeoutCertificates( + filter []byte, + startRank uint64, + endRank 
uint64, + ) (TypedIterator[*protobufs.TimeoutCertificate], error) + PutTimeoutCertificate( + timeoutCertificate *protobufs.TimeoutCertificate, + txn Transaction, + ) error GetLatestShardClockFrame( filter []byte, ) (*protobufs.AppShardFrame, []*tries.RollingFrecencyCritbitTrie, error) @@ -58,6 +107,25 @@ type ClockStore interface { filter []byte, frameNumber uint64, ) error + GetLatestCertifiedAppShardState( + filter []byte, + ) (*protobufs.AppShardProposal, error) + GetEarliestCertifiedAppShardState( + filter []byte, + ) (*protobufs.AppShardProposal, error) + GetCertifiedAppShardState( + filter []byte, + rank uint64, + ) (*protobufs.AppShardProposal, error) + RangeCertifiedAppShardStates( + filter []byte, + startRank uint64, + endRank uint64, + ) (TypedIterator[*protobufs.AppShardProposal], error) + PutCertifiedAppShardState( + state *protobufs.AppShardProposal, + txn Transaction, + ) error ResetGlobalClockFrames() error ResetShardClockFrames(filter []byte) error Compact( @@ -103,4 +171,22 @@ type ClockStore interface { filter []byte, tree *tries.VectorCommitmentTree, ) error + PutProposalVote(txn Transaction, vote *protobufs.ProposalVote) error + GetProposalVote(filter []byte, rank uint64, identity []byte) ( + *protobufs.ProposalVote, + error, + ) + GetProposalVotes(filter []byte, rank uint64) ( + []*protobufs.ProposalVote, + error, + ) + PutTimeoutVote(txn Transaction, vote *protobufs.TimeoutState) error + GetTimeoutVote(filter []byte, rank uint64, identity []byte) ( + *protobufs.TimeoutState, + error, + ) + GetTimeoutVotes(filter []byte, rank uint64) ( + []*protobufs.TimeoutState, + error, + ) } diff --git a/types/tries/lazy_proof_tree.go b/types/tries/lazy_proof_tree.go index f34256b..4cdaa8d 100644 --- a/types/tries/lazy_proof_tree.go +++ b/types/tries/lazy_proof_tree.go @@ -1666,6 +1666,10 @@ func (t *LazyVectorCommitmentTree) ProveMultiple( for _, key := range keys { pathIndices := [][]uint64{} polys, commits, ys, ps := prove(t.Root, key, 0) + if len(commits) == 0 { + return nil + } + for _, p := range ps { index := []uint64{} for _, i := range p { diff --git a/types/tries/proof_tree.go b/types/tries/proof_tree.go index 6ee1b88..a883df0 100644 --- a/types/tries/proof_tree.go +++ b/types/tries/proof_tree.go @@ -599,6 +599,10 @@ func (t *VectorCommitmentTree) Prove( } polynomials, commits, ys, paths := prove(t.Root, 0) + if len(commits) == 0 { + return nil + } + pathIndices := [][]uint64{} indices := []uint64{} for _, p := range paths { diff --git a/utils/logging/file_logger.go b/utils/logging/file_logger.go index ddacbd2..ed8bfba 100644 --- a/utils/logging/file_logger.go +++ b/utils/logging/file_logger.go @@ -23,31 +23,29 @@ func NewRotatingFileLogger( debug bool, coreId uint, dir string, - filename string, + maxSize int, + maxBackups int, + maxAge int, + compress bool, ) ( *zap.Logger, io.Closer, error, ) { - if dir == "" { - dir = "./logs" - } if err := os.MkdirAll(dir, 0o755); err != nil { return nil, nil, err } - if filename == "" { - filename = filenameForCore(coreId) - } + filename := filenameForCore(coreId) - path := filepath.Join(dir, filename) + logFilePath := filepath.Join(dir, filename) rot := &lumberjack.Logger{ - Filename: path, - MaxSize: 50, // megabytes per file before rotation - MaxBackups: 5, // number of old files to keep - MaxAge: 14, // days - Compress: true, // gzip old files + Filename: logFilePath, + MaxSize: maxSize, + MaxBackups: maxBackups, + MaxAge: maxAge, + Compress: compress, } encCfg := zap.NewProductionEncoderConfig() @@ -59,7 +57,13 @@ func 
NewRotatingFileLogger( enc := zapcore.NewConsoleEncoder(encCfg) ws := zapcore.AddSync(rot) - core := zapcore.NewCore(enc, ws, zap.DebugLevel) + + logLevel := zap.InfoLevel + if debug { + logLevel = zap.DebugLevel + } + + core := zapcore.NewCore(enc, ws, logLevel) logger := zap.New(core, zap.AddCaller(), zap.Fields( zap.Uint("coreId", coreId), )) diff --git a/vdf/wesolowski_frame_prover.go b/vdf/wesolowski_frame_prover.go index b8bcd62..c1d0eaf 100644 --- a/vdf/wesolowski_frame_prover.go +++ b/vdf/wesolowski_frame_prover.go @@ -374,6 +374,7 @@ func (w *WesolowskiFrameProver) ProveGlobalFrameHeader( previousFrame *protobufs.GlobalFrameHeader, commitments [][]byte, proverRoot []byte, + requestRoot []byte, provingKey qcrypto.Signer, timestamp int64, difficulty uint32, @@ -410,6 +411,7 @@ func (w *WesolowskiFrameProver) ProveGlobalFrameHeader( } input = append(input, proverRoot...) + input = append(input, requestRoot...) b := sha3.Sum256(input) o := WesolowskiSolve(b, difficulty) @@ -433,6 +435,7 @@ func (w *WesolowskiFrameProver) ProveGlobalFrameHeader( ParentSelector: parent.FillBytes(make([]byte, 32)), GlobalCommitments: commitments, ProverTreeCommitment: proverRoot, + RequestsRoot: requestRoot, } switch pubkeyType { @@ -489,6 +492,7 @@ func (w *WesolowskiFrameProver) GetGlobalFrameSignaturePayload( } input = append(input, frame.ProverTreeCommitment...) + input = append(input, frame.RequestsRoot...) b := sha3.Sum256(input) proof := [516]byte{}
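The wesolowski_frame_prover.go hunks above append requestRoot to both the hashed VDF input and the frame signature payload, so the new RequestsRoot field is committed to by the proof rather than carried as unbound metadata. The sketch below shows only that input-construction step; the hash (sha3.Sum256) and the proverRoot-then-requestRoot ordering come from the diff, while the earlier fields and their encoding are hypothetical simplifications.

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// buildChallenge mirrors the pattern in ProveGlobalFrameHeader: concatenate the
// header fields in a fixed order and hash them, so every field (now including
// requestRoot) is bound into the resulting VDF challenge.
func buildChallenge(frameNumber uint64, parentSelector, proverRoot, requestRoot []byte, commitments [][]byte) [32]byte {
	input := binary.BigEndian.AppendUint64(nil, frameNumber)
	input = append(input, parentSelector...)
	for _, c := range commitments {
		input = append(input, c...)
	}
	input = append(input, proverRoot...)
	input = append(input, requestRoot...) // the field added in this patch
	return sha3.Sum256(input)
}

func main() {
	b := buildChallenge(
		42,
		make([]byte, 32),           // hypothetical parent selector
		make([]byte, 64),           // hypothetical prover tree commitment
		make([]byte, 64),           // hypothetical requests root
		[][]byte{make([]byte, 64)}, // hypothetical global commitments
	)
	fmt.Printf("challenge: %x\n", b[:8])
}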