FiveMovesAhead 2024-09-12 18:18:20 +08:00
parent 9fc8347e89
commit 13cf632593
13 changed files with 53 additions and 24 deletions

View File

@@ -1,8 +1,8 @@
 use super::{state, QueryData, Result};
 use std::collections::HashSet;
-use tig_worker::SolutionData;
+use tig_worker::OutputData;
 
-pub async fn execute() -> Result<Option<(String, Vec<SolutionData>)>> {
+pub async fn execute() -> Result<Option<(String, Vec<OutputData>)>> {
     let QueryData {
         proofs,
         benchmarks,

View File

@@ -279,7 +279,7 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
             })
             .collect(),
     };
-    let solutions_data = Arc::new(Mutex::new(Vec::<SolutionData>::new()));
+    let solutions_data = Arc::new(Mutex::new(Vec::<OutputData>::new()));
     let solutions_count = Arc::new(Mutex::new(0u32));
     update_status("Starting benchmark").await;
     run_benchmark::execute(
@@ -396,7 +396,7 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
     Ok(())
 }
 
-pub async fn drain_solutions(benchmark_id: &String, solutions_data: &mut Vec<SolutionData>) -> u32 {
+pub async fn drain_solutions(benchmark_id: &String, solutions_data: &mut Vec<OutputData>) -> u32 {
     let mut state = (*state()).lock().await;
     let QueryData {
         benchmarks, proofs, ..
@@ -407,7 +407,7 @@ pub async fn drain_solutions(benchmark_id: &String, solutions_data: &mut Vec<SolutionData>) -> u32 {
         x.extend(
             solutions_data
                 .iter()
-                .map(|x| SolutionMetaData::from(x.clone())),
+                .map(|x| OutputMetaData::from(x.clone())),
         );
         benchmark.details.num_solutions = x.len() as u32;
     }

View File

@@ -4,13 +4,13 @@ use future_utils::{spawn, time, yield_now, Mutex};
 use std::sync::Arc;
 use tig_algorithms::{c001, c002, c003, c004};
 use tig_challenges::ChallengeTrait;
-use tig_worker::{compute_solution, verify_solution, SolutionData};
+use tig_worker::{compute_solution, verify_solution, OutputData};
 
 pub async fn execute(
     nonce_iters: Vec<Arc<Mutex<NonceIterator>>>,
     job: &Job,
     wasm: &Vec<u8>,
-    solutions_data: Arc<Mutex<Vec<SolutionData>>>,
+    solutions_data: Arc<Mutex<Vec<OutputData>>>,
     solutions_count: Arc<Mutex<u32>>,
 ) {
     for nonce_iter in nonce_iters {

View File

@@ -1,14 +1,14 @@
 use super::{api, Result};
 use crate::future_utils::sleep;
 use tig_api::SubmitProofReq;
-use tig_worker::SolutionData;
+use tig_worker::OutputData;
 
 const MAX_RETRIES: u32 = 3;
 
-pub async fn execute(benchmark_id: String, solutions_data: Vec<SolutionData>) -> Result<()> {
+pub async fn execute(benchmark_id: String, solutions_data: Vec<OutputData>) -> Result<()> {
     let req = SubmitProofReq {
         benchmark_id,
-        solutions_data,
+        merkle_data: solutions_data,
     };
     for attempt in 1..=MAX_RETRIES {
         println!("Submission attempt {} of {}", attempt, MAX_RETRIES);

View File

@@ -109,7 +109,7 @@ async fn slave_node(master: &String, port: u16, num_workers: u32) {
     let master_url = format!("http://{}:{}", master, port);
     let mut job: Option<Job> = None;
     let mut nonce_iters: Vec<Arc<Mutex<NonceIterator>>> = Vec::new();
-    let mut solutions_data = Arc::new(Mutex::new(Vec::<SolutionData>::new()));
+    let mut solutions_data = Arc::new(Mutex::new(Vec::<OutputData>::new()));
     let mut solutions_count = Arc::new(Mutex::new(0u32));
     let mut num_solutions = 0;
     loop {
@@ -129,7 +129,7 @@ async fn slave_node(master: &String, port: u16, num_workers: u32) {
                 (*(*nonce_iter).lock().await).empty();
             }
             nonce_iters.clear();
-            solutions_data = Arc::new(Mutex::new(Vec::<SolutionData>::new()));
+            solutions_data = Arc::new(Mutex::new(Vec::<OutputData>::new()));
             solutions_count = Arc::new(Mutex::new(0u32));
             num_solutions = 0;
             if next_job
@@ -195,7 +195,7 @@ async fn slave_node(master: &String, port: u16, num_workers: u32) {
             let n = solutions_data.len();
             if n > 0 {
                 num_solutions += n as u32;
-                let data: Vec<SolutionData> = solutions_data.drain(..).collect();
+                let data: Vec<OutputData> = solutions_data.drain(..).collect();
                 println!("Posting {} solutions", n);
                 if let Err(e) = post::<String>(
                     &format!("{}/solutions_data/{}", master_url, job.benchmark_id),
@@ -264,7 +264,7 @@ async fn master_node(
         .and(warp::post())
         .and(warp::body::json())
         .and_then(
-            |benchmark_id: String, mut solutions_data: Vec<SolutionData>| async move {
+            |benchmark_id: String, mut solutions_data: Vec<OutputData>| async move {
                 benchmarker::drain_solutions(&benchmark_id, &mut solutions_data).await;
                 Ok::<_, warp::Rejection>(warp::reply::with_status(
                     "SolutionsData received",

View File

@@ -10,6 +10,7 @@ pub(crate) async fn execute<T: Context>(ctx: &T) -> String {
     let (block, mut cache) = create_block(ctx).await;
     confirm_mempool_challenges(&block, &mut cache).await;
     confirm_mempool_algorithms(&block, &mut cache).await;
+    confirm_mempool_precommits(&block, &mut cache).await;
     confirm_mempool_benchmarks(&block, &mut cache).await;
     confirm_mempool_proofs(ctx, &block, &mut cache).await;
     confirm_mempool_frauds(&block, &mut cache).await;
@@ -19,6 +20,7 @@ pub(crate) async fn execute<T: Context>(ctx: &T) -> String {
     update_qualifiers(&block, &mut cache).await;
     update_frontiers(&block, &mut cache).await;
     update_solution_signature_thresholds(&block, &mut cache).await;
+    update_base_fees(&block, &mut cache).await;
     update_influence(&block, &mut cache).await;
     update_adoption(&block, &mut cache).await;
     update_innovator_rewards(&block, &mut cache).await;
@@ -33,6 +35,7 @@ struct AddBlockCache {
     pub mempool_challenges: Vec<Challenge>,
     pub mempool_algorithms: Vec<Algorithm>,
     pub mempool_benchmarks: Vec<Benchmark>,
+    pub mempool_precommits: Vec<Precommit>,
     pub mempool_proofs: Vec<Proof>,
     pub mempool_frauds: Vec<Fraud>,
     pub mempool_wasms: Vec<Wasm>,
@@ -93,6 +96,17 @@ async fn setup_cache<T: Context>(
         });
         mempool_benchmarks.push(benchmark);
     }
+    let mut mempool_precommits = Vec::new();
+    for mut precommit in ctx
+        .get_precommits(PrecommitsFilter::Mempool { from_block_started })
+        .await
+        .unwrap_or_else(|e| panic!("get_precommits error: {:?}", e))
+    {
+        precommit.state = Some(PrecommitState {
+            block_confirmed: None,
+        });
+        mempool_precommits.push(precommit);
+    }
     let mut mempool_proofs = Vec::new();
     for mut proof in ctx
         .get_proofs(ProofsFilter::Mempool { from_block_started }, false)
@@ -148,6 +162,7 @@ async fn setup_cache<T: Context>(
             cutoff_frontier: None,
             scaling_factor: None,
             qualifier_difficulties: None,
+            base_fee: None,
         });
         active_challenges.insert(challenge.id.clone(), challenge);
     }
@@ -292,6 +307,7 @@ async fn setup_cache<T: Context>(
         mempool_challenges,
         mempool_algorithms,
         mempool_benchmarks,
+        mempool_precommits,
         mempool_proofs,
         mempool_frauds,
         mempool_wasms,
@@ -395,10 +411,17 @@ async fn confirm_mempool_algorithms(block: &Block, cache: &mut AddBlockCache) {
     }
 }
 
 #[time]
+async fn confirm_mempool_precommits(block: &Block, cache: &mut AddBlockCache) {
+    let config = block.config();
+    // FIXME
+}
+
+#[time]
 async fn confirm_mempool_benchmarks(block: &Block, cache: &mut AddBlockCache) {
     let config = block.config();
+    // FIXME sample solutions and non-solutions
     for benchmark in cache.mempool_benchmarks.iter_mut() {
         let seed = u32_from_str(format!("{:?}|{:?}", block.id, benchmark.id).as_str());
         let mut rng = StdRng::seed_from_u64(seed as u64);
@@ -614,6 +637,11 @@ async fn update_solution_signature_thresholds(block: &Block, cache: &mut AddBlockCache) {
     }
 }
 
+#[time]
+async fn update_base_fees(block: &Block, cache: &mut AddBlockCache) {
+    // FIXME
+}
+
 fn find_smallest_range_dimension(points: &Frontier) -> usize {
     (0..2)
         .min_by_key(|&d| {

View File

@@ -153,14 +153,14 @@ impl MerkleTree {
         hashes[0].clone()
     }
 
-    pub fn calc_merkle_proof(&self, branch_idx: usize) -> Result<MerkleBranch> {
+    pub fn calc_merkle_branch(&self, branch_idx: usize) -> Result<MerkleBranch> {
         if branch_idx >= self.n {
             return Err(anyhow!("Invalid branch index"));
         }
 
         let mut hashes = self.hashed_leafs.clone();
         let null_hash = MerkleHash::null();
-        let mut proof = Vec::new();
+        let mut branch = Vec::new();
         let mut idx = branch_idx;
 
         while hashes.len() > 1 {
@@ -170,7 +170,7 @@ impl MerkleTree {
             let right = chunk.get(1).unwrap_or(&null_hash);
 
             if idx >> 1 == i {
-                proof.push(if idx % 2 == 0 { right } else { left }.clone());
+                branch.push(if idx % 2 == 0 { right } else { left }.clone());
             }
 
             let mut combined = [0u8; 32];
@@ -182,7 +182,7 @@ impl MerkleTree {
             idx /= 2;
         }
 
-        Ok(MerkleBranch(proof))
+        Ok(MerkleBranch(branch))
     }
 }

View File

@@ -17,7 +17,7 @@ mod tests {
         let tree = MerkleTree::new(hashes.clone(), 16).unwrap();
         let root = tree.calc_merkle_root();
-        let proof = tree.calc_merkle_proof(7).unwrap();
+        let proof = tree.calc_merkle_branch(7).unwrap();
 
         let leaf_hash = &hashes[7];
         let calculated_root = proof.calc_merkle_root(leaf_hash, 7);
@@ -37,7 +37,7 @@ mod tests {
         let hashes = create_test_hashes();
         let tree = MerkleTree::new(hashes, 16).unwrap();
 
-        let result = tree.calc_merkle_proof(16);
+        let result = tree.calc_merkle_branch(16);
         assert!(result.is_err());
     }
@@ -46,7 +46,7 @@ mod tests {
         let hashes = create_test_hashes();
         let tree = MerkleTree::new(hashes.clone(), 16).unwrap();
-        let proof = tree.calc_merkle_proof(7).unwrap();
+        let proof = tree.calc_merkle_branch(7).unwrap();
 
         let tree_json = serde_json::to_string(&tree).unwrap();
         let deserialized_tree: MerkleTree = serde_json::from_str(&tree_json).unwrap();

View File

@@ -1,7 +1,7 @@
 use anyhow::{anyhow, Result};
 use bincode;
 use tig_challenges::*;
-pub use tig_structs::core::{BenchmarkSettings, Solution, SolutionData};
+pub use tig_structs::core::{BenchmarkSettings, OutputData, Solution};
 use tig_utils::decompress_obj;
 use wasmi::{Config, Engine, Linker, Module, Store, StoreLimitsBuilder};
 
@@ -11,7 +11,7 @@ pub fn compute_solution(
     wasm: &[u8],
     max_memory: u64,
     max_fuel: u64,
-) -> Result<Option<SolutionData>> {
+) -> Result<Option<OutputData>> {
     let seeds = settings.calc_seeds(nonce);
     let serialized_challenge = match settings.challenge_id.as_str() {
         "c001" => {
@@ -86,6 +86,7 @@ pub fn compute_solution(
         .map_err(|e| anyhow!("Failed to call function: {:?}", e))?;
 
     // Get runtime signature
+    // FIXME read runtime signature on execution error
     let runtime_signature_u64 = store.get_runtime_signature();
     let runtime_signature = (runtime_signature_u64 as u32) ^ ((runtime_signature_u64 >> 32) as u32);
     let fuel_consumed = max_fuel - store.get_fuel().unwrap();
@@ -103,7 +104,7 @@ pub fn compute_solution(
         &mut serialized_solution,
     )
     .expect("Failed to read solution from memory");
-    let mut solution_data = SolutionData {
+    let mut solution_data = OutputData {
         nonce,
         runtime_signature,
         fuel_consumed,