Update benchmarker to work with races.

FiveMovesAhead 2025-11-17 16:47:27 +00:00
parent d03c193d1d
commit 652aa6d6f7
10 changed files with 45 additions and 78 deletions

View File

@@ -43,7 +43,7 @@ class BenchmarkSettings(FromDict):
block_id: str
challenge_id: str
algorithm_id: str
size: int
race_id: str
def calc_seed(self, rand_hash: str, nonce: int) -> bytes:
return u8s_from_str(f"{jsonify(self)}_{rand_hash}_{nonce}")
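Since calc_seed serializes the whole settings struct, swapping size for race_id changes every derived seed (which is why the expected bytes in the test file below change too). A minimal standalone sketch, with jsonify and u8s_from_str as assumed stand-ins for the real helpers (deterministic JSON serialization and UTF-8 encoding):

from dataclasses import dataclass
import json

def jsonify(obj) -> str:
    # Assumed stand-in for the real helper: deterministic JSON of the fields.
    return json.dumps(vars(obj), separators=(",", ":"))

def u8s_from_str(s: str) -> bytes:
    # Assumed stand-in for the real helper: UTF-8 bytes of the string.
    return s.encode("utf-8")

@dataclass
class BenchmarkSettings:
    player_id: str
    block_id: str
    challenge_id: str
    algorithm_id: str
    race_id: str  # replaces the old `size: int`

    def calc_seed(self, rand_hash: str, nonce: int) -> bytes:
        return u8s_from_str(f"{jsonify(self)}_{rand_hash}_{nonce}")

a = BenchmarkSettings("p", "b", "c", "alg", race_id="a=1,b=2")
b = BenchmarkSettings("p", "b", "c", "alg", race_id="a=1,b=3")
assert a.calc_seed("random_hash", 0) != b.calc_seed("random_hash", 0)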
@@ -274,9 +274,9 @@ class TopUp(FromDict):
@dataclass
class DifficultyData(FromDict):
num_solutions: int
average_solution_quality: int
num_nonces: int
difficulty: Point
algorithm_id: str
@dataclass
class DepositDetails(FromDict):

View File

@@ -2,7 +2,7 @@ import logging
import os
import time
from master.data_fetcher import *
from master.size_sampler import *
from master.race_sampler import *
from master.job_manager import *
from master.precommit_manager import *
from master.slave_manager import *
@@ -19,7 +19,7 @@ def main():
client_manager.start()
data_fetcher = DataFetcher()
size_sampler = SizeSampler()
race_sampler = RaceSampler()
job_manager = JobManager()
precommit_manager = PrecommitManager()
submissions_manager = SubmissionsManager()
@@ -33,12 +33,12 @@ def main():
if data["block"].id != last_block_id:
last_block_id = data["block"].id
client_manager.on_new_block(**data)
size_sampler.on_new_block(**data)
race_sampler.on_new_block(**data)
job_manager.on_new_block(**data)
submissions_manager.on_new_block(**data)
precommit_manager.on_new_block(**data)
job_manager.run()
samples = size_sampler.run()
samples = race_sampler.run()
submit_precommit_req = precommit_manager.run(samples)
submissions_manager.run(submit_precommit_req)
slave_manager.run()

View File

@@ -177,7 +177,7 @@ class ClientManager:
B.benchmark_id,
B.challenge,
B.algorithm,
(B.settings->>'size')::INTEGER AS size,
B.settings->>'race_id' AS race_id,
B.batch_size,
B.num_nonces,
COALESCE(C.average_solution_quality, A.average_solution_quality) AS average_solution_quality,

View File

@@ -71,7 +71,10 @@ class DataFetcher:
difficulty_responses = list(executor.map(_get, difficulty_urls))
difficulty_data = {
c_id: [DifficultyData.from_dict(d) for d in resp.get("data", [])]
c_id: {
race_id: [DifficultyData.from_dict(x) for x in v]
for race_id, v in resp.get("data", {}).items()
}
for c_id, resp in zip(challenges, difficulty_responses)
}
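The per-challenge difficulty payload is now keyed by race before parsing, so consumers receive {challenge_id: {race_id: [DifficultyData, ...]}} rather than one flat list per challenge. A standalone sketch with hypothetical response values (field names taken from the DifficultyData struct above):

resp = {
    "data": {
        "a=1,b=2": [{"num_solutions": 5, "num_nonces": 200}],
        "a=1,b=3": [{"num_solutions": 2, "num_nonces": 160}],
    }
}
# Aggregate per race, mirroring the comprehension above:
totals = {
    race_id: sum(e["num_nonces"] for e in entries)
    for race_id, entries in resp["data"].items()
}
print(totals)  # {'a=1,b=2': 200, 'a=1,b=3': 160}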

View File

@@ -18,54 +18,11 @@ class PrecommitManager:
self.algorithm_name_2_id = {}
self.challenge_name_2_id = {}
def on_new_block(
self,
block: Block,
precommits: Dict[str, Precommit],
benchmarks: Dict[str, Benchmark],
challenges: Dict[str, Challenge],
difficulty_data: Dict[str, List[DifficultyData]],
**kwargs
):
def on_new_block(self, block: Block, **kwargs):
self.last_block_id = block.id
self.num_precommits_submitted = 0
benchmark_stats_by_challenge = {
c.details.name: {
"solutions": 0,
"nonces": 0,
"qualifiers": 0
}
for c in challenges.values()
if c.block_data is not None
}
for benchmark in benchmarks.values():
precommit = precommits[benchmark.id]
c_name = challenges[precommit.settings.challenge_id].details.name
benchmark_stats_by_challenge[c_name]["solutions"] += benchmark.details.num_solutions
benchmark_stats_by_challenge[c_name]["nonces"] += precommit.details.num_nonces
for c_name, x in benchmark_stats_by_challenge.items():
avg_nonces_per_solution = (x["nonces"] // x["solutions"]) if x["solutions"] > 0 else 0
logger.info(f"benchmark stats for {c_name}: (#nonces: {x['nonces']}, #solutions: {x['solutions']}, #qualifiers: {x['qualifiers']}, avg_nonces_per_solution: {avg_nonces_per_solution})")
aggregate_difficulty_data = {
c_id: {
"nonces": sum(
x.num_nonces if x.difficulty in challenges[c_id].block_data.qualifier_difficulties else 0
for x in difficulty_data
),
"solutions": sum(
x.num_solutions if x.difficulty in challenges[c_id].block_data.qualifier_difficulties else 0
for x in difficulty_data
),
}
for c_id, difficulty_data in difficulty_data.items()
}
for c_id, x in aggregate_difficulty_data.items():
avg_nonces_per_solution = (x["nonces"] // x["solutions"]) if x["solutions"] > 0 else 0
logger.info(f"global qualifier difficulty stats for {challenges[c_id].details.name}: (#nonces: {x['nonces']}, #solutions: {x['solutions']}, avg_nonces_per_solution: {avg_nonces_per_solution})")
def run(self, size_samples: Dict[str, List[int]]) -> SubmitPrecommitRequest:
def run(self, race_samples: Dict[str, str]) -> SubmitPrecommitRequest:
num_pending_jobs = get_db_conn().fetch_one(
"""
SELECT COUNT(*)
@@ -92,11 +49,11 @@ class PrecommitManager:
algorithm_id=a_id,
player_id=CONFIG["player_id"],
block_id=self.last_block_id,
size=size_samples[a_id]
race_id=race_samples[a_id]
),
num_nonces=selection["num_nonces"],
hyperparameters=selection["hyperparameters"],
runtime_config=selection["runtime_config"],
)
logger.info(f"Created precommit (algorithm_id: {a_id}, size: {req.settings.size}, num_nonces: {req.num_nonces}, hyperparameters: {req.hyperparameters}, runtime_config: {req.runtime_config})")
logger.info(f"Created precommit (algorithm_id: {a_id}, race: {req.settings.race_id}, num_nonces: {req.num_nonces}, hyperparameters: {req.hyperparameters}, runtime_config: {req.runtime_config})")
return req
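run() now receives the sampler's output, one race id per configured algorithm, and bakes it into the precommit settings. A condensed self-contained sketch (Settings and the literal values are hypothetical stand-ins for BenchmarkSettings and CONFIG):

from dataclasses import dataclass

@dataclass
class Settings:
    algorithm_id: str
    player_id: str
    block_id: str
    race_id: str

race_samples = {"c001_a001": "a=1,b=2"}  # shape produced by RaceSampler.run()
a_id = "c001_a001"
settings = Settings(
    algorithm_id=a_id,
    player_id="some_player",      # CONFIG["player_id"] in the real code
    block_id="some_block",        # self.last_block_id in the real code
    race_id=race_samples[a_id],   # one sampled race per algorithm
)
print(settings)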

View File

@@ -9,13 +9,13 @@ logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
Point = List[int]
class SizeSampler:
class RaceSampler:
def __init__(self):
self.valid_size_ranges = {}
def on_new_block(self, block: Block, **kwargs):
self.allowed_sizes = {
c_id: d["difficulty"]["allowed_sizes"]
self.active_race_ids = {
c_id: d["active_race_ids"]
for c_id, d in block.config["challenges"].items()
}
@@ -25,14 +25,14 @@ class SizeSampler:
for config in CONFIG["algo_selection"]:
a_id = config["algorithm_id"]
c_id = a_id[:4]
allowed_sizes = self.allowed_sizes[c_id]
active_races = self.active_race_ids[c_id]
selected_sizes = sorted(set(config["selected_sizes"]) & set(allowed_sizes))
if len(selected_sizes) == 0:
selected_sizes = list(allowed_sizes)
config["selected_sizes"] = selected_sizes
selected_races = sorted(set(config["selected_races"]) & set(active_races))
if len(selected_races) == 0:
selected_races = list(active_races)
config["selected_races"] = selected_races
samples[a_id] = random.choice(selected_sizes)
logger.debug(f"Selected size {samples[a_id]} for algorithm {a_id} in challenge {c_id}")
samples[a_id] = random.choice(selected_races)
logger.debug(f"Selected race '{samples[a_id]}' for algorithm {a_id} in challenge {c_id}")
return samples
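The selection rule: intersect the operator's selected_races with the races currently active for the challenge; if nothing survives, fall back to all active races; then sample uniformly. A standalone sketch with hypothetical race ids:

import random

active_race_ids = {"c001": ["a=1,b=2", "a=1,b=3"]}  # from block.config
config = {
    "algorithm_id": "c001_a001",
    "selected_races": ["a=1,b=3", "a=9,b=9"],       # one id is no longer active
}

a_id = config["algorithm_id"]
c_id = a_id[:4]  # challenge id is the algorithm id's prefix
selected = sorted(set(config["selected_races"]) & set(active_race_ids[c_id]))
if not selected:
    selected = list(active_race_ids[c_id])          # fall back to all active
print(random.choice(selected))                      # -> "a=1,b=3"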

View File

@@ -116,7 +116,7 @@ SELECT '
{
"algorithm_id": "c001_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -128,7 +128,7 @@ SELECT '
{
"algorithm_id": "c002_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -140,7 +140,7 @@ SELECT '
{
"algorithm_id": "c003_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -152,7 +152,7 @@ SELECT '
{
"algorithm_id": "c004_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -164,7 +164,7 @@ SELECT '
{
"algorithm_id": "c005_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -176,7 +176,7 @@ SELECT '
{
"algorithm_id": "c006_a001",
"num_nonces": 40,
"selected_sizes": [],
"selected_races": [],
"weight": 1,
"batch_size": 8,
"hyperparameters": null,

View File

@@ -27,7 +27,7 @@ class TestData(unittest.TestCase):
block_id="some_block",
challenge_id="some_challenge",
algorithm_id="some_algorithm",
difficulty=[1, 2, 3]
race_id="a=1,b=2"
)
rand_hash = "random_hash"
@@ -35,8 +35,8 @@
# Assert same as Rust version: tig-structs/tests/core.rs
expected = bytes([
135, 168, 152, 35, 57, 28, 184, 91, 10, 189, 139, 111, 171, 82, 156, 14,
165, 68, 80, 41, 169, 236, 42, 41, 198, 73, 124, 78, 130, 216, 168, 67
122, 94, 247, 46, 146, 71, 140, 234, 78, 160, 235, 180, 79, 32, 69, 205, 247, 91, 94,
43, 231, 184, 120, 114, 182, 226, 24, 176, 227, 170, 72, 31
])
self.assertEqual(settings.calc_seed(rand_hash, nonce), expected)

View File

@@ -88,8 +88,8 @@
<th pSortableColumn="algorithm" class="tig-dark text-center">
ALGORITHM
</th>
<th pSortableColumn="size" class="tig-dark text-center">
INST. SIZE
<th pSortableColumn="race_id" class="tig-dark text-center">
RACE
</th>
<th pSortableColumn="batch_size" class="tig-dark text-center">
BATCH SIZE
@@ -142,7 +142,7 @@
</td>
<td class="row-cell">
<div class="flex justify-content-center align-items-center">
[{{ benchmark.size }}]
[innerHTML]="formatRaceId(benchmark.race_id)">
</div>
</td>
<td class="row-cell">

View File

@@ -123,4 +123,11 @@ export class HomeComponent {
clearInterval(this.interval);
}
}
formatRaceId(value: string): string {
if (!value) return '';
return value
.replace(/=/g, ': ')
.replace(/,/g, '<br>');
}
}
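formatRaceId turns the stored key=value race id into display HTML, e.g. "a=1,b=2" becomes "a: 1<br>b: 2", which the [innerHTML] binding above renders with one parameter per line. The same transformation in Python, for reference:

def format_race_id(value: str) -> str:
    if not value:
        return ""
    # '=' -> ': ' for readability; ',' -> '<br>' for one parameter per line
    return value.replace("=", ": ").replace(",", "<br>")

assert format_race_id("a=1,b=2") == "a: 1<br>b: 2"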