Update benchmarker to work with tracks.

FiveMovesAhead 2025-11-25 16:51:50 +00:00
parent 136d2a0e35
commit a0995e01a8
13 changed files with 69 additions and 132 deletions

View File

@@ -172,15 +172,15 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/GetDelegatorsResponse'
/get-difficulty-data:
/get-tracks-data:
get:
tags:
- GET
summary: Get latest difficulty data for a specific challenge id
summary: Get latest tracks data for a specific challenge id
description: |-
# Notes
* Returns aggregated num_nonces, num_solutions, difficulty data from benchmarks in the last 120 blocks.
* Returns aggregated algorithm_id, average_quality, num_bundles data from benchmarks in the last 120 blocks.
* Query parameter `<block_id>` must be the latest block. Use `/get-block` endpoint
@@ -202,7 +202,7 @@ paths:
content:
application/json:
schema:
$ref: '#/components/schemas/GetDifficultyDataResponse'
$ref: '#/components/schemas/GetTracksDataResponse'
/get-challenges:
get:
tags:
@@ -651,7 +651,7 @@ components:
type: string
pattern: ^c[0-9]{3}_a[0-9]{3}$
example: c002_a001
RaceId:
TrackId:
type: string
pattern: ^\w+=\w+(,\w+=\w+)*$
example: num_variables=100,clauses_to_variables_percent=426
@@ -736,8 +736,8 @@ components:
$ref: '#/components/schemas/ChallengeId'
algorithm_id:
$ref: '#/components/schemas/AlgorithmId'
race_id:
$ref: '#/components/schemas/RaceId'
track_id:
$ref: '#/components/schemas/TrackId'
BenchmarkState:
type: object
properties:
@@ -1011,8 +1011,8 @@ components:
properties:
id:
$ref: '#/components/schemas/ChallengeId'
details:
$ref: '#/components/schemas/ChallengeDetails'
config:
type: object
state:
$ref: '#/components/schemas/ChallengeState'
block_data:
@@ -1024,20 +1024,12 @@ components:
type: integer
format: uint64
qualifier_qualities:
description: "Map<RaceId, int32[]>"
description: "Map<TrackId, int32[]>"
additionalProperties:
type: array
items:
type: integer
format: int32
ChallengeDetails:
type: object
properties:
name:
type: string
type:
type: string
enum: [cpu, gpu]
ChallengeId:
type: string
pattern: ^c[0-9]{3}$
@@ -1081,21 +1073,15 @@ components:
block_confirmed:
type: integer
format: uint32
Difficulty:
type: array
items:
type: integer
format: int32
example: [40, 250]
DifficultyData:
TracksData:
type: object
properties:
algorithm_id:
$ref: '#/components/schemas/AlgorithmId'
num_nonces:
num_bundles:
type: integer
format: uint64
average_solution_quality:
average_quality:
type: integer
format: int32
Fraud:
@@ -1114,10 +1100,6 @@ components:
block_confirmed:
type: integer
format: uint32
Frontier:
type: array
items:
$ref: '#/components/schemas/Difficulty'
MD5:
type: string
pattern: ^[a-f0-9]{32}$
@@ -1452,16 +1434,16 @@ components:
format: double
reward:
$ref: '#/components/schemas/PreciseNumber'
GetDifficultyDataResponse:
GetTracksDataResponse:
type: object
properties:
data:
type: object
description: "Map<RaceId, DifficultyData[]>"
description: "Map<TrackId, TracksData[]>"
additionalProperties:
type: array
items:
$ref: '#/components/schemas/DifficultyData'
$ref: '#/components/schemas/TracksData'
GetOPoWResponse:
type: object
properties:
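
For reference, a minimal Python sketch of consuming the renamed endpoint. The base URL, the use of the requests library, and the exact name of the challenge-id query parameter are assumptions, not part of this commit; the response shape follows the GetTracksDataResponse schema above, where data maps each TrackId to a list of TracksData entries.

import requests

API_URL = "https://example.com"          # assumed base URL
latest_block_id = "<latest block id>"    # obtain via the /get-block endpoint

# block_id must be the latest block; the challenge-id parameter name is assumed here
resp = requests.get(
    f"{API_URL}/get-tracks-data",
    params={"block_id": latest_block_id, "challenge_id": "c001"},
)
resp.raise_for_status()

# data: Map<TrackId, TracksData[]>
for track_id, entries in resp.json()["data"].items():
    for entry in entries:
        print(track_id, entry["algorithm_id"], entry["num_bundles"], entry["average_quality"])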

View File

@@ -25,7 +25,7 @@ class CodeState(FromDict):
@dataclass
class CodeBlockData(FromDict):
num_qualifiers_by_player: Dict[str, int]
num_qualifiers_by_track_by_player: Dict[str, int]
adoption: PreciseNumber
merge_points: int
reward: PreciseNumber
@@ -43,7 +43,7 @@ class BenchmarkSettings(FromDict):
block_id: str
challenge_id: str
algorithm_id: str
race_id: str
track_id: str
def calc_seed(self, rand_hash: str, nonce: int) -> bytes:
return u8s_from_str(f"{jsonify(self)}_{rand_hash}_{nonce}")
@@ -52,6 +52,7 @@ class BenchmarkSettings(FromDict):
class PrecommitDetails(FromDict):
block_started: int
num_nonces: int
num_bundles: int
rand_hash: str
fee_paid: PreciseNumber
hyperparameters: Optional[dict]
@@ -72,7 +73,7 @@ class Precommit(FromDict):
class BenchmarkDetails(FromDict):
stopped: bool
merkle_root: Optional[MerkleHash]
average_solution_quality: Optional[int]
average_quality_by_bundle: Optional[List[int]]
sampled_nonces: Optional[List[int]]
@dataclass
@@ -175,29 +176,25 @@ class Block(FromDict):
config: dict
data: Optional[BlockData]
@dataclass
class ChallengeDetails(FromDict):
name: str
@dataclass
class ChallengeState(FromDict):
round_active: int
@dataclass
class ChallengeBlockData(FromDict):
num_qualifiers: int
num_qualifiers_by_track: int
qualifier_qualities: Dict[str, List[int]]
@dataclass
class Challenge(FromDict):
id: str
details: ChallengeDetails
config: dict
state: ChallengeState
block_data: Optional[ChallengeBlockData]
@dataclass
class OPoWBlockData(FromDict):
num_qualifiers_by_challenge: Dict[str, int]
num_qualifiers_by_challenge_by_track: Dict[str, int]
cutoff: int
weighted_delegated_deposit: PreciseNumber
weighted_self_deposit: PreciseNumber
@@ -274,8 +271,8 @@ class TopUp(FromDict):
@dataclass
class DifficultyData(FromDict):
average_solution_quality: int
num_nonces: int
average_quality: int
num_bundles: int
algorithm_id: str
@dataclass
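
The track_id carried in BenchmarkSettings is the comma-separated key=value string defined by the TrackId schema in the API spec (e.g. num_variables=100,clauses_to_variables_percent=426). A minimal sketch of splitting it into a dict; parse_track_id is a hypothetical helper, not part of this commit.

def parse_track_id(track_id: str) -> dict:
    # "num_variables=100,clauses_to_variables_percent=426"
    # -> {"num_variables": "100", "clauses_to_variables_percent": "426"}
    return dict(pair.split("=", 1) for pair in track_id.split(","))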

View File

@@ -2,7 +2,6 @@ import logging
import os
import time
from master.data_fetcher import *
from master.race_sampler import *
from master.job_manager import *
from master.precommit_manager import *
from master.slave_manager import *
@@ -19,7 +18,6 @@ def main():
client_manager.start()
data_fetcher = DataFetcher()
race_sampler = RaceSampler()
job_manager = JobManager()
precommit_manager = PrecommitManager()
submissions_manager = SubmissionsManager()
@@ -33,13 +31,11 @@ def main():
if data["block"].id != last_block_id:
last_block_id = data["block"].id
client_manager.on_new_block(**data)
race_sampler.on_new_block(**data)
job_manager.on_new_block(**data)
submissions_manager.on_new_block(**data)
precommit_manager.on_new_block(**data)
job_manager.run()
samples = race_sampler.run()
submit_precommit_req = precommit_manager.run(samples)
submit_precommit_req = precommit_manager.run()
submissions_manager.run(submit_precommit_req)
slave_manager.run()
except Exception as e:

View File

@@ -122,7 +122,7 @@ class ClientManager:
B.batch_size,
B.num_nonces - B.batch_size * C.batch_idx
),
'average_solution_quality', D.average_solution_quality,
'average_quality', D.average_quality,
'num_attempts', COALESCE(E.num_attempts, C.num_attempts),
'status', CASE
WHEN B.stopped IS NOT NULL THEN 'STOPPED'
@@ -167,9 +167,9 @@ class ClientManager:
benchmark_id,
JSONB_AGG(batch_data ORDER BY batch_idx) AS batches,
(
SUM((batch_data->>'num_nonces')::BIGINT * (batch_data->>'average_solution_quality')::BIGINT) /
SUM((batch_data->>'num_nonces')::BIGINT * (batch_data->>'average_quality')::BIGINT) /
SUM((batch_data->>'num_nonces')::BIGINT)
)::INTEGER AS average_solution_quality
)::INTEGER AS average_quality
FROM recent_batches
GROUP BY benchmark_id
)
@@ -180,7 +180,7 @@ class ClientManager:
B.settings->>'race_id' as race_id,
B.batch_size,
B.num_nonces,
COALESCE(C.average_solution_quality, A.average_solution_quality) AS average_solution_quality,
COALESCE(C.average_quality, A.average_quality) AS average_quality,
CASE
WHEN B.end_time IS NOT NULL THEN 'COMPLETED'
WHEN B.stopped IS NOT NULL THEN 'STOPPED'
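
The query above rolls per-batch averages up to a single benchmark-level average_quality, weighting each batch by its num_nonces. A rough Python equivalent, for illustration only (the production path is the SQL itself, and the final ::INTEGER cast may round rather than truncate):

def weighted_average_quality(batches: list[dict]) -> int:
    # mirrors SUM(num_nonces * average_quality) / SUM(num_nonces) from the query above
    total_nonces = sum(b["num_nonces"] for b in batches)
    weighted_sum = sum(b["num_nonces"] * b["average_quality"] for b in batches)
    return weighted_sum // total_nonces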

View File

@@ -28,7 +28,7 @@ class JobManager:
algo_selection = CONFIG["algo_selection"]
# create jobs from confirmed precommits
challenge_id_2_name = {
c.id: c.details.name
c.id: c["config"]["name"]
for c in challenges.values()
}
algorithm_id_2_name = {
@@ -180,10 +180,10 @@ class JobManager:
(
"""
UPDATE job_data
SET average_solution_quality = %s
WHERE benchmark_id = %s
SET average_quality = %s
WHERE benchmark_id = %s AND average_quality IS NULL
""",
(x.details.average_solution_quality, benchmark_id)
(sum(x.details.average_quality_by_bundle) // len(x.details.average_quality_by_bundle), benchmark_id)
)
]
@@ -263,12 +263,12 @@ class JobManager:
for row in rows:
benchmark_id = row['benchmark_id']
solution_quality = [x for y in row['solution_quality'] for x in y]
average_solution_quality = sum(solution_quality) // len(solution_quality)
average_quality = sum(solution_quality) // len(solution_quality)
batch_merkle_roots = [MerkleHash.from_str(root) for root in row['batch_merkle_roots']]
num_batches = len(batch_merkle_roots)
logger.info(f"job {benchmark_id}: (benchmark ready, average_solution_nonces: {average_solution_quality}")
logger.info(f"job {benchmark_id}: (benchmark ready, average_solution_nonces: {average_quality}")
tree = MerkleTree(
batch_merkle_roots,
@@ -283,13 +283,13 @@ class JobManager:
UPDATE job_data
SET merkle_root = %s,
solution_quality = %s,
average_solution_quality = %s
average_quality = %s
WHERE benchmark_id = %s
""",
(
merkle_root.to_str(),
json.dumps(solution_quality),
average_solution_quality,
average_quality,
benchmark_id
)
),

View File

@@ -22,7 +22,7 @@ class PrecommitManager:
self.last_block_id = block.id
self.num_precommits_submitted = 0
def run(self, race_samples: Dict[str, List[int]]) -> SubmitPrecommitRequest:
def run(self) -> SubmitPrecommitRequest:
num_pending_jobs = get_db_conn().fetch_one(
"""
SELECT COUNT(*)
@@ -49,11 +49,11 @@ class PrecommitManager:
algorithm_id=a_id,
player_id=CONFIG["player_id"],
block_id=self.last_block_id,
race_id=race_samples[a_id]
track_id=selection["selected_track"]
),
num_nonces=selection["num_nonces"],
num_bundles=selection["num_bundles"],
hyperparameters=selection["hyperparameters"],
runtime_config=selection["runtime_config"],
)
logger.info(f"Created precommit (algorithm_id: {a_id}, race: {req.settings.race_id}, num_nonces: {req.num_nonces}, hyperparameters: {req.hyperparameters}, runtime_config: {req.runtime_config})")
logger.info(f"Created precommit (algorithm_id: {a_id}, track: {req.settings.track_id}, num_bundles: {req.num_bundles}, hyperparameters: {req.hyperparameters}, runtime_config: {req.runtime_config})")
return req

View File

@@ -1,38 +0,0 @@
import logging
import os
import random
from common.structs import *
from typing import List, Dict
from master.client_manager import CONFIG
logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
Point = List[int]
class RaceSampler:
def __init__(self):
self.valid_size_ranges = {}
def on_new_block(self, block: Block, **kwargs):
self.active_race_ids = {
c_id: d["active_race_ids"]
for c_id, d in block.config["challenges"].items()
}
def run(self) -> Dict[str, int]:
samples = {}
for config in CONFIG["algo_selection"]:
a_id = config["algorithm_id"]
c_id = a_id[:4]
active_races = self.active_race_ids[c_id]
selected_races = sorted(set(config["selected_races"]) & set(active_races))
if len(selected_races) == 0:
selected_races = list(active_races)
config["selected_races"] = selected_races
samples[a_id] = random.choice(selected_races)
logger.debug(f"Selected race '{samples[a_id]}' for algorithm {a_id} in challenge {c_id}")
return samples

View File

@@ -241,7 +241,7 @@ class SlaveManager:
UPDATE batch_data
SET merkle_root = %s,
solution_quality = %s,
average_solution_quality = %s
average_quality = %s
WHERE benchmark_id = %s
AND batch_idx = %s
""",

View File

@@ -14,7 +14,7 @@ logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
@dataclass
class SubmitPrecommitRequest(FromDict):
settings: BenchmarkSettings
num_nonces: int
num_bundles: int
hyperparameters: Optional[dict]
runtime_config: dict

View File

@@ -42,7 +42,7 @@ CREATE TABLE IF NOT EXISTS job_data (
benchmark_id TEXT PRIMARY KEY,
merkle_root TEXT,
solution_quality JSONB,
average_solution_quality INTEGER,
average_quality INTEGER,
merkle_proofs JSONB,
FOREIGN KEY (benchmark_id) REFERENCES job(benchmark_id)
@@ -94,7 +94,7 @@ CREATE TABLE IF NOT EXISTS batch_data (
batch_idx INTEGER,
merkle_root TEXT,
solution_quality JSONB,
average_solution_quality INTEGER,
average_quality INTEGER,
merkle_proofs JSONB,
PRIMARY KEY (benchmark_id, batch_idx),
@@ -115,8 +115,8 @@ SELECT '
"algo_selection": [
{
"algorithm_id": "c001_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -127,8 +127,8 @@ SELECT '
},
{
"algorithm_id": "c002_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -139,8 +139,8 @@ SELECT '
},
{
"algorithm_id": "c003_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -151,8 +151,8 @@ SELECT '
},
{
"algorithm_id": "c004_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -163,8 +163,8 @@ SELECT '
},
{
"algorithm_id": "c005_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
@@ -175,8 +175,8 @@ SELECT '
},
{
"algorithm_id": "c006_a001",
"num_nonces": 40,
"selected_races": [],
"num_bundles": 1,
"selected_track": null,
"weight": 1,
"batch_size": 8,
"hyperparameters": null,

View File

@@ -27,7 +27,7 @@ class TestData(unittest.TestCase):
block_id="some_block",
challenge_id="some_challenge",
algorithm_id="some_algorithm",
race_id="a=1,b=2"
track_id="a=1,b=2"
)
rand_hash = "random_hash"
@@ -35,8 +35,8 @@
# Assert same as Rust version: tig-structs/tests/core.rs
expected = bytes([
122, 94, 247, 46, 146, 71, 140, 234, 78, 160, 235, 180, 79, 32, 69, 205, 247, 91, 94,
43, 231, 184, 120, 114, 182, 226, 24, 176, 227, 170, 72, 31
84, 136, 44, 57, 142, 50, 248, 37, 94, 195, 254, 190, 222, 27, 136, 115, 229, 136, 19,
207, 7, 208, 15, 193, 111, 99, 209, 131, 27, 189, 226, 175
])
self.assertEqual(settings.calc_seed(rand_hash, nonce), expected)

View File

@@ -88,8 +88,8 @@
<th pSortableColumn="algorithm" class="tig-dark text-center">
ALGORITHM
</th>
<th pSortableColumn="race_id" class="tig-dark text-center">
RACE
<th pSortableColumn="track_id" class="tig-dark text-center">
TRACK
</th>
<th pSortableColumn="batch_size" class="tig-dark text-center">
BATCH SIZE
@@ -97,7 +97,7 @@
<th pSortableColumn="nonces" class="tig-dark text-center">#NONCES</th>
<th pSortableColumn="quality" class="tig-dark text-center">
AVG SOLUTION QUALITY
AVERAGE QUALITY
</th>
<th pSortableColumn="status" class="tig-dark text-center">STATUS</th>
@@ -142,7 +142,7 @@
</td>
<td class="row-cell">
<div class="flex justify-content-center align-items-center multiline">
{{ benchmark.race_id_display }}
{{ benchmark.track_id_display }}
</div>
</td>
<td class="row-cell">
@@ -157,7 +157,7 @@
</td>
<td class="row-cell">
<div class="flex justify-content-center align-items-center">
{{ benchmark.average_solution_quality }}
{{ benchmark.average_quality }}
</div>
</td>
@@ -256,7 +256,7 @@
<div
class="flex justify-content-center align-items-center tig-dark"
>
{{ batch.end_time ? batch.average_solution_quality : "" }}
{{ batch.end_time ? batch.average_quality : "" }}
</div>
</td>
<td>

View File

@@ -100,13 +100,13 @@ export class TigApisService {
};
});
const race_id_display = b.race_id
const track_id_display = b.track_id
.replace(/=/g, ': ')
.replace(/,/g, '\n');
return {
...b,
race_id_display,
track_id_display,
time_elapsed,
benchmark_id_display,
batches,