Compare commits

..

27 Commits

Author SHA1 Message Date
FiveMovesAhead
7d9d889dc0 Compiled neuralnet_optimizer/neural_scientist
Some checks failed
Build Algorithm / init (push) Has been cancelled
Test Workspace / Test Workspace (push) Has been cancelled
Build Algorithm / Compile algorithm on arm64 (push) Has been cancelled
Build Algorithm / Compile algorithm on amd64 (push) Has been cancelled
Build Algorithm / commit (push) Has been cancelled
2026-02-10 15:30:51 +00:00
FiveMovesAhead
8c50ce8aa7 Submitted neuralnet_optimizer/neural_scientist 2026-02-10 12:01:08 +00:00
FiveMovesAhead
8ccedeb43e Add set-profile endpoint
Some checks failed
Test Workspace / Test Workspace (push) Has been cancelled
2026-02-03 16:17:35 +00:00
FiveMovesAhead
0068b40ba7 Bundles should carry over for cutoff calc, even if tracks change. 2026-02-03 14:58:20 +00:00
FiveMovesAhead
9b6e791bf8 Add challenge description for job scheduling 2026-02-03 12:47:46 +00:00
FiveMovesAhead
cbbd3d308b Update vector search challenge description 2026-02-03 12:25:38 +00:00
FiveMovesAhead
b2d2b284e0 Update neuralnet optimizer challenge description 2026-02-03 12:22:42 +00:00
FiveMovesAhead
277019e043 Update satisfiability challenge description 2026-02-03 12:21:37 +00:00
FiveMovesAhead
df3d580f81 Update vehicle routing challenge description 2026-02-03 12:19:16 +00:00
FiveMovesAhead
2db4a6e84c Update knapsack challenge description 2026-02-03 12:13:12 +00:00
FiveMovesAhead
693c0bbd10 Update hypergraph challenge description 2026-02-03 12:07:50 +00:00
FiveMovesAhead
7a4387300a Fix logging bug. 2026-02-03 12:06:00 +00:00
FiveMovesAhead
4581036a33 Update submit-precommit swagger 2026-02-03 12:05:24 +00:00
FiveMovesAhead
37c87d9a1e Bump docker images to 0.0.5
Some checks failed
Test Workspace / Test Workspace (push) Has been cancelled
2026-01-28 10:47:08 +00:00
FiveMovesAhead
666dfd4179 Add job_scheduling to docker-compose.yml 2026-01-28 10:46:10 +00:00
FiveMovesAhead
8629e985f2 Bump git workflow to 0.0.5 2026-01-28 09:17:04 +00:00
FiveMovesAhead
938a4b2c20 Random track selection 2026-01-27 12:07:34 +00:00
FiveMovesAhead
4de243cb16 Merge branch 'c007_job_scheduling' into temp 2026-01-26 23:25:02 +00:00
FiveMovesAhead
96d56007bd Add raw identifier for gen() 2026-01-26 23:24:26 +00:00
FiveMovesAhead
15251730ab Add 2-tier evaluation to job scheduling. 2026-01-26 23:23:18 +00:00
FiveMovesAhead
9c3c720752 Allow optimizer access to model params in nn challenge 2026-01-26 23:13:56 +00:00
FiveMovesAhead
c66234736b Update knapsack instance generation to team formation. 2026-01-26 23:06:55 +00:00
FiveMovesAhead
5c3f79e845 Patch potential pipe deadlock
Some checks failed
Test Workspace / Test Workspace (push) Has been cancelled
2026-01-16 11:49:53 +00:00
FiveMovesAhead
06362d0e75 Fix non-deterministic instances.
Some checks failed
Test Workspace / Test Workspace (push) Has been cancelled
2025-12-25 00:08:56 +08:00
FiveMovesAhead
bf7d69a47d Fix job scheduling track id format
Some checks are pending
Test Workspace / Test Workspace (push) Waiting to run
2025-12-24 00:11:35 +08:00
FiveMovesAhead
2386003b54 Add missing mod for job scheduling 2025-12-24 00:03:49 +08:00
FiveMovesAhead
91574007eb Implement job scheduling challenge 2025-12-23 23:41:00 +08:00
47 changed files with 3501 additions and 221 deletions

View File

@ -9,12 +9,14 @@ on:
- 'vector_search/*'
- 'hypergraph/*'
- 'neuralnet_optimizer/*'
- 'job_scheduling/*'
- 'test/satisfiability/*'
- 'test/vehicle_routing/*'
- 'test/knapsack/*'
- 'test/vector_search/*'
- 'test/hypergraph/*'
- 'test/neuralnet_optimizer/*'
- 'test/job_scheduling/*'
jobs:
init:
@ -66,7 +68,7 @@ jobs:
docker run --rm --user root \
-v ${{ github.workspace }}:/workspace \
-w /workspace \
ghcr.io/tig-foundation/tig-monorepo/${{ needs.init.outputs.CHALLENGE }}/dev:0.0.4 \
ghcr.io/tig-foundation/tig-monorepo/${{ needs.init.outputs.CHALLENGE }}/dev:0.0.5 \
build_algorithm ${{ needs.init.outputs.ALGORITHM }}
- name: Upload Artifact
@ -103,7 +105,7 @@ jobs:
docker run --rm --user root \
-v ${{ github.workspace }}:/workspace \
-w /workspace \
ghcr.io/tig-foundation/tig-monorepo/${{ needs.init.outputs.CHALLENGE }}/dev:0.0.4 \
ghcr.io/tig-foundation/tig-monorepo/${{ needs.init.outputs.CHALLENGE }}/dev:0.0.5 \
build_algorithm ${{ needs.init.outputs.ALGORITHM }}
- name: Upload Artifact

1
Cargo.lock generated
View File

@ -2067,6 +2067,7 @@ dependencies = [
"ndarray",
"paste",
"rand",
"rand_distr",
"serde",
"serde_json",
"statrs",

View File

@ -100,6 +100,7 @@ f"""Library not found at {so_path}:
"vector_search": "c004",
"hypergraph": "c005",
"neuralnet_optimizer": "c006",
"job_scheduling": "c007",
}
challenge_id = challenge_ids[CHALLENGE]

View File

@ -33,7 +33,7 @@ paths:
* Query parameter `<block_id>` must be the latest block. Use `/get-block` endpoint
* `names` is a map of `<player_id>` to ENS name (only if one exists)
* `player_details` is a map of `<player_id>` to `PlayerDetails`
parameters:
- name: block_id
in: query
@ -158,6 +158,8 @@ paths:
* Query parameter `<block_id>` must be the latest block. Use `/get-block` endpoint
* All players who have a deposit are included in the response
* `player_details` is a map of `<player_id>` to `PlayerDetails`
parameters:
- name: block_id
@ -236,7 +238,7 @@ paths:
* Query parameter `<block_id>` must be the latest block. Use `/get-block` endpoint
* Each opow is for a specific Benchmarker. `opow.block_data.reward` is the reward before any sharing amongst delegators
* `names` is a map of `<player_id>` to ENS name (only if one exists)
* `player_details` is a map of `<player_id>` to `PlayerDetails`
parameters:
- name: block_id
in: query
@ -303,7 +305,7 @@ paths:
* `players` is a map of `<player_id>` to a dict of reward types (algorithm, benchmark, advance, delegator)
* `names` is a map of `<player_id>` to ENS name (only if one exists)
* `player_details` is a map of `<player_id>` to `PlayerDetails`
* `totals` is a map of emission types to `uint256`
parameters:
@ -377,6 +379,39 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/SetCoinbaseResponse'
/set-profile:
post:
tags:
- POST
summary: Set your profile (display name and social handles)
description: |-
# Notes
* Can only be updated once every 10080 blocks
* Header `X-Api-Key` is required. Use `/request-api-key` endpoint.
* If `<api_key>` is invalid, a `401` error will be returned
* All fields are optional. Omit a field to leave it unchanged.
* **name**: max 20 characters; letters, numbers, underscores, dashes only.
* **x**: X (Twitter) handle; optional `@` prefix; 1–15 characters; letters, numbers, underscores.
* **telegram**: Telegram username; optional `@` prefix; 5–32 characters; letters, numbers, underscores.
* **discord**: Discord username; optional `@` prefix; 2–32 characters; letters, numbers, underscores, periods.
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/SetProfileRequest'
parameters:
- in: header
name: X-Api-Key
description: <api_key> from /request-api-key endpoint
schema:
$ref: '#/components/schemas/MD5'
responses:
'200':
description: Success
content:
application/json:
schema:
$ref: '#/components/schemas/SetProfileResponse'
/set-delegatees:
post:
tags:
@ -550,11 +585,16 @@ paths:
- POST
summary: Submit a precommit
description: |-
Submit benchmark settings and per-track settings for all active tracks of the challenge.
The server selects a track at random; the returned precommit will indicate the selected track.
# Notes
* This endpoint can only be invoked once every few seconds
* When a precommit is confirmed (`precommit.state != null`), a random string will be set (`precommit.state.rand_hash`). This string must be used when generating seeds
* `track_settings` must include every active track for the challenge (key = track_id). Each value has `hyperparameters`, `fuel_budget`, and `num_bundles`.
* When a precommit is confirmed (`precommit.state != null`), a random string will be set (`precommit.state.rand_hash`). This string must be used when generating seeds.
requestBody:
content:
application/json:
@ -740,6 +780,22 @@ components:
$ref: '#/components/schemas/AlgorithmId'
track_id:
$ref: '#/components/schemas/TrackId'
TrackSettings:
type: object
required:
- fuel_budget
- num_bundles
properties:
hyperparameters:
type: object
nullable: true
additionalProperties: true
fuel_budget:
type: integer
format: uint64
num_bundles:
type: integer
format: uint64
BenchmarkState:
type: object
properties:
@ -1230,7 +1286,20 @@ components:
type: object
properties:
name:
$ref: '#/components/schemas/Address'
type: string
nullable: true
x:
type: string
nullable: true
description: X (Twitter) handle
telegram:
type: string
nullable: true
description: Telegram username
discord:
type: string
nullable: true
description: Discord username
is_multisig:
type: boolean
PlayerState:
@ -1377,10 +1446,11 @@ components:
type: array
items:
$ref: '#/components/schemas/Binary'
names:
player_details:
type: object
additionalProperties:
type: string
$ref: '#/components/schemas/PlayerDetails'
description: Map of player_id to PlayerDetails
GetBenchmarksResponse:
type: object
properties:
@ -1442,6 +1512,11 @@ components:
format: double
reward:
$ref: '#/components/schemas/PreciseNumber'
player_details:
type: object
additionalProperties:
$ref: '#/components/schemas/PlayerDetails'
description: Map of player_id to PlayerDetails
GetTracksDataResponse:
type: object
properties:
@ -1459,10 +1534,11 @@ components:
type: array
items:
$ref: '#/components/schemas/OPoW'
names:
player_details:
type: object
additionalProperties:
type: string
$ref: '#/components/schemas/PlayerDetails'
description: Map of player_id to PlayerDetails
GetPlayerDataResponse:
type: object
properties:
@ -1513,10 +1589,11 @@ components:
$ref: '#/components/schemas/PreciseNumber'
delegator:
$ref: '#/components/schemas/PreciseNumber'
names:
player_details:
type: object
additionalProperties:
type: string
$ref: '#/components/schemas/PlayerDetails'
description: Map of player_id to PlayerDetails
totals:
type: object
properties:
@ -1559,6 +1636,34 @@ components:
properties:
ok:
type: boolean
SetProfileRequest:
type: object
properties:
name:
type: string
maxLength: 20
pattern: ^[a-zA-Z0-9_-]+$
description: Display name (max 20 characters; letters, numbers, underscores, dashes only)
x:
type: string
maxLength: 16
pattern: ^@?[a-zA-Z0-9_]{1,15}$
description: X (Twitter) handle; optional @ prefix; 1–15 characters
telegram:
type: string
maxLength: 33
pattern: ^@?[a-zA-Z0-9_]{5,32}$
description: Telegram username; optional @ prefix; 5–32 characters
discord:
type: string
maxLength: 33
pattern: ^@?[a-zA-Z0-9_.]{2,32}$
description: Discord username; optional @ prefix; 2–32 characters; letters, numbers, underscores, periods
SetProfileResponse:
type: object
properties:
ok:
type: boolean
SetDelegateesRequest:
type: object
properties:
@ -1638,31 +1743,31 @@ components:
type: boolean
SubmitPrecommitRequest:
type: object
required:
- settings
- track_settings
properties:
settings:
$ref: '#/components/schemas/BenchmarkSettings'
num_nonces:
type: integer
format: uint32
hyperparameters:
type: object
nullable: true
runtime_config:
track_settings:
type: object
additionalProperties:
$ref: '#/components/schemas/TrackSettings'
description: Settings per active track (key = track_id). Must include all active tracks for the challenge.
SubmitPrecommitResponse:
type: object
properties:
benchmark_id:
$ref: '#/components/schemas/MD5'
SubmitProofRequest:
type: object
properties:
benchmark_id:
$ref: '#/components/schemas/MD5'
merkle_proofs:
type: array
items:
$ref: '#/components/schemas/MerkleProof'
type: object
properties:
benchmark_id:
$ref: '#/components/schemas/MD5'
merkle_proofs:
type: array
items:
$ref: '#/components/schemas/MerkleProof'
SubmitProofResponse:
type: object
properties:

View File

@ -39,3 +39,5 @@ c005 = ["cudarc", "tig-challenges/c005"]
hypergraph = ["c005"]
c006 = ["cudarc", "tig-challenges/c006"]
neuralnet_optimizer = ["c006"]
c007 = ["tig-challenges/c007"]
job_scheduling = ["c007"]

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,41 @@
# TIG Code Submission
## Submission Details
* **Challenge Name:** job_scheduling
* **Algorithm Name:** [name of submission]
* **Copyright:** [year work created] [name of copyright owner]
* **Identity of Submitter:** [name of person or entity submitting the work to TIG]
* **Identity of Creator of Algorithmic Method:** [if applicable else null]
* **Unique Algorithm Identifier (UAI):** [if applicable else null]
## References and Acknowledgments
*(If this implementation is based on or inspired by existing work, please include citations and acknowledgments below. Remove this section if unused.)*
### 1. Academic Papers
- [Author(s)], *"[Paper Title]"*, DOI: [DOI or URL if available]
### 2. Code References
- [Author(s)] [URL]
### 3. Other
- [Author(s)] [Details or description]
## Additional Notes
*(Include any relevant context, usage notes, or implementation details here. Remove this section if unused.)*
## License
The files in this folder are under the following licenses:
* TIG Benchmarker Outbound License
* TIG Commercial License
* TIG Inbound Game License
* TIG Innovator Outbound Game License
* TIG Open Data License
* TIG THV Game License
Copies of the licenses can be obtained at:
https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses

View File

@ -0,0 +1,50 @@
// TIG's UI uses the pattern `tig_challenges::<challenge_name>` to automatically detect your algorithm's challenge
use crate::{seeded_hasher, HashMap, HashSet};
use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use tig_challenges::job_scheduling::*;
/// Tunable parameters for this algorithm, deserialized from the optional JSON
/// object passed to `solve_challenge`. Empty in this template; add `pub`
/// fields as needed (see the commented example).
#[derive(Serialize, Deserialize)]
pub struct Hyperparameters {
    // Optionally define hyperparameters here. Example:
    // pub param1: usize,
    // pub param2: f64,
}
/// Prints usage/help text for this algorithm.
/// Invoked via the `help_algorithm` script.
pub fn help() {
    let message = "No help information provided.";
    println!("{}", message);
}
/// Template entry point invoked by the TIG runtime to solve one challenge
/// instance.
///
/// * `challenge` - the job_scheduling instance to solve.
/// * `save_solution` - callback that persists a candidate solution; each call
///   overwrites the previously saved solution, and the last saved solution is
///   the one evaluated.
/// * `hyperparameters` - optional JSON object of tunable parameters supplied
///   by the benchmarker (`None` when not provided).
///
/// Return `Ok(())` when the algorithm is finished, or `Err` on an
/// unrecoverable error. This template is a stub and always returns an error.
pub fn solve_challenge(
    challenge: &Challenge,
    save_solution: &dyn Fn(&Solution) -> Result<()>,
    hyperparameters: &Option<Map<String, Value>>,
) -> Result<()> {
    // If you need random numbers, recommend using SmallRng with challenge.seed:
    // use rand::{rngs::SmallRng, Rng, SeedableRng};
    // let mut rng = SmallRng::from_seed(challenge.seed);

    // If you need HashMap or HashSet, make sure to use a deterministic hasher
    // for a consistent runtime_signature:
    // use crate::{seeded_hasher, HashMap, HashSet};
    // let hasher = seeded_hasher(&challenge.seed);
    // let map = HashMap::with_hasher(hasher);

    // Support hyperparameters if needed:
    // let hyperparameters = match hyperparameters {
    //     Some(hyperparameters) => {
    //         serde_json::from_value::<Hyperparameters>(Value::Object(hyperparameters.clone()))
    //             .map_err(|e| anyhow!("Failed to parse hyperparameters: {}", e))?
    //     }
    //     None => Hyperparameters { /* set default values here */ },
    // };

    // Use save_solution(&Solution) to save your solution; it overwrites any
    // previous solution. Return Err(<msg>) if your algorithm encounters an
    // error, and Ok(()) when it is finished.
    Err(anyhow!("Not implemented"))
}
// Important! Do not include any tests in this file, it will result in your submission being rejected

View File

@ -35,3 +35,7 @@ pub use hypergraph as c005;
pub mod neuralnet_optimizer;
#[cfg(feature = "c006")]
pub use neuralnet_optimizer as c006;
#[cfg(feature = "c007")]
pub mod job_scheduling;
#[cfg(feature = "c007")]
pub use job_scheduling as c007;

View File

@ -320,6 +320,7 @@ fn compute_blends(s: &DualPhaseConsensusState, val_loss: Option<f32>) -> (f32, f
fn optimizer_step(
optimizer_state: &mut dyn OptimizerStateTrait,
_model_params: &[CudaSlice<f32>],
gradients: &[CudaSlice<f32>],
epoch: usize,
_train_loss: Option<f32>,
@ -529,4 +530,4 @@ fn optimizer_step(
pub fn help() {
println!("No help information available.");
}
}

View File

@ -95,6 +95,7 @@ fn optimizer_query_at_params(
fn optimizer_step(
optimizer_state: &mut dyn OptimizerStateTrait,
model_params: &[CudaSlice<f32>],
gradients: &[CudaSlice<f32>],
epoch: usize,
train_loss: Option<f32>,

View File

@ -1,5 +1,5 @@
# Version of all benchmarker containers
VERSION=0.0.4
VERSION=0.0.5
# Set to 1 to enable verbose logging
VERBOSE=1

View File

@ -56,7 +56,7 @@ class PrecommitDetails(FromDict):
rand_hash: str
fee_paid: PreciseNumber
hyperparameters: Optional[dict]
runtime_config: dict
fuel_budget: int
@dataclass
class PrecommitState(FromDict):

View File

@ -82,7 +82,7 @@ class JobManager:
num_nonces,
num_batches,
rand_hash,
runtime_config,
fuel_budget,
batch_size,
challenge,
algorithm,
@ -100,7 +100,7 @@ class JobManager:
x.details.num_nonces,
num_batches,
x.details.rand_hash,
json.dumps(x.details.runtime_config),
x.details.fuel_budget,
batch_size,
c_name,
a_name,

View File

@ -47,21 +47,24 @@ class PrecommitManager:
logger.error(f"Invalid selected challenge_id '{c_id}'. Valid challenge_ids: {sorted(self.challenge_configs)}")
return
challenge_config = self.challenge_configs[c_id]
selected_track_ids = sorted(set(selection["selected_track_ids"]) & set(challenge_config["active_tracks"]))
if len(selected_track_ids) == 0:
selected_track_ids = sorted(challenge_config["active_tracks"])
selection["selected_track_ids"] = selected_track_ids
for t_id in set(selection["track_settings"]) - set(challenge_config["active_tracks"]):
selection["track_settings"].pop(t_id)
for t_id in set(challenge_config["active_tracks"]) - set(selection["track_settings"]):
selection["track_settings"][t_id] = {}
if selection["num_bundles"] < challenge_config["min_num_bundles"]:
selection["num_bundles"] = challenge_config["min_num_bundles"]
if (
len(selection["runtime_config"]) > 1 or
selection["runtime_config"].get("max_fuel") is None or
selection["runtime_config"]["max_fuel"] < 0 or
selection["runtime_config"]["max_fuel"] > challenge_config["runtime_config_limits"]["max_fuel"]
):
selection["runtime_config"] = {"max_fuel": challenge_config["runtime_config_limits"]["max_fuel"]}
for t_id in set(challenge_config["active_tracks"]):
for k in set(selection["track_settings"][t_id]) - {"num_bundles", "hyperparameters", "fuel_budget"}:
selection["track_settings"][t_id].pop(k)
if selection["track_settings"][t_id].get("num_bundles", 0) < challenge_config["min_num_bundles"]:
selection["track_settings"][t_id]["num_bundles"] = challenge_config["min_num_bundles"]
if (
selection["track_settings"][t_id].get("fuel_budget") is None or
selection["track_settings"][t_id]["fuel_budget"] < 0 or
selection["track_settings"][t_id]["fuel_budget"] > challenge_config["max_fuel_budget"]
):
selection["track_settings"][t_id]["fuel_budget"] = challenge_config["max_fuel_budget"]
if "hyperparameters" not in selection["track_settings"][t_id]:
selection["track_settings"][t_id]["hyperparameters"] = None
self.num_precommits_submitted += 1
req = SubmitPrecommitRequest(
@ -70,14 +73,9 @@ class PrecommitManager:
algorithm_id=a_id,
player_id=CONFIG["player_id"],
block_id=self.last_block_id,
track_id=random.choice(selection["selected_track_ids"]),
track_id="",
),
num_bundles=selection["num_bundles"],
hyperparameters=selection["hyperparameters"],
runtime_config={
**challenge_config["runtime_config_limits"],
**selection["runtime_config"]
},
track_settings=selection["track_settings"],
)
logger.info(f"Created precommit (algorithm_id: {a_id}, track: {req.settings.track_id}, num_bundles: {req.num_bundles}, hyperparameters: {req.hyperparameters}, runtime_config: {req.runtime_config})")
logger.info(f"Created precommit with algorithm: {a_id}")
return req

View File

@ -43,7 +43,7 @@ class SlaveManager:
'settings', B.settings,
'hyperparameters', B.hyperparameters,
'sampled_nonces', A.sampled_nonces,
'runtime_config', B.runtime_config,
'fuel_budget', B.fuel_budget,
'download_url', B.download_url,
'rand_hash', B.rand_hash,
'batch_size', B.batch_size,
@ -76,7 +76,7 @@ class SlaveManager:
'settings', B.settings,
'hyperparameters', B.hyperparameters,
'sampled_nonces', NULL,
'runtime_config', B.runtime_config,
'fuel_budget', B.fuel_budget,
'download_url', B.download_url,
'rand_hash', B.rand_hash,
'batch_size', B.batch_size,

View File

@ -12,11 +12,15 @@ from master.client_manager import CONFIG
logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
@dataclass
class SubmitPrecommitRequest(FromDict):
settings: BenchmarkSettings
class TrackSettings(FromDict):
num_bundles: int
hyperparameters: Optional[dict]
runtime_config: dict
fuel_budget: int
@dataclass
class SubmitPrecommitRequest(FromDict):
settings: BenchmarkSettings
track_settings: Dict[str, TrackSettings]
@dataclass
class SubmitBenchmarkRequest(FromDict):

View File

@ -8,7 +8,7 @@ CREATE TABLE IF NOT EXISTS job (
hyperparameters JSONB,
num_nonces INTEGER NOT NULL,
rand_hash TEXT NOT NULL,
runtime_config JSONB NOT NULL,
fuel_budget BIGINT NOT NULL,
batch_size INTEGER NOT NULL,
num_batches INTEGER NOT NULL,
challenge TEXT NOT NULL,
@ -115,57 +115,45 @@ SELECT '
"algo_selection": [
{
"algorithm_id": "c001_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c002_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c003_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c004_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c005_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c006_a001",
"num_bundles": 1,
"selected_track_ids": [],
"track_settings": {},
"weight": 1,
"batch_size": 8,
"hyperparameters": null,
"runtime_config": {}
"batch_size": 8
},
{
"algorithm_id": "c007_a001",
"track_settings": {},
"weight": 1,
"batch_size": 8
}
],
"time_before_batch_retry": 60000,

View File

@ -55,4 +55,9 @@ services:
neuralnet_optimizer:
<<: [*common, *common-gpu]
image: ghcr.io/tig-foundation/tig-monorepo/neuralnet_optimizer/runtime:${VERSION}
container_name: neuralnet_optimizer
container_name: neuralnet_optimizer
job_scheduling:
<<: *common
image: ghcr.io/tig-foundation/tig-monorepo/job_scheduling/runtime:${VERSION}
container_name: job_scheduling

View File

@ -68,7 +68,7 @@ def run_tig_runtime(nonce, batch, so_path, ptx_path, results_dir):
batch["rand_hash"],
str(nonce),
so_path,
"--fuel", str(batch["runtime_config"]["max_fuel"]),
"--fuel", str(batch["fuel_budget"]),
"--output", output_dir,
]
if batch["hyperparameters"] is not None:
@ -81,26 +81,17 @@ def run_tig_runtime(nonce, batch, so_path, ptx_path, results_dir):
]
logger.debug(f"computing nonce: {' '.join(cmd[:4] + [f"'{cmd[4]}'"] + cmd[5:])}")
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
while True:
ret = process.poll()
if ret is not None:
exit_codes = {
0: "success",
# 82: "cuda out of memory",
# 83: "host out of memory",
84: "runtime error",
# 85: "no solution",
# 86: "invalid solution",
87: "out of fuel",
}
try:
_, stderr = process.communicate(timeout=0.1)
ret = process.returncode
if not os.path.exists(output_file):
if ret == 0:
raise Exception(f"no output")
else:
raise Exception(f"failed with exit code {ret}: {process.stderr.read().decode()}")
raise Exception(f"failed with exit code {ret}: {stderr}")
start = now()
cmd = [
@ -133,13 +124,11 @@ def run_tig_runtime(nonce, batch, so_path, ptx_path, results_dir):
with open(output_file, "w") as f:
json.dump(d, f)
break
elif batch["id"] not in PROCESSING_BATCH_IDS:
process.kill()
logger.debug(f"batch {batch['id']}, nonce {nonce} stopped")
break
time.sleep(0.1)
except subprocess.TimeoutExpired:
if batch["id"] not in PROCESSING_BATCH_IDS:
process.kill()
logger.debug(f"batch {batch['id']}, nonce {nonce} stopped")
break
logger.debug(f"batch {batch['id']}, nonce {nonce} finished, took {now() - start}ms")

View File

@ -36,3 +36,5 @@ c005 = ["cuda", "tig-algorithms/c005", "tig-challenges/c005"]
hypergraph = ["c005"]
c006 = ["cuda", "tig-algorithms/c006", "tig-challenges/c006"]
neuralnet_optimizer = ["c006"]
c007 = ["tig-algorithms/c007", "tig-challenges/c007"]
job_scheduling = ["c007"]

View File

@ -46,8 +46,12 @@ case "$CHALLENGE" in
build_so $ALGORITHM
build_ptx $ALGORITHM
;;
job_scheduling)
echo "Building ALGORITHM '$ALGORITHM' for CHALLENGE 'job_scheduling'"
build_so $ALGORITHM
;;
*)
echo "Error: Invalid CHALLENGE value. Must be one of: satisfiability, knapsack, vehicle_routing, vector_search, hypergraph, neuralnet_optimizer"
echo "Error: Invalid CHALLENGE value. Must be one of: satisfiability, knapsack, vehicle_routing, vector_search, hypergraph, neuralnet_optimizer, job_scheduling"
exit 1
;;
esac

View File

@ -21,6 +21,7 @@ rand = { version = "0.8.5", default-features = false, features = [
"std_rng",
"small_rng",
] }
rand_distr = "0.4.3"
serde = { version = "1.0.196", features = ["derive"] }
serde_json = { version = "1.0.113" }
statrs = { version = "0.18.0" }
@ -39,3 +40,5 @@ c005 = ["cudarc"]
hypergraph = ["c005"]
c006 = ["cudarc", "cudarc/cublas", "cudarc/cudnn"]
neuralnet_optimizer = ["c006"]
c007 = []
job_scheduling = ["c007"]

View File

@ -104,13 +104,13 @@ better_than_baseline = 1 - connectivity_metric / baseline_connectivity_metric
= 0.077
```
## Our Challenge
## Our Challenge
At TIG, the baseline connectivity is determined using a greedy bipartition approach. The nodes are ordered by degree, then at each bipartition, nodes are assigned to the left or right part based on the number of hyperedges in common with the nodes already in each part. This process is repeated until the desired number of partitions is reached (eg: 64).
At TIG, the baseline connectivity is determined using a greedy bipartition approach. The nodes are ordered by degree, then at each bipartition, nodes are assigned to the left or right part based on the number of hyperedges in common with the nodes already in each part. This process is repeated until the desired number of partitions is reached (e.g. 64).
Each instance of TIG's hypergraph partitioning problem contains 4 random sub-instances, each with its own baseline connectivity metric. For each sub-instance, we calculate how much your connectivity metric is better than the baseline connectivity metric, expressed as a percentage improvement. This improvement percentage is called `better_than_baseline`. Your overall performance is measured by taking the root mean square of these 4 `better_than_baseline` percentages. To pass a difficulty level, this overall score must meet or exceed the specified difficulty target.
Your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution must meet all constraints: every node assigned to exactly one part, every part non-empty, and part sizes at most 1.03× the average. Invalid solutions are not scored.
For precision, `better_than_baseline` is stored as an integer where each unit represents 0.1%. For example, a `better_than_baseline` value of 22 corresponds to 22/1000 = 2.2%.
The evaluated metric is **quality** (a fixed-point integer with 6 decimal places). For this challenge, quality functions as improvement over the baseline: `quality = 1 - (connectivity_metric / baseline_connectivity_metric)` (expressed in the fixed-point format). Higher quality is better. See the challenge code for the precise definition.
## Applications

View File

@ -0,0 +1,77 @@
# Job Scheduling
The Job Scheduling challenge is based on [Flexible Job Shop (FJSP)](https://en.wikipedia.org/wiki/Flexible_job_shop_scheduling): given a set of jobs, each assigned to a product with a fixed sequence of operations, schedule all operations on eligible machines to minimize the **makespan** (the maximum completion time across all jobs). Each operation can run on a subset of machines, and each eligible machine has its own processing time for that operation.
## Challenge Overview
For our challenge, we use flexible job-shop instances with configurable difficulty. Parameters that affect difficulty include:
- **Number of jobs** and **number of machines** — larger instances increase the search space.
- **Machine flexibility** — how many machines are eligible per operation (JSSP-style vs FJSP-style).
- **Product variety** — one shared route vs multiple products with different operation sequences.
- **Routing structure** — strict sequential order vs mixed or random operation order per product.
**Constraints.** A valid solution must satisfy:
- `job_schedule.len()` equals `num_jobs`; each job has one entry.
- Each job's schedule length matches its product's number of operations.
- Each `(machine_id, start_time)` uses an eligible machine for that operation.
- Operations for a job are sequential: each start time ≥ previous operation's finish time.
- No machine processes overlapping operations.
**Objective.** The goal is to minimize makespan (last finish time of all jobs).
## Example
Consider an instance with 4 jobs, 3 machines, 3 operation types, and 2 products. Product 0 uses op0 and op1; product 1 uses op1 and op2. Op0 can run on machines 0 and 1; op1 on 0, 1, 2; op2 only on machine 2.
```
num_jobs = 4
num_machines = 3
num_operations = 3 # three operation types: op0, op1, op2
jobs_per_product = [2, 2] # jobs 0-1 are product 0, jobs 2-3 are product 1
# product_processing_times[product][op] = map: machine_id -> processing_time
# Product 0: op0, op1. Product 1: op1, op2.
# op0: machines 0,1. op1: machines 0,1,2. op2: machine 2 only.
product_processing_times = [
[ {0: 3, 1: 4}, {0: 2, 1: 1, 2: 3} ], # product 0: Op0, Op1
[ {0: 2, 1: 1, 2: 3}, {2: 4} ], # product 1: Op1, Op2
]
```
A feasible solution:
```
job_schedule = [
[(0, 0), (1, 4)], # Job 0 (product 0): Op0 on machine 0, Op1 on machine 1
[(1, 0), (0, 4)], # Job 1 (product 0): Op0 on machine 1, Op1 on machine 0
[(2, 0), (2, 3)], # Job 2 (product 1): Op1 on machine 2, Op2 on machine 2
[(1, 5), (2, 7)], # Job 3 (product 1): Op1 on machine 1, Op2 on machine 2
]
```
Verification:
- **Job 0:** Op0 on machine 0 from 0→3, Op1 on machine 1 from 4→5 → completion time 5.
- **Job 1:** Op0 on machine 1 from 0→4, Op1 on machine 0 from 4→6 → completion time 6.
- **Job 2:** Op1 on machine 2 from 0→3, Op2 on machine 2 from 3→7 → completion time 7.
- **Job 3:** Op1 on machine 1 from 5→6, Op2 on machine 2 from 7→11 → completion time 11.
No machine has overlapping operations; each job's operations are sequential. The **makespan** is max(5, 6, 7, 11) = **11**.
## Our Challenge
In TIG, your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution must meet all constraints above; invalid solutions are not scored.
The evaluated metric is **quality** (a fixed-point integer with 6 decimal places), comparing your makespan to a greedy dispatching-rule baseline: `quality = 1.0 - make_span / greedy_makespan`
Higher quality is better. See the challenge code for the precise encoding.
**Problem types your solver should handle:**
- **Strict:** Fixed machine assignments (flexibility = 1.0, JSSP-style), single shared route, strict sequential order (flow_structure = 0.0), 20% reentrance, moderate product variety.
- **Parallel:** Multiple eligible machines per operation (flexibility = 3.0, FJSP-style), single shared route, strict order, 20% reentrance, moderate product variety.
- **Random:** Fixed machine assignments (flexibility = 1.0), multiple routes with mixed routing (flow_structure = 0.4), no reentrance, high product variety.
- **Complex:** Multiple eligible machines (flexibility = 3.0), multiple routes with mixed routing (flow_structure = 0.4), 20% reentrance, high product variety.
- **Chaotic:** Very high machine flexibility (flexibility = 10.0), many routes with full randomization (flow_structure = 1.0), no reentrance, high product variety.

View File

@ -0,0 +1,504 @@
use crate::job_scheduling::{Challenge, Solution};
use anyhow::{anyhow, Result};
use rand::seq::SliceRandom;
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde_json::{Map, Value};
use std::cmp::Ordering;
use std::collections::HashMap;
// Effort level used by `solve_challenge` when the platform supplies no
// hyperparameters (0 => 10 random restarts, no top-k replay phase).
const DEFAULT_EFFORT: usize = 0;
// Blend factor for an operation's "work" estimate:
// work = avg_time * (1 - WORK_MIN_WEIGHT) + min_time * WORK_MIN_WEIGHT.
const WORK_MIN_WEIGHT: f64 = 0.3;
/// Mean processing time across an operation's eligible machines.
///
/// Returns 0.0 for an operation with no eligible machines.
fn average_processing_time(operation: &HashMap<usize, u32>) -> f64 {
    let count = operation.len();
    match count {
        0 => 0.0,
        _ => {
            let total: u32 = operation.values().sum();
            f64::from(total) / count as f64
        }
    }
}
/// Smallest processing time among the operation's eligible machines,
/// or 0.0 when the operation has none.
fn min_processing_time(operation: &HashMap<usize, u32>) -> f64 {
    operation.values().min().map_or(0.0, |&t| t as f64)
}
/// Earliest finish time achievable for `operation` starting no sooner than
/// `time`, over all of its eligible machines.
///
/// Each machine's finish is `max(time, machine_available) + proc_time`.
/// Returns `u32::MAX` when the operation has no eligible machines.
fn earliest_end_time(
    time: u32,
    machine_available_time: &[u32],
    operation: &HashMap<usize, u32>,
) -> u32 {
    operation
        .iter()
        .map(|(&machine_id, &proc_time)| time.max(machine_available_time[machine_id]) + proc_time)
        .min()
        .unwrap_or(u32::MAX)
}
/// Priority rules deciding which ready operation a free machine runs next.
/// Each rule produces an f64 priority where higher wins (see `run_dispatch_rule`).
#[derive(Clone, Copy)]
enum DispatchRule {
    // Prefer the job with the most estimated work remaining.
    MostWorkRemaining,
    // Prefer the job with the most operations remaining.
    MostOpsRemaining,
    // Prefer the operation with the fewest eligible machines (negated count).
    LeastFlexibility,
    // Prefer the shortest processing time on this machine (negated time).
    ShortestProcTime,
    // Prefer the longest processing time on this machine.
    LongestProcTime,
}
/// A ready operation that a specific free machine could dispatch now,
/// together with the fields used for ranking and tie-breaking.
#[derive(Clone, Copy)]
struct Candidate {
    // Index of the job owning the candidate operation.
    job: usize,
    // Rule-dependent score; higher is better (see `DispatchRule`).
    priority: f64,
    // Finish time if dispatched on the machine under consideration.
    machine_end: u32,
    // Processing time on that machine.
    proc_time: u32,
    // Number of machines eligible for this operation (tie-breaker).
    flexibility: usize,
}
/// A complete schedule: per-job ordered `(machine, start_time)` pairs
/// plus the resulting makespan.
struct ScheduleResult {
    job_schedule: Vec<Vec<(usize, u32)>>,
    makespan: u32,
}
/// Outcome and parameters of one randomized restart, recorded so the best
/// restarts can be replayed with perturbed settings in the local-search phase.
struct RestartResult {
    makespan: u32,
    rule: DispatchRule,
    random_top_k: usize,
    // Seed used for the restart's local RNG; replays derive new seeds from it.
    seed: u64,
}
fn better_candidate(candidate: &Candidate, best: &Candidate, eps: f64) -> bool {
if candidate.priority > best.priority + eps {
return true;
}
if (candidate.priority - best.priority).abs() <= eps {
if candidate.machine_end < best.machine_end {
return true;
}
if candidate.machine_end == best.machine_end {
if candidate.proc_time < best.proc_time {
return true;
}
if candidate.proc_time == best.proc_time {
if candidate.flexibility < best.flexibility {
return true;
}
if candidate.flexibility == best.flexibility && candidate.job < best.job {
return true;
}
}
}
}
false
}
/// Simulate a dispatching-rule schedule under one priority `rule`.
///
/// An event clock advances through time; at each event, every machine that is
/// free picks the highest-ranked ready operation it can process. When
/// `random_top_k > 1` and an `rng` is supplied, each machine instead picks
/// uniformly among its top-k ranked candidates (randomized restart mode).
///
/// `job_products`, `product_work_times`, `job_ops_len` and `job_total_work`
/// are precomputed lookups built in `solve_challenge_with_params`.
///
/// Returns the per-job `(machine, start_time)` assignments and the makespan,
/// or an error if the dispatcher stalls with operations left unscheduled.
fn run_dispatch_rule(
    challenge: &Challenge,
    job_products: &[usize],
    product_work_times: &[Vec<f64>],
    job_ops_len: &[usize],
    job_total_work: &[f64],
    rule: DispatchRule,
    random_top_k: Option<usize>,
    rng: Option<&mut SmallRng>,
) -> Result<ScheduleResult> {
    let num_jobs = challenge.num_jobs;
    let num_machines = challenge.num_machines;
    // Per-job cursor into its operation sequence, and when it is next ready.
    let mut job_next_op_idx = vec![0usize; num_jobs];
    let mut job_ready_time = vec![0u32; num_jobs];
    let mut machine_available_time = vec![0u32; num_machines];
    let mut job_schedule = job_ops_len
        .iter()
        .map(|&ops_len| Vec::with_capacity(ops_len))
        .collect::<Vec<_>>();
    // Remaining-work estimates drive the MostWorkRemaining priority.
    let mut job_remaining_work = job_total_work.to_vec();
    let mut remaining_ops = job_ops_len.iter().sum::<usize>();
    let mut time = 0u32;
    // Tolerance for treating f64 priorities as tied (tie-breakers then apply).
    let eps = 1e-9_f64;
    let random_top_k = random_top_k.unwrap_or(0);
    let mut rng = rng;
    let use_random = random_top_k > 1 && rng.is_some();
    while remaining_ops > 0 {
        // Machines free at the current time, in deterministic order
        // (shuffled in random mode so machine priority also varies).
        let mut available_machines = (0..num_machines)
            .filter(|&m| machine_available_time[m] <= time)
            .collect::<Vec<usize>>();
        available_machines.sort_unstable();
        if use_random {
            available_machines.shuffle(rng.as_mut().unwrap());
        }
        let mut scheduled_any = false;
        for &machine in available_machines.iter() {
            let mut best_candidate: Option<Candidate> = None;
            if use_random {
                // Randomized mode: collect and rank every candidate, then
                // pick uniformly among the top `random_top_k`.
                let mut candidates: Vec<Candidate> = Vec::new();
                for job in 0..num_jobs {
                    // Skip finished jobs and jobs still running a previous op.
                    if job_next_op_idx[job] >= job_ops_len[job] {
                        continue;
                    }
                    if job_ready_time[job] > time {
                        continue;
                    }
                    let product = job_products[job];
                    let op_idx = job_next_op_idx[job];
                    let op_times = &challenge.product_processing_times[product][op_idx];
                    // Skip jobs whose next op cannot run on this machine.
                    let proc_time = match op_times.get(&machine) {
                        Some(&value) => value,
                        None => continue,
                    };
                    // Only consider this machine if it achieves the op's
                    // earliest possible finish across all eligible machines.
                    let earliest_end = earliest_end_time(time, &machine_available_time, op_times);
                    let machine_end = time.max(machine_available_time[machine]) + proc_time;
                    if machine_end != earliest_end {
                        continue;
                    }
                    let flexibility = op_times.len();
                    // Higher priority wins; negation encodes "least"/"shortest".
                    let priority = match rule {
                        DispatchRule::MostWorkRemaining => job_remaining_work[job],
                        DispatchRule::MostOpsRemaining => {
                            (job_ops_len[job] - job_next_op_idx[job]) as f64
                        }
                        DispatchRule::LeastFlexibility => -(flexibility as f64),
                        DispatchRule::ShortestProcTime => -(proc_time as f64),
                        DispatchRule::LongestProcTime => proc_time as f64,
                    };
                    candidates.push(Candidate {
                        job,
                        priority,
                        machine_end,
                        proc_time,
                        flexibility,
                    });
                }
                if !candidates.is_empty() {
                    // Rank: priority desc, then earlier finish, shorter proc
                    // time, lower flexibility, lower job id — a total order so
                    // the random pick is reproducible from the seed.
                    candidates.sort_by(|a, b| {
                        let ord = b
                            .priority
                            .partial_cmp(&a.priority)
                            .unwrap_or(Ordering::Equal);
                        if ord != Ordering::Equal {
                            return ord;
                        }
                        let ord = a.machine_end.cmp(&b.machine_end);
                        if ord != Ordering::Equal {
                            return ord;
                        }
                        let ord = a.proc_time.cmp(&b.proc_time);
                        if ord != Ordering::Equal {
                            return ord;
                        }
                        let ord = a.flexibility.cmp(&b.flexibility);
                        if ord != Ordering::Equal {
                            return ord;
                        }
                        a.job.cmp(&b.job)
                    });
                    let k = random_top_k.min(candidates.len());
                    let pick = rng.as_mut().unwrap().gen_range(0..k);
                    best_candidate = Some(candidates[pick]);
                }
            } else {
                // Deterministic mode: single pass keeping the best candidate
                // under the same ranking (see `better_candidate`).
                for job in 0..num_jobs {
                    if job_next_op_idx[job] >= job_ops_len[job] {
                        continue;
                    }
                    if job_ready_time[job] > time {
                        continue;
                    }
                    let product = job_products[job];
                    let op_idx = job_next_op_idx[job];
                    let op_times = &challenge.product_processing_times[product][op_idx];
                    let proc_time = match op_times.get(&machine) {
                        Some(&value) => value,
                        None => continue,
                    };
                    let earliest_end = earliest_end_time(time, &machine_available_time, op_times);
                    let machine_end = time.max(machine_available_time[machine]) + proc_time;
                    if machine_end != earliest_end {
                        continue;
                    }
                    let flexibility = op_times.len();
                    let priority = match rule {
                        DispatchRule::MostWorkRemaining => job_remaining_work[job],
                        DispatchRule::MostOpsRemaining => {
                            (job_ops_len[job] - job_next_op_idx[job]) as f64
                        }
                        DispatchRule::LeastFlexibility => -(flexibility as f64),
                        DispatchRule::ShortestProcTime => -(proc_time as f64),
                        DispatchRule::LongestProcTime => proc_time as f64,
                    };
                    let candidate = Candidate {
                        job,
                        priority,
                        machine_end,
                        proc_time,
                        flexibility,
                    };
                    if best_candidate
                        .as_ref()
                        .map_or(true, |best| better_candidate(&candidate, best, eps))
                    {
                        best_candidate = Some(candidate);
                    }
                }
            }
            if let Some(candidate) = best_candidate {
                // Commit the dispatch: record the op, advance the job cursor,
                // and block the machine until the op finishes.
                let job = candidate.job;
                let product = job_products[job];
                let op_idx = job_next_op_idx[job];
                let op_times = &challenge.product_processing_times[product][op_idx];
                let proc_time = op_times[&machine];
                let start_time = time.max(machine_available_time[machine]);
                let end_time = start_time + proc_time;
                job_schedule[job].push((machine, start_time));
                job_next_op_idx[job] += 1;
                job_ready_time[job] = end_time;
                machine_available_time[machine] = end_time;
                job_remaining_work[job] -= product_work_times[product][op_idx];
                // Clamp accumulated f64 error so remaining work never goes negative.
                if job_remaining_work[job] < 0.0 {
                    job_remaining_work[job] = 0.0;
                }
                remaining_ops -= 1;
                scheduled_any = true;
            }
        }
        if remaining_ops == 0 {
            break;
        }
        // Compute next event time (either machine becoming available or job becoming ready)
        let mut next_time: Option<u32> = None;
        for &t in machine_available_time.iter() {
            if t > time {
                next_time = Some(next_time.map_or(t, |best| best.min(t)));
            }
        }
        for job in 0..num_jobs {
            if job_next_op_idx[job] < job_ops_len[job] && job_ready_time[job] > time {
                let t = job_ready_time[job];
                next_time = Some(next_time.map_or(t, |best| best.min(t)));
            }
        }
        // Advance time to next event
        time = next_time.ok_or_else(|| {
            if scheduled_any {
                anyhow!("No next event time found while operations remain unscheduled")
            } else {
                anyhow!("No schedulable operations remain; dispatching rules stalled")
            }
        })?;
    }
    // After the loop each job's ready time is its completion time;
    // the latest completion is the makespan.
    let makespan = job_ready_time.iter().copied().max().unwrap_or(0);
    Ok(ScheduleResult {
        job_schedule,
        makespan,
    })
}
/// Platform entry point: run the solver at the default effort level.
///
/// Hyperparameters are currently unused. The solver calls `save_solution`
/// whenever it improves; the last saved solution is the one evaluated.
pub fn solve_challenge(
    challenge: &Challenge,
    save_solution: &dyn Fn(&Solution) -> Result<()>,
    _hyperparameters: &Option<Map<String, Value>>,
) -> Result<()> {
    solve_challenge_with_effort(challenge, save_solution, DEFAULT_EFFORT)
}
/// Run the solver with search intensity scaled by `effort`.
///
/// * effort 0 — 10 random restarts, no top-k replay (greedy baseline setting).
/// * effort 1 — 200 restarts, replay the best 2.
/// * effort e>=2 — 200 + 50*e restarts, replay the best 2*e.
///
/// Local-search tries always scale as 1 + 3*effort.
pub fn solve_challenge_with_effort(
    challenge: &Challenge,
    save_solution: &dyn Fn(&Solution) -> Result<()>,
    effort: usize,
) -> Result<()> {
    let (random_restarts, top_k) = match effort {
        0 => (10, 0),
        1 => (200, 2),
        e => (
            200usize.saturating_add(50usize.saturating_mul(e)),
            2usize.saturating_mul(e),
        ),
    };
    let local_search_tries = 1usize.saturating_add(3usize.saturating_mul(effort));
    solve_challenge_with_params(
        challenge,
        save_solution,
        random_restarts,
        top_k,
        local_search_tries,
    )
}
/// Core search: try every deterministic dispatch rule, then randomized
/// restarts, then a short local search around the best restarts. The best
/// schedule is saved via `save_solution` whenever it improves (the platform
/// evaluates the last save).
fn solve_challenge_with_params(
    challenge: &Challenge,
    save_solution: &dyn Fn(&Solution) -> Result<()>,
    random_restarts: usize,
    top_k: usize,
    local_search_tries: usize,
) -> Result<()> {
    // Persist the current best schedule.
    let save_best = |best: &ScheduleResult| -> Result<()> {
        save_solution(&Solution {
            job_schedule: best.job_schedule.clone(),
        })
    };
    let num_jobs = challenge.num_jobs;
    // Expand jobs_per_product counts into a flat job -> product lookup.
    let mut job_products = Vec::with_capacity(num_jobs);
    for (product, count) in challenge.jobs_per_product.iter().enumerate() {
        for _ in 0..*count {
            job_products.push(product);
        }
    }
    if job_products.len() != num_jobs {
        return Err(anyhow!(
            "Job count mismatch. Expected {}, got {}",
            num_jobs,
            job_products.len()
        ));
    }
    // Blend average and minimum processing time per operation (WORK_MIN_WEIGHT)
    // into the "work" estimate used by the MostWorkRemaining rule.
    let mut product_work_times = Vec::with_capacity(challenge.product_processing_times.len());
    for product_ops in challenge.product_processing_times.iter() {
        let mut work_ops = Vec::with_capacity(product_ops.len());
        for op in product_ops.iter() {
            let avg = average_processing_time(op);
            let min = min_processing_time(op);
            let work = avg * (1.0 - WORK_MIN_WEIGHT) + min * WORK_MIN_WEIGHT;
            work_ops.push(work);
        }
        product_work_times.push(work_ops);
    }
    // Per-job operation counts and total work, via each job's product.
    let mut job_ops_len = Vec::with_capacity(num_jobs);
    let mut job_total_work: Vec<f64> = Vec::with_capacity(num_jobs);
    for &product in job_products.iter() {
        let work_ops = &product_work_times[product];
        job_ops_len.push(work_ops.len());
        job_total_work.push(work_ops.iter().sum());
    }
    let rules = [
        DispatchRule::MostWorkRemaining,
        DispatchRule::MostOpsRemaining,
        DispatchRule::LeastFlexibility,
        DispatchRule::ShortestProcTime,
        DispatchRule::LongestProcTime,
    ];
    // Phase 1: one deterministic pass per rule; keep the lowest makespan.
    let mut best_result: Option<ScheduleResult> = None;
    for rule in rules.iter().copied() {
        let result = run_dispatch_rule(
            challenge,
            &job_products,
            &product_work_times,
            &job_ops_len,
            &job_total_work,
            rule,
            None,
            None,
        )?;
        let is_better = best_result
            .as_ref()
            .map_or(true, |best| result.makespan < best.makespan);
        if is_better {
            best_result = Some(result);
        }
    }
    let mut best_result = best_result.ok_or_else(|| anyhow!("No valid schedule produced"))?;
    save_best(&best_result)?;
    // Phase 2: randomized restarts. All randomness derives from the challenge
    // seed so runs are reproducible; each restart records its own seed so the
    // best ones can be replayed in phase 3.
    let mut top_restarts: Vec<RestartResult> = Vec::new();
    if random_restarts > 0 {
        let mut rng = SmallRng::from_seed(challenge.seed);
        for _ in 1..=random_restarts {
            let seed = rng.r#gen::<u64>();
            let rule = rules[rng.gen_range(0..rules.len())];
            let random_top_k = rng.gen_range(2..=5);
            let mut local_rng = SmallRng::seed_from_u64(seed);
            let result = run_dispatch_rule(
                challenge,
                &job_products,
                &product_work_times,
                &job_ops_len,
                &job_total_work,
                rule,
                Some(random_top_k),
                Some(&mut local_rng),
            )?;
            let makespan = result.makespan;
            let is_better = makespan < best_result.makespan;
            if is_better {
                best_result = result;
                save_best(&best_result)?;
            }
            if top_k > 0 {
                // Maintain the `top_k` best restarts (sorted; worst popped).
                top_restarts.push(RestartResult {
                    makespan,
                    rule,
                    random_top_k,
                    seed,
                });
                top_restarts.sort_by(|a, b| a.makespan.cmp(&b.makespan));
                if top_restarts.len() > top_k {
                    top_restarts.pop();
                }
            }
        }
    }
    // Phase 3: replay each top restart with a perturbed top-k (same, one
    // smaller, one larger — never below 2) and fresh seeds derived from the
    // restart's recorded seed.
    if !top_restarts.is_empty() {
        for restart in top_restarts.iter() {
            for attempt in 0..local_search_tries {
                let local_seed = restart.seed.wrapping_add(attempt as u64 + 1);
                let mut local_rng = SmallRng::seed_from_u64(local_seed);
                let local_k = match attempt % 3 {
                    0 => restart.random_top_k,
                    1 => restart.random_top_k.saturating_sub(1),
                    _ => restart.random_top_k.saturating_add(1),
                }
                .max(2);
                let result = run_dispatch_rule(
                    challenge,
                    &job_products,
                    &product_work_times,
                    &job_ops_len,
                    &job_total_work,
                    restart.rule,
                    Some(local_k),
                    Some(&mut local_rng),
                )?;
                if result.makespan < best_result.makespan {
                    best_result = result;
                    save_best(&best_result)?;
                }
            }
        }
    }
    // Final save of the overall best schedule.
    save_solution(&Solution {
        job_schedule: best_result.job_schedule,
    })?;
    Ok(())
}

View File

@ -0,0 +1 @@
pub mod dispatching_rules;

View File

@ -0,0 +1,400 @@
use crate::QUALITY_PRECISION;
mod baselines;
use anyhow::{anyhow, Result};
use rand::{
distributions::Distribution,
rngs::{SmallRng, StdRng},
Rng, SeedableRng,
};
use rand_distr::Normal;
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
/// Generator knobs derived from a `Flow` preset (see `From<Flow>`).
pub struct FlowConfig {
    // Mean of the Normal distribution used to sample how many machines are
    // eligible per operation type.
    pub avg_op_flexibility: f32,
    // Probability of re-inserting an earlier operation into a route step.
    pub reentrance_level: f32,
    // 0.0 = strict sequential (flow-shop) ordering, 1.0 = fully random
    // ordering; also scales the number of distinct routes.
    pub flow_structure: f32,
    // Scales the number of candidate products relative to the job count.
    pub product_mix_ratio: f32,
}
/// Track difficulty presets; each maps to a `FlowConfig` parameter set.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Flow {
    // Fixed machine assignments, strict sequential routing (JSSP-style).
    STRICT,
    // Multiple eligible machines per operation, strict routing (FJSP-style).
    PARALLEL,
    // Fixed assignments, mixed routing, high product variety.
    RANDOM,
    // Flexible machines plus mixed routing.
    COMPLEX,
    // Very high flexibility with fully randomized routing.
    CHAOTIC,
}
impl From<Flow> for FlowConfig {
    /// Map a flow preset to its generator parameters:
    /// (machine flexibility, reentrance level, flow structure, product mix).
    fn from(flow: Flow) -> Self {
        let (avg_op_flexibility, reentrance_level, flow_structure, product_mix_ratio) =
            match flow {
                Flow::STRICT => (1.0, 0.2, 0.0, 0.5),
                Flow::PARALLEL => (3.0, 0.2, 0.0, 0.5),
                Flow::RANDOM => (1.0, 0.0, 0.4, 1.0),
                Flow::COMPLEX => (3.0, 0.2, 0.4, 1.0),
                Flow::CHAOTIC => (10.0, 0.0, 1.0, 1.0),
            };
        FlowConfig {
            avg_op_flexibility,
            reentrance_level,
            flow_structure,
            product_mix_ratio,
        }
    }
}
impl std::fmt::Display for Flow {
    /// Lowercase preset name; the inverse of the `FromStr` parse.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Flow::STRICT => "strict",
            Flow::PARALLEL => "parallel",
            Flow::RANDOM => "random",
            Flow::COMPLEX => "complex",
            Flow::CHAOTIC => "chaotic",
        };
        f.write_str(label)
    }
}
impl std::str::FromStr for Flow {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"strict" => Ok(Flow::STRICT),
"parallel" => Ok(Flow::PARALLEL),
"random" => Ok(Flow::RANDOM),
"complex" => Ok(Flow::COMPLEX),
"chaotic" => Ok(Flow::CHAOTIC),
_ => Err(anyhow::anyhow!("Invalid flow type: {}", s)),
}
}
}
// Track parameters serialize as a key=value string:
// n = jobs, m = machines, o = operation types, flow = difficulty preset.
impl_kv_string_serde! {
    Track {
        n: usize,
        m: usize,
        o: usize,
        flow: Flow
    }
}
// Solutions serialize as base64; job_schedule[job] lists one
// (machine, start_time) pair per operation, in operation order.
impl_base64_serde! {
    Solution {
        job_schedule: Vec<Vec<(usize, u32)>>,
    }
}
impl Solution {
pub fn new() -> Self {
Self {
job_schedule: Vec::new(),
}
}
}
/// A generated job-scheduling instance.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Challenge {
    // Seed the instance was generated from (also seeds solver restarts).
    pub seed: [u8; 32],
    pub num_jobs: usize,
    pub num_machines: usize,
    // Number of distinct operation types per route.
    pub num_operations: usize,
    // Job count per product; jobs are ordered product-by-product to match.
    pub jobs_per_product: Vec<usize>,
    // each product has a sequence of operations, and each operation has a map of eligible machines to processing times
    pub product_processing_times: Vec<Vec<HashMap<usize, u32>>>,
}
impl Challenge {
    /// Deterministically generate an instance from `seed` and the track
    /// parameters (n jobs, m machines, o operation types, flow preset).
    pub fn generate_instance(seed: &[u8; 32], track: &Track) -> Result<Self> {
        // Re-seed a fast RNG through StdRng so generation is reproducible.
        let mut rng = SmallRng::from_seed(StdRng::from_seed(seed.clone()).r#gen());
        let FlowConfig {
            avg_op_flexibility,
            reentrance_level,
            flow_structure,
            product_mix_ratio,
        } = track.flow.clone().into();
        let n_jobs = track.n;
        let n_machines = track.m;
        let n_op_types = track.o;
        // Upper bounds; both counts shrink below to what is actually used.
        let n_products = 1.max((product_mix_ratio * n_jobs as f32) as usize);
        let n_routes = 1.max((flow_structure * n_jobs as f32) as usize);
        let min_eligible_machines = 1;
        let flexibility_std_dev = 0.5;
        let base_proc_time_min = 1;
        let base_proc_time_max = 200;
        let min_speed_factor = 0.8;
        let max_speed_factor = 1.2;
        // random product for each job, only keep products that have at least one job
        let mut map = HashMap::new();
        let jobs_per_product = (0..n_jobs).fold(Vec::new(), |mut acc, _| {
            // `map` renumbers raw product ids densely in first-seen order.
            let map_len = map.len();
            let product = *map
                .entry(rng.gen_range(0..n_products))
                .or_insert_with(|| map_len);
            if product >= acc.len() {
                acc.push(0);
            }
            acc[product] += 1;
            acc
        });
        // actual number of products (some products may have zero jobs)
        let n_products = jobs_per_product.len();
        // random route for each product, only keep routes that are used
        let mut map = HashMap::new();
        let product_route = (0..n_products)
            .map(|_| {
                let map_len = map.len();
                *map.entry(rng.gen_range(0..n_routes))
                    .or_insert_with(|| map_len)
            })
            .collect::<Vec<usize>>();
        // actual number of routes
        let n_routes = map.len();
        // generate operation sequence for each route
        let routes = (0..n_routes)
            .map(|_| {
                let seq_len = n_op_types;
                let mut base_sequence: Vec<usize> = (0..n_op_types).collect();
                let mut steps = Vec::new();
                // randomly build op sequence
                for _ in 0..seq_len {
                    let next_op_idx = if rng.r#gen::<f32>() < flow_structure {
                        // Job Shop Logic: Random permutation
                        rng.gen_range(0..base_sequence.len())
                    } else {
                        // Flow Shop Logic: Pick next sequential op
                        0
                    };
                    let op_id = base_sequence.remove(next_op_idx);
                    steps.push(op_id);
                }
                for step_idx in (2..steps.len()).rev() {
                    // Reentrance Logic: with probability `reentrance_level`,
                    // re-insert an earlier operation so the route revisits it.
                    if rng.r#gen::<f32>() < reentrance_level {
                        let op_id = steps[rng.gen_range(0..step_idx - 1)];
                        steps.insert(step_idx, op_id);
                    }
                }
                steps
            })
            .collect::<Vec<Vec<usize>>>();
        // generate machine eligibility and base processing time for each operation
        let normal = Normal::new(avg_op_flexibility, flexibility_std_dev).unwrap();
        let all_machines = (0..n_machines).collect::<HashSet<usize>>();
        let op_eligible_machines = (0..n_op_types)
            .map(|i| {
                if avg_op_flexibility as usize >= n_machines {
                    // Fully flexible: every machine is eligible.
                    (0..n_machines).collect::<Vec<usize>>()
                } else {
                    // Seed eligibility so early op types cover distinct machines,
                    // then grow towards a Normal-sampled target flexibility.
                    let mut eligible = HashSet::<usize>::from([if i < n_machines {
                        i
                    } else {
                        rng.gen_range(0..n_machines)
                    }]);
                    if avg_op_flexibility > 1.0 {
                        let target_flex = min_eligible_machines
                            .max(normal.sample(&mut rng) as usize)
                            .min(n_machines);
                        let mut remaining = all_machines
                            .difference(&eligible)
                            .cloned()
                            .collect::<Vec<usize>>();
                        // Sort for determinism (HashSet iteration order varies),
                        // then partial Fisher-Yates draws without replacement.
                        remaining.sort_unstable();
                        let num_to_add = (target_flex - 1).min(remaining.len());
                        for j in 0..num_to_add {
                            let idx = rng.gen_range(j..remaining.len());
                            remaining.swap(j, idx);
                        }
                        eligible.extend(remaining[..num_to_add].iter().cloned());
                    }
                    let mut eligible = eligible.into_iter().collect::<Vec<usize>>();
                    eligible.sort_unstable();
                    eligible
                }
            })
            .collect::<Vec<_>>();
        let base_proc_times = (0..n_op_types)
            .map(|_| rng.gen_range(base_proc_time_min..=base_proc_time_max))
            .collect::<Vec<u32>>();
        // generate processing times for each product according to its route
        let product_processing_times = product_route
            .iter()
            .map(|&r_idx| {
                let route = &routes[r_idx];
                route
                    .iter()
                    .map(|&op_id| {
                        let machines = &op_eligible_machines[op_id];
                        let base_time = base_proc_times[op_id];
                        machines
                            .iter()
                            .map(|&m_id| {
                                // Per-machine speed factor in [0.8, 1.2);
                                // processing time is at least 1 unit.
                                (
                                    m_id,
                                    1.max(
                                        (base_time as f32
                                            * (min_speed_factor
                                                + (max_speed_factor - min_speed_factor)
                                                    * rng.r#gen::<f32>()))
                                            as u32,
                                    ),
                                )
                            })
                            .collect::<HashMap<usize, u32>>()
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        Ok(Challenge {
            seed: seed.clone(),
            num_jobs: n_jobs,
            num_machines: n_machines,
            num_operations: n_op_types,
            jobs_per_product,
            product_processing_times,
        })
    }
    /// Validate `solution` against this instance and return its makespan.
    ///
    /// Checks, in order: job count, each job's operation count, machine
    /// eligibility, sequential ordering within each job, and finally that no
    /// machine runs two operations at once.
    pub fn evaluate_makespan(&self, solution: &Solution) -> Result<u32> {
        if solution.job_schedule.len() != self.num_jobs {
            return Err(anyhow!(
                "Expecting solution to have {} jobs. Got {}",
                self.num_jobs,
                solution.job_schedule.len(),
            ));
        }
        // Jobs are ordered product-by-product, matching jobs_per_product.
        let mut job = 0;
        let mut machine_usage = HashMap::<usize, Vec<(u32, u32)>>::new();
        let mut makespan = 0u32;
        for (product, num_jobs) in self.jobs_per_product.iter().enumerate() {
            for _ in 0..*num_jobs {
                let schedule = &solution.job_schedule[job];
                let processing_times = &self.product_processing_times[product];
                if schedule.len() != processing_times.len() {
                    return Err(anyhow!(
                        "Job {} of product {} expecting {} operations. Got {}",
                        job,
                        product,
                        processing_times.len(),
                        schedule.len(),
                    ));
                }
                let mut min_start_time = 0;
                for (op_idx, &(machine, start_time)) in schedule.iter().enumerate() {
                    let eligible_machines = &processing_times[op_idx];
                    if !eligible_machines.contains_key(&machine) {
                        return Err(anyhow!("Job {} schedule contains ineligible machine", job,));
                    }
                    // An operation may not start before the previous one finishes.
                    if start_time < min_start_time {
                        return Err(anyhow!(
                            "Job {} schedule contains operation starting before previous is complete",
                            job,
                        ));
                    }
                    let finish_time = start_time + eligible_machines[&machine];
                    machine_usage
                        .entry(machine)
                        .or_default()
                        .push((start_time, finish_time));
                    min_start_time = finish_time;
                }
                // min_start_time is the finish time of the job
                if min_start_time > makespan {
                    makespan = min_start_time;
                }
                job += 1;
            }
        }
        // Sort each machine's intervals by start and reject any overlap.
        for (machine, usage) in machine_usage.iter_mut() {
            usage.sort_by_key(|&(start, _)| start);
            for i in 1..usage.len() {
                if usage[i].0 < usage[i - 1].1 {
                    return Err(anyhow!(
                        "Machine {} is scheduled with overlapping jobs",
                        machine,
                    ));
                }
            }
        }
        Ok(makespan)
    }
    // Greedy baseline (effort 0): the makespan a submitted solution must beat.
    conditional_pub!(
        fn compute_greedy_baseline(&self) -> Result<Solution> {
            let solution = RefCell::new(Solution::new());
            let save_solution_fn = |s: &Solution| -> Result<()> {
                *solution.borrow_mut() = s.clone();
                Ok(())
            };
            baselines::dispatching_rules::solve_challenge_with_effort(self, &save_solution_fn, 0)?;
            Ok(solution.into_inner())
        }
    );
    // Stronger baseline (effort 1): the reference point for the quality score.
    conditional_pub!(
        fn compute_sota_baseline(&self) -> Result<Solution> {
            let solution = RefCell::new(Solution::new());
            let save_solution_fn = |s: &Solution| -> Result<()> {
                *solution.borrow_mut() = s.clone();
                Ok(())
            };
            baselines::dispatching_rules::solve_challenge_with_effort(self, &save_solution_fn, 1)?;
            Ok(solution.into_inner())
        }
    );
    // Score a solution: it must not be worse than the greedy baseline, and
    // quality is the relative improvement over the SOTA baseline, clamped and
    // scaled to fixed point (QUALITY_PRECISION).
    conditional_pub!(
        fn evaluate_solution(&self, solution: &Solution) -> Result<i32> {
            let makespan = self.evaluate_makespan(solution)?;
            let greedy_solution = self.compute_greedy_baseline()?;
            let greedy_makespan = self.evaluate_makespan(&greedy_solution)?;
            if makespan > greedy_makespan {
                return Err(anyhow!(
                    "Makespan {} must be better than greedy baseline makespan {}",
                    makespan,
                    greedy_makespan
                ));
            }
            let sota_solution = self.compute_sota_baseline()?;
            let sota_makespan = self.evaluate_makespan(&sota_solution)?;
            let quality = (sota_makespan as f64 - makespan as f64) / sota_makespan as f64;
            let quality = quality.clamp(-10.0, 10.0) * QUALITY_PRECISION as f64;
            let quality = quality.round() as i32;
            Ok(quality)
        }
    );
}

View File

@ -1,26 +1,29 @@
# Knapsack Problem
The quadratic knapsack problem is one of the most popular variants of the single knapsack problem, with applications in many optimization contexts. The aim is to maximize the value of individual items placed in the knapsack while satisfying a weight constraint. However, pairs of items also have positive interaction values, contributing to the total value within the knapsack.
The quadratic knapsack problem is one of the most popular variants of the single knapsack problem, with applications in many optimization contexts. The aim is to select items to maximize the value of the knapsack while satisfying a weight constraint. Pairs of items also have positive interaction values, contributing to the total value within the knapsack.
## Challenge Overview
For our challenge, we use a version of the quadratic knapsack problem with configurable difficulty, where the following two parameters can be adjusted in order to vary the difficulty of the challenge:
For our challenge, we use a version of the quadratic knapsack problem with configurable difficulty, framed as **team formation**. Each "item" is a **participant**; you select a subset of participants (a team) subject to a weight (budget) constraint. Value comes from how well participants work together, based on shared projects.
- Parameter 1: $num\textunderscore{ }items$ is the number of items from which you need to select a subset to put in the knapsack.
- Parameter 2: $better\textunderscore{ }than\textunderscore{ }baseline \geq 1$ (see Our Challenge)
- Parameter 1: $num\textunderscore{ }items$ is the number of participants (items) from which you select a subset.
- Parameter 2: quality target (see Our Challenge).
The larger the $num\textunderscore{ }items$, the more number of possible $S_{knapsack}$, making the challenge more difficult. Also, the higher $better\textunderscore{ }than\textunderscore{ }baseline$, the less likely a given $S_{knapsack}$ will be a solution, making the challenge more difficult.
The larger $num\textunderscore{ }items$, the larger the search space. The generation method is as follows (see the challenge code for full detail):
The weight $w_i$ of each of the $num\textunderscore{ }items$ is an integer, chosen independently, uniformly at random, and such that each of the item weights $1 <= w_i <= 50$, for $i=1,2,...,num\textunderscore{ }items$. The values of the items are nonzero with a density of 25%, meaning they have a 25% probability of being nonzero. The nonzero individual values of the item, $v_i$, and the nonzero interaction values of pairs of items, $V_{ij}$, are selected at random from the range $[1,100]$.
- **Participants and projects:** There is a large pool of projects. Each participant is assigned a set of projects (cardinality and assignment follow a lognormal-based process so that participants often share projects with others in the same "region" of the project space).
- **Weights:** Each participant has an integer weight in $[1, 10]$, chosen uniformly at random. The knapsack capacity (max weight) is a percentage of the total weight of all participants.
- **Individual values:** $v_i = 0$ for all $i$ (no linear term).
- **Interaction values:** For $i \neq j$, $V_{ij}$ is based on participants $i$ and $j$ being in the same projects: it is the **Jaccard similarity** of their project sets (intersection size / union size), scaled to an integer (e.g. multiplied by 1000). If they share no projects or the union is empty, $V_{ij} = 0$. The matrix is symmetric: $V_{ij} = V_{ji}$.
The total value of a knapsack is determined by summing up the individual values of items in the knapsack, as well as the interaction values of every pair of items \((i,j)\), where \( i > j \), in the knapsack:
The total value of a knapsack (team) is the sum of individual values plus the sum of interaction values for every pair in the selection:
$$
V_{knapsack} = \sum_{i \in knapsack}{v_i} + \sum_{(i,j)\in knapsack}{V_{ij}}
V_{knapsack} = \sum_{i \in knapsack}{v_i} + \sum_{(i,j)\in knapsack,\, i < j}{V_{ij}}
$$
We impose a weight constraint $W(S_{knapsack}) <= 0.5 \cdot W(S_{all})$, where the knapsack can hold at most half the total weight of all items.
A valid solution must use unique participant indices and have total weight at most the given capacity.
# Example
@ -62,6 +65,6 @@ better_than_baseline = total_value / baseline_value - 1
# Our Challenge
In TIG, the baseline value is determined by a two-stage approach. First, items are selected based on their value-to-weight ratio, including interaction values, until the capacity is reached. Then, a tabu-based local search refines the solution by swapping items to improve value while avoiding reversals, with early termination for unpromising swaps.
Each instance of TIG's knapsack problem contains 16 random sub-instances, each with its own baseline selection and baseline value. For each sub-instance, we calculate how much your selection's total value exceeds the baseline value, expressed as a percentage improvement. This improvement percentage is called `better_than_baseline`. Your overall performance is measured by taking the root mean square of these 16 `better_than_baseline` percentages. To pass a difficulty level, this overall score must meet or exceed the specified difficulty target.
Your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution must meet the constraints: only **unique** item indices may be selected, and total weight must **not exceed** the knapsack capacity. Invalid solutions are not scored.
For precision, `better_than_baseline` is stored as an integer where each unit represents 0.01%. For example, a `better_than_baseline` value of 150 corresponds to 150/10000 = 1.5%.
The evaluated metric is **quality** (a fixed-point integer with 6 decimal places). For knapsack, quality functions as improvement over the baseline: `quality = (total_value / baseline_value) - 1` (expressed in the fixed-point format). Higher quality is better. See the challenge code for the precise definition.

View File

@ -3,13 +3,20 @@ mod baselines;
use anyhow::{anyhow, Result};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::collections::HashSet;
use std::{cell::RefCell, collections::HashSet, f64::consts::PI};
/// Generate a sample from lognormal distribution using Box-Muller transform
/// (exp of a Normal(mean, std_dev) draw).
// NOTE(review): rng.gen::<f64>() yields u1 in [0, 1); u1 == 0.0 would make
// ln(u1) = -inf and the sample degenerate — vanishingly unlikely, but worth
// confirming the intended behavior.
fn sample_lognormal(rng: &mut SmallRng, mean: f64, std_dev: f64) -> f64 {
    let u1: f64 = rng.r#gen();
    let u2: f64 = rng.r#gen();
    // Box-Muller: z is a standard normal deviate.
    let z = (-2.0 * u1.ln()).sqrt() * (2.0 * PI * u2).cos();
    (mean + std_dev * z).exp()
}
impl_kv_string_serde! {
Track {
n_items: usize,
density: u32,
budget: u32,
}
}
@ -38,48 +45,99 @@ pub struct Challenge {
impl Challenge {
pub fn generate_instance(seed: &[u8; 32], track: &Track) -> Result<Self> {
let mut rng = SmallRng::from_seed(seed.clone());
// Set constant density for value generation
let density = track.density as f64 / 100.0;
let n_participants = track.n_items;
let n_projects = 30000;
let log_normal_mean = 4.0;
let log_normal_std = 1.0;
let max_weight_val = 10;
// Generate weights w_i in the range [1, 50]
let weights: Vec<u32> = (0..track.n_items).map(|_| rng.gen_range(1..=50)).collect();
// Step 1: Generate subsets of projects using lognormal cardinalities
let mut subsets: Vec<Vec<usize>> = Vec::new();
let mut counter: usize = 0;
while counter < n_projects {
let cardinality =
1 + sample_lognormal(&mut rng, log_normal_mean, log_normal_std) as usize;
let end = (counter + cardinality).min(n_projects);
subsets.push((counter..end).collect());
counter = end;
}
let n_subsets = subsets.len();
// Generate values v_i in the range [1, 100] with density probability, 0 otherwise
let values: Vec<u32> = (0..track.n_items)
.map(|_| {
if rng.gen_bool(density) {
rng.gen_range(1..=100)
} else {
0
}
})
// Step 2: Determine number of projects per participant
let n_projects_per_participant: Vec<usize> = (0..n_participants)
.map(|_| 1 + sample_lognormal(&mut rng, log_normal_mean, log_normal_std) as usize)
.collect();
// Generate interaction values V_ij with the following properties:
// - V_ij == V_ji (symmetric matrix)
// - V_ii == 0 (diagonal is zero)
// - Values are in range [1, 100] with density probability, 0 otherwise
let mut interaction_values: Vec<Vec<i32>> = vec![vec![0; track.n_items]; track.n_items];
// Step 3: Assign projects to each participant
let mut projects_dict: Vec<HashSet<usize>> = Vec::with_capacity(n_participants);
for i in 0..n_participants {
let subset_id = rng.gen_range(0..n_subsets);
let subset = &subsets[subset_id];
let cardinality_of_subset = subset.len();
for i in 0..track.n_items {
for j in (i + 1)..track.n_items {
let value = if rng.gen_bool(density) {
rng.gen_range(1..=100)
} else {
0
};
let selected_projects: HashSet<usize> = if n_projects_per_participant[i]
< cardinality_of_subset
{
// Sample without replacement from subset
let mut selected: Vec<usize> = subset.clone();
for j in 0..n_projects_per_participant[i] {
let idx = rng.gen_range(j..selected.len());
selected.swap(j, idx);
}
selected
.into_iter()
.take(n_projects_per_participant[i])
.collect()
} else {
// Take all from subset and sample more from remaining projects
let mut selected: HashSet<usize> = subset.iter().cloned().collect();
let n_projects_to_choose = n_projects_per_participant[i] - cardinality_of_subset;
// Set both V_ij and V_ji due to symmetry
interaction_values[i][j] = value;
interaction_values[j][i] = value;
// Sample additional projects not in the subset
let mut remaining: Vec<usize> =
(0..n_projects).filter(|p| !selected.contains(p)).collect();
for j in 0..n_projects_to_choose.min(remaining.len()) {
let idx = rng.gen_range(j..remaining.len());
remaining.swap(j, idx);
selected.insert(remaining[j]);
}
selected
};
projects_dict.push(selected_projects);
}
// Step 4: Compute Jaccard similarity for interaction values
// Scale by 1000 to convert float to integer
let mut interaction_values: Vec<Vec<i32>> = vec![vec![0; n_participants]; n_participants];
for i in 0..n_participants {
for j in (i + 1)..n_participants {
let set_i = &projects_dict[i];
let set_j = &projects_dict[j];
let intersection_size = set_i.intersection(set_j).count();
let union_size = set_i.len() + set_j.len() - intersection_size;
if union_size > 0 && intersection_size > 0 {
let jaccard = (intersection_size as f64 / union_size as f64 * 1000.0) as i32;
interaction_values[i][j] = jaccard;
interaction_values[j][i] = jaccard;
}
}
}
let max_weight: u32 = weights.iter().sum::<u32>() / 2;
// Generate weights in [1, 10]
let weights: Vec<u32> = (0..n_participants)
.map(|_| rng.gen_range(1..=max_weight_val))
.collect();
// No linear values in team-formation
let values: Vec<u32> = vec![0; n_participants];
let max_weight = (track.budget as f64 / 100.0 * weights.iter().sum::<u32>() as f64) as u32;
Ok(Challenge {
seed: seed.clone(),
num_items: track.n_items.clone(),
num_items: n_participants,
weights,
values,
interaction_values,

View File

@ -202,3 +202,7 @@ pub use hypergraph as c005;
pub mod neuralnet_optimizer;
#[cfg(feature = "c006")]
pub use neuralnet_optimizer as c006;
#[cfg(feature = "c007")]
pub mod job_scheduling;
#[cfg(feature = "c007")]
pub use job_scheduling as c007;

View File

@ -37,7 +37,7 @@ The data has the following split: Train = 1000, Validation = 200, Test = 250.
Innovator optimizers integrate into the training loop via three functions:
- `optimizer_init_state(seed, param_sizes, ...) -> state`. This is a one-time setup function that initialises the optimizer state.
- `optimizer_query_at_params(state, model_params, epoch, train_loss, val_loss, ...) -> Option<modified_params>`. This is an optional “parameter proposal” function: if you return modified parameters, the forward/backward uses them for that batch; the original parameters are then restored before applying updates. This enables lookahead optimizer schemes.
- `optimizer_step(state, gradients, epoch, train_loss, val_loss, ...) -> updates`. This is the main function in the submission; it decides how to update the weights given the gradients of the loss.
- `optimizer_step(state, model_params, gradients, epoch, train_loss, val_loss, ...) -> updates`. This is the main function in the submission; it receives the current model parameters and gradients and returns per-parameter update tensors.
You may only change optimizer logic and its internal hyperparameters/state. Model architecture (beyond `num_hidden_layers` from difficulty), data, batch size, and training loop controls are fixed.
@ -46,11 +46,13 @@ Each epoch (iteration of the training loop) consists of:
* For each mini-batch:
- Optional parameter proposal: the harness calls optimizer_query_at_params(...). If you return modified parameters, the forward/backward uses them for this batch; the original parameters are restored immediately after.
- Run a forward pass to compute the batch loss, then a backward pass to compute gradients with respect to the current model parameters.
- Update computation: the harness calls optimizer_step(...) with the gradients (and optional loss signals). Your function returns per-parameter update tensors.
- Update computation: the harness calls optimizer_step(...) with the current model parameters and gradients (and optional loss signals). Your function returns per-parameter update tensors.
- Apply the returned updates to the model parameters.
- After all batches, evaluate on the validation set, track the best validation loss for early stopping, and save the best model so far.
**Scoring and Acceptance**
Your optimizer integrates into the training loop; the harness evaluates the best model state produced during the run (no separate "returned" solution). The evaluated metric is **quality** (a fixed-point integer with 6 decimal places); see the challenge code for how it is derived from test loss and the acceptance threshold.
After training, we compute the average MSE on the test set (`avg_model_loss_on_test`) and compare it to a target computed from the data's noise:
- Let `alpha = 4.0 - accuracy_factor / 1000.0`.

View File

@ -326,6 +326,7 @@ pub type OptimizerQueryAtParamsFn = fn(
/// Function type for optimizer step (computes parameter updates)
pub type OptimizerStepFn = fn(
optimizer_state: &mut dyn OptimizerStateTrait,
model_params: &[CudaSlice<f32>],
gradients: &[CudaSlice<f32>], // FIXME pass in model map instead
epoch: usize,
train_loss: Option<f32>,
@ -489,14 +490,19 @@ pub fn training_loop(
)?;
// Restore original parameters if they were modified
if let Some(params_to_restore) = original_params {
model.set_parameters(&params_to_restore, stream.clone(), module.clone())?;
if let Some(params_to_restore) = &original_params {
model.set_parameters(params_to_restore, stream.clone(), module.clone())?;
}
// Get gradients and apply optimizer step
let gradients = model.extract_gradients(stream.clone())?;
let param_updates = optimizer_step(
optimizer_state.as_mut(),
if let Some(modified_params) = &original_params {
modified_params
} else {
&model_params
},
&gradients,
epoch,
prev_train_loss,

View File

@ -70,7 +70,7 @@ impl MLP {
) -> Result<()> {
let mut rng = StdRng::from_seed(seed);
for layer in &mut self.lin {
layer.init_weights(rng.gen(), stream.clone(), module.clone())?;
layer.init_weights(rng.r#gen(), stream.clone(), module.clone())?;
}
Ok(())
}

View File

@ -56,7 +56,9 @@ This assignment corresponds to the variable assignment $X1=False, X2=True, X3=Tr
When substituted into the Boolean formula, each clause will evaluate to True, thereby this assignment is a solution as it satisfies all clauses.
# Our Challenge
In TIG, the 3-SAT Challenge is based on the example above with configurable difficulty. Please see the challenge code for a precise specification.
In TIG, the 3-SAT Challenge is based on the example above with configurable difficulty. Please see the challenge code for a precise specification.
Your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution is any assignment of True/False to the variables (one value per variable). The evaluated metric is **quality** (a fixed-point integer with 6 decimal places): quality is **1.0** if the assignment satisfies the formula (makes all clauses true) and **0** otherwise.
# Applications

View File

@ -38,7 +38,7 @@ pub struct Challenge {
impl Challenge {
pub fn generate_instance(seed: &[u8; 32], track: &Track) -> Result<Self> {
let mut rng = SmallRng::from_seed(StdRng::from_seed(seed.clone()).gen());
let mut rng = SmallRng::from_seed(StdRng::from_seed(seed.clone()).r#gen());
let num_clauses = (track.n_vars as f64 * track.ratio as f64 / 1000.0).floor() as usize;
let var_distr = Uniform::new(1, track.n_vars as i32 + 1);

View File

@ -56,9 +56,11 @@ mean_distance = 0.47 / 3 = 0.16
In TIG, the vector search challenge features vectors with 250 dimensions, and uses the Euclidean distance. The set we sample from is the hypercube $[-1,1]^{250}$. The number of Database vectors scales with the number of Query vectors, such that the number of Database vectors is the number of Query vectors multiplied by $100$. There are two parameters that can be adjusted in order to vary the difficulty of the challenge instance:
- Parameter 1: $num\textunderscore{ }queries$ = **The number of queries**.
- Parameter 2: $better\textunderscore{ }than\textunderscore{ }baseline$ = **The mean Euclidean distance of query vectors to selected nearby vectors in the database have to be below `threshold = 11 - better_than_baseline / 1000`**.
- Parameter 2: **quality target** — your mean Euclidean distance is compared to a baseline (see below).
Real-world data is typically clustered, we generate cluster sizes from the log-normal distribution, such that the mean number of points in a cluster is $700$. All vectors in the Query and Database sets are generated in the following way. When a vector is generated it is assigned a cluster center with a probability proportional to that clusters size. Once a vector is assigned a cluster center it is generated from a anisotropic Guassian with mean equal to the cluster center.
Real-world data is typically clustered; we generate cluster sizes from the log-normal distribution, such that the mean number of points in a cluster is $700$. All vectors in the Query and Database sets are generated in the following way. When a vector is generated it is assigned a cluster center with a probability proportional to that cluster's size. Once a vector is assigned a cluster center it is generated from an anisotropic Gaussian with mean equal to the cluster center.
Your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution must assign each query vector to a valid database index. The evaluated metric is **quality** (a fixed-point integer with 6 decimal places), computed from your solution's mean Euclidean distance (`avg_dist`) and a hardcoded baseline distance of **11.0**: `quality = 1.0 - avg_dist / 11.0`. Lower mean distance gives higher quality; the result is encoded in the fixed-point format in the challenge code.
# Application

View File

@ -81,11 +81,11 @@ better_than_baseline = 1 - total_distance / baseline_total_distance
```
## Our Challenge
In TIG, the baseline route is determined by using Solomon's I1 insertion heuristic that iteratively inserts customers into routes based on a cost function that balances distance and time constraints. The routes are built one by one until all customers are served.
In TIG, the baseline route is determined by using Solomon's I1 insertion heuristic that iteratively inserts customers into routes based on a cost function that balances distance and time constraints. The routes are built one by one until all customers are served.
Each instance of TIG's vehicle routing problem contains 16 random sub-instances, each with its own baseline routes and baseline distance. For each sub-instance, we calculate how much your routes' total distance is shorter than the baseline distance, expressed as a percentage improvement. This improvement percentage is called `better_than_baseline`. Your overall performance is measured by taking the root mean square of these 16 `better_than_baseline` percentages. To pass a difficulty level, this overall score must meet or exceed the specified difficulty target.
Your algorithm does not return a solution; it calls `save_solution` as it runs. The **last** saved solution is evaluated. A valid solution must meet all constraints: each customer visited exactly once, capacity and time windows respected, routes start and end at the depot, and fleet size not exceeded. Invalid solutions are not scored.
For precision, `better_than_baseline` is stored as an integer where each unit represents 0.1%. For example, a `better_than_baseline` value of 22 corresponds to 22/1000 = 2.2%.
The evaluated metric is **quality** (a fixed-point integer with 6 decimal places). For this challenge, quality functions as improvement over the baseline: `quality = 1 - (total_distance / baseline_total_distance)` (expressed in the fixed-point format). Higher quality is better. See the challenge code for the precise definition.
## Applications
* **Logistics & Delivery Services:** Optimizes parcel and ship routing by ensuring vehicles meet customer and operational time constraints, reducing operational costs and environmental impact [^1].

View File

@ -1,19 +1,16 @@
use crate::context::*;
use anyhow::{anyhow, Result};
use logging_timer::time;
use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
use serde_json::{Map, Value};
use std::collections::HashSet;
use rand::{prelude::IteratorRandom, rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
use std::collections::{HashMap, HashSet};
use tig_structs::{config::*, core::*};
#[time]
pub async fn submit_precommit<T: Context>(
ctx: &T,
player_id: String,
settings: BenchmarkSettings,
hyperparameters: Option<Map<String, Value>>,
runtime_config: RuntimeConfig,
num_bundles: u64,
mut settings: BenchmarkSettings,
mut track_settings: HashMap<String, TrackSettings>,
seed: u64,
) -> Result<String> {
if player_id != settings.player_id {
@ -53,14 +50,33 @@ pub async fn submit_precommit<T: Context>(
// verify size
let challenge_config = &config.challenges[&settings.challenge_id];
if !challenge_config
.active_tracks
.contains_key(&settings.track_id)
if challenge_config.active_tracks.len() != track_settings.len()
|| !track_settings
.keys()
.all(|k| challenge_config.active_tracks.contains_key(k))
{
return Err(anyhow!("Invalid track_id '{}'", settings.track_id));
return Err(anyhow!(
"Must submit settings for all active tracks: {:?}",
challenge_config.active_tracks.keys().collect::<Vec<_>>(),
));
}
// randomly select a track
let mut rng = StdRng::seed_from_u64(seed);
settings.track_id = challenge_config
.active_tracks
.keys()
.choose(&mut rng)
.unwrap()
.clone();
let track_config = &challenge_config.active_tracks[&settings.track_id];
let TrackSettings {
hyperparameters,
fuel_budget,
num_bundles,
} = track_settings.remove(&settings.track_id).unwrap();
if num_bundles < challenge_config.min_num_bundles {
return Err(anyhow!(
"Invalid num_bundles '{}'. Must be at least {}",
@ -69,19 +85,11 @@ pub async fn submit_precommit<T: Context>(
));
}
if runtime_config.max_memory > challenge_config.runtime_config_limits.max_memory {
if fuel_budget > challenge_config.max_fuel_budget {
return Err(anyhow!(
"Invalid runtime_config.max_memory '{}'. Must be <= {}",
runtime_config.max_memory,
challenge_config.runtime_config_limits.max_memory
));
}
if runtime_config.max_fuel > challenge_config.runtime_config_limits.max_fuel {
return Err(anyhow!(
"Invalid runtime_config.max_fuel '{}'. Must be <= {}",
runtime_config.max_fuel,
challenge_config.runtime_config_limits.max_fuel
"Invalid fuel_budget '{}'. Must be <= {}",
fuel_budget,
challenge_config.max_fuel_budget
));
}
@ -103,10 +111,10 @@ pub async fn submit_precommit<T: Context>(
block_started: block_details.height,
num_nonces: num_bundles * track_config.num_nonces_per_bundle,
num_bundles,
rand_hash: hex::encode(StdRng::seed_from_u64(seed).gen::<[u8; 16]>()),
rand_hash: hex::encode(rng.r#gen::<[u8; 16]>()),
fee_paid: submission_fee,
hyperparameters,
runtime_config,
fuel_budget,
},
)
.await?;

View File

@ -46,16 +46,11 @@ pub(crate) async fn update(cache: &mut AddBlockCache) {
let mut num_bundles_by_player_by_challenge = HashMap::<String, HashMap<String, u64>>::new();
for (settings, average_quality_by_bundle) in active_benchmarks.iter() {
if config.challenges[&settings.challenge_id]
.active_tracks
.contains_key(&settings.track_id)
{
*num_bundles_by_player_by_challenge
.entry(settings.player_id.clone())
.or_default()
.entry(settings.challenge_id.clone())
.or_default() += average_quality_by_bundle.len() as u64;
}
*num_bundles_by_player_by_challenge
.entry(settings.player_id.clone())
.or_default()
.entry(settings.challenge_id.clone())
.or_default() += average_quality_by_bundle.len() as u64;
}
for (player_id, player_bundles_by_challenge) in num_bundles_by_player_by_challenge.iter() {
let opow_data = active_opow_block_data.get_mut(player_id).unwrap();

View File

@ -34,3 +34,5 @@ c005 = ["cuda", "tig-challenges/c005"]
hypergraph = ["c005"]
c006 = ["cuda", "tig-challenges/c006"]
neuralnet_optimizer = ["c006"]
c007 = ["tig-challenges/c007"]
job_scheduling = ["c007"]

View File

@ -335,6 +335,12 @@ pub fn compute_solution(
#[cfg(feature = "c006")]
dispatch_challenge!(c006, gpu)
}
"c007" => {
#[cfg(not(feature = "c007"))]
panic!("tig-runtime was not compiled with '--features c007'");
#[cfg(feature = "c007")]
dispatch_challenge!(c007, cpu)
}
_ => panic!("Unsupported challenge"),
}
}

View File

@ -51,12 +51,6 @@ serializable_struct_with_getters! {
token_locker_weight: u32,
}
}
serializable_struct_with_getters! {
RuntimeConfig {
max_memory: u64,
max_fuel: u64,
}
}
serializable_struct_with_getters! {
TopUpsConfig {
topup_address: String,
@ -93,7 +87,7 @@ serializable_struct_with_getters! {
per_nonce_fee: PreciseNumber,
base_fee: PreciseNumber,
active_tracks: HashMap<String, TrackConfig>,
runtime_config_limits: RuntimeConfig,
max_fuel_budget: u64,
max_qualifiers_per_track: u64,
legacy_multiplier_span: f32,
min_num_bundles: u64,

View File

@ -1,5 +1,5 @@
use crate::{
config::{ChallengeConfig, ProtocolConfig, RuntimeConfig},
config::{ChallengeConfig, ProtocolConfig},
serializable_struct_with_getters,
};
use serde::{Deserialize, Serialize};
@ -369,6 +369,9 @@ serializable_struct_with_getters! {
serializable_struct_with_getters! {
PlayerDetails {
name: Option<String>,
x: Option<String>,
telegram: Option<String>,
discord: Option<String>,
is_multisig: bool,
}
}
@ -397,6 +400,13 @@ serializable_struct_with_getters! {
}
// Precommit child structs
serializable_struct_with_getters! {
TrackSettings {
hyperparameters: Option<Map<String, Value>>,
fuel_budget: u64,
num_bundles: u64,
}
}
serializable_struct_with_getters! {
PrecommitDetails {
block_started: u32,
@ -404,7 +414,7 @@ serializable_struct_with_getters! {
num_bundles: u64,
rand_hash: String,
fee_paid: PreciseNumber,
runtime_config: RuntimeConfig,
fuel_budget: u64,
hyperparameters: Option<Map<String, Value>>,
}
}

View File

@ -32,3 +32,5 @@ c005 = ["cuda", "tig-challenges/c005"]
hypergraph = ["c005"]
c006 = ["cuda", "tig-challenges/c006"]
neuralnet_optimizer = ["c006"]
c007 = ["tig-challenges/c007"]
job_scheduling = ["c007"]

View File

@ -209,6 +209,12 @@ pub fn verify_solution(
#[cfg(feature = "c006")]
dispatch_challenge!(c006, gpu)
}
"c007" => {
#[cfg(not(feature = "c007"))]
panic!("tig-verifier was not compiled with '--features c007'");
#[cfg(feature = "c007")]
dispatch_challenge!(c007, cpu)
}
_ => panic!("Unsupported challenge"),
}