diff --git a/tig-algorithms/lib/hypergraph/hyper_cluster.tar.gz b/tig-algorithms/lib/hypergraph/hyper_cluster.tar.gz new file mode 100644 index 0000000..9aa42d0 Binary files /dev/null and b/tig-algorithms/lib/hypergraph/hyper_cluster.tar.gz differ diff --git a/tig-algorithms/lib/hypergraph/hyper_improved.tar.gz b/tig-algorithms/lib/hypergraph/hyper_improved.tar.gz new file mode 100644 index 0000000..7ac89f9 Binary files /dev/null and b/tig-algorithms/lib/hypergraph/hyper_improved.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/classic_quadkp.tar.gz b/tig-algorithms/lib/knapsack/classic_quadkp.tar.gz new file mode 100644 index 0000000..10b5ae3 Binary files /dev/null and b/tig-algorithms/lib/knapsack/classic_quadkp.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/dynamic.tar.gz b/tig-algorithms/lib/knapsack/dynamic.tar.gz new file mode 100644 index 0000000..cae41f6 Binary files /dev/null and b/tig-algorithms/lib/knapsack/dynamic.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/fast_and_fun.tar.gz b/tig-algorithms/lib/knapsack/fast_and_fun.tar.gz new file mode 100644 index 0000000..933a693 Binary files /dev/null and b/tig-algorithms/lib/knapsack/fast_and_fun.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/knap_one.tar.gz b/tig-algorithms/lib/knapsack/knap_one.tar.gz new file mode 100644 index 0000000..9bfc575 Binary files /dev/null and b/tig-algorithms/lib/knapsack/knap_one.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/knapheudp.tar.gz b/tig-algorithms/lib/knapsack/knapheudp.tar.gz new file mode 100644 index 0000000..902ab9f Binary files /dev/null and b/tig-algorithms/lib/knapsack/knapheudp.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/knapmaxxing.tar.gz b/tig-algorithms/lib/knapsack/knapmaxxing.tar.gz new file mode 100644 index 0000000..42e168b Binary files /dev/null and b/tig-algorithms/lib/knapsack/knapmaxxing.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/knapsack_redone.tar.gz 
b/tig-algorithms/lib/knapsack/knapsack_redone.tar.gz new file mode 100644 index 0000000..eb3b528 Binary files /dev/null and b/tig-algorithms/lib/knapsack/knapsack_redone.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/knapsplatt.tar.gz b/tig-algorithms/lib/knapsack/knapsplatt.tar.gz new file mode 100644 index 0000000..1419821 Binary files /dev/null and b/tig-algorithms/lib/knapsack/knapsplatt.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/native_knapsack.tar.gz b/tig-algorithms/lib/knapsack/native_knapsack.tar.gz new file mode 100644 index 0000000..1e982c8 Binary files /dev/null and b/tig-algorithms/lib/knapsack/native_knapsack.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/new_relative_ultra.tar.gz b/tig-algorithms/lib/knapsack/new_relative_ultra.tar.gz new file mode 100644 index 0000000..f49e696 Binary files /dev/null and b/tig-algorithms/lib/knapsack/new_relative_ultra.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/quadkp_improved.tar.gz b/tig-algorithms/lib/knapsack/quadkp_improved.tar.gz new file mode 100644 index 0000000..0682336 Binary files /dev/null and b/tig-algorithms/lib/knapsack/quadkp_improved.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/quadkp_maximize.tar.gz b/tig-algorithms/lib/knapsack/quadkp_maximize.tar.gz new file mode 100644 index 0000000..29f4dbd Binary files /dev/null and b/tig-algorithms/lib/knapsack/quadkp_maximize.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/relative_opt_fast.tar.gz b/tig-algorithms/lib/knapsack/relative_opt_fast.tar.gz new file mode 100644 index 0000000..3108a4b Binary files /dev/null and b/tig-algorithms/lib/knapsack/relative_opt_fast.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/relative_opt_mid.tar.gz b/tig-algorithms/lib/knapsack/relative_opt_mid.tar.gz new file mode 100644 index 0000000..21b8972 Binary files /dev/null and b/tig-algorithms/lib/knapsack/relative_opt_mid.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/relative_opt_optima.tar.gz 
b/tig-algorithms/lib/knapsack/relative_opt_optima.tar.gz new file mode 100644 index 0000000..8b028a4 Binary files /dev/null and b/tig-algorithms/lib/knapsack/relative_opt_optima.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/relative_quad_fast.tar.gz b/tig-algorithms/lib/knapsack/relative_quad_fast.tar.gz new file mode 100644 index 0000000..455f258 Binary files /dev/null and b/tig-algorithms/lib/knapsack/relative_quad_fast.tar.gz differ diff --git a/tig-algorithms/lib/knapsack/relative_raw_ultra.tar.gz b/tig-algorithms/lib/knapsack/relative_raw_ultra.tar.gz new file mode 100644 index 0000000..d21e7a6 Binary files /dev/null and b/tig-algorithms/lib/knapsack/relative_raw_ultra.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/better_sat.tar.gz b/tig-algorithms/lib/satisfiability/better_sat.tar.gz new file mode 100644 index 0000000..73dd01d Binary files /dev/null and b/tig-algorithms/lib/satisfiability/better_sat.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/fast_walk_sat.tar.gz b/tig-algorithms/lib/satisfiability/fast_walk_sat.tar.gz new file mode 100644 index 0000000..e44d006 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/fast_walk_sat.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/inbound.tar.gz b/tig-algorithms/lib/satisfiability/inbound.tar.gz new file mode 100644 index 0000000..78258b9 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/inbound.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_adaptive.tar.gz b/tig-algorithms/lib/satisfiability/sat_adaptive.tar.gz new file mode 100644 index 0000000..53b9125 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_adaptive.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_adaptive_opt_un.tar.gz b/tig-algorithms/lib/satisfiability/sat_adaptive_opt_un.tar.gz new file mode 100644 index 0000000..bfee7e7 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_adaptive_opt_un.tar.gz differ diff 
--git a/tig-algorithms/lib/satisfiability/sat_allocd.tar.gz b/tig-algorithms/lib/satisfiability/sat_allocd.tar.gz new file mode 100644 index 0000000..0443253 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_allocd.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_global.tar.gz b/tig-algorithms/lib/satisfiability/sat_global.tar.gz new file mode 100644 index 0000000..adc9730 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_global.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_global_opt.tar.gz b/tig-algorithms/lib/satisfiability/sat_global_opt.tar.gz new file mode 100644 index 0000000..c4c37e7 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_global_opt.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_optima.tar.gz b/tig-algorithms/lib/satisfiability/sat_optima.tar.gz new file mode 100644 index 0000000..9d6614d Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_optima.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_separate.tar.gz b/tig-algorithms/lib/satisfiability/sat_separate.tar.gz new file mode 100644 index 0000000..fdc413e Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_separate.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_separate_opt.tar.gz b/tig-algorithms/lib/satisfiability/sat_separate_opt.tar.gz new file mode 100644 index 0000000..92b692b Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_separate_opt.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_separate_opt_p.tar.gz b/tig-algorithms/lib/satisfiability/sat_separate_opt_p.tar.gz new file mode 100644 index 0000000..e9ed28e Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_separate_opt_p.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_separate_prob.tar.gz b/tig-algorithms/lib/satisfiability/sat_separate_prob.tar.gz new file mode 100644 index 0000000..4367cc1 Binary 
files /dev/null and b/tig-algorithms/lib/satisfiability/sat_separate_prob.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_suma.tar.gz b/tig-algorithms/lib/satisfiability/sat_suma.tar.gz new file mode 100644 index 0000000..c31ca60 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_suma.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_unified.tar.gz b/tig-algorithms/lib/satisfiability/sat_unified.tar.gz new file mode 100644 index 0000000..32e6c97 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_unified.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sat_unified_opt.tar.gz b/tig-algorithms/lib/satisfiability/sat_unified_opt.tar.gz new file mode 100644 index 0000000..d672c5f Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sat_unified_opt.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/sprint_sat.tar.gz b/tig-algorithms/lib/satisfiability/sprint_sat.tar.gz new file mode 100644 index 0000000..a547530 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/sprint_sat.tar.gz differ diff --git a/tig-algorithms/lib/satisfiability/walk_sat.tar.gz b/tig-algorithms/lib/satisfiability/walk_sat.tar.gz new file mode 100644 index 0000000..f842fb9 Binary files /dev/null and b/tig-algorithms/lib/satisfiability/walk_sat.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/better_vector.tar.gz b/tig-algorithms/lib/vector_search/better_vector.tar.gz new file mode 100644 index 0000000..7cc27e0 Binary files /dev/null and b/tig-algorithms/lib/vector_search/better_vector.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/brute_force_bacalhau.tar.gz b/tig-algorithms/lib/vector_search/brute_force_bacalhau.tar.gz new file mode 100644 index 0000000..d8edea4 Binary files /dev/null and b/tig-algorithms/lib/vector_search/brute_force_bacalhau.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/cluster_improved.tar.gz 
b/tig-algorithms/lib/vector_search/cluster_improved.tar.gz new file mode 100644 index 0000000..cb88940 Binary files /dev/null and b/tig-algorithms/lib/vector_search/cluster_improved.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/improved_search_adp.tar.gz b/tig-algorithms/lib/vector_search/improved_search_adp.tar.gz new file mode 100644 index 0000000..0ff0495 Binary files /dev/null and b/tig-algorithms/lib/vector_search/improved_search_adp.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/improved_search_new.tar.gz b/tig-algorithms/lib/vector_search/improved_search_new.tar.gz new file mode 100644 index 0000000..ce51a80 Binary files /dev/null and b/tig-algorithms/lib/vector_search/improved_search_new.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector.tar.gz b/tig-algorithms/lib/vector_search/invector.tar.gz new file mode 100644 index 0000000..c79057d Binary files /dev/null and b/tig-algorithms/lib/vector_search/invector.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector_adj.tar.gz b/tig-algorithms/lib/vector_search/invector_adj.tar.gz new file mode 100644 index 0000000..27abbdf Binary files /dev/null and b/tig-algorithms/lib/vector_search/invector_adj.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector_fast.tar.gz b/tig-algorithms/lib/vector_search/invector_fast.tar.gz new file mode 100644 index 0000000..867ad80 Binary files /dev/null and b/tig-algorithms/lib/vector_search/invector_fast.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector_hybrid.tar.gz b/tig-algorithms/lib/vector_search/invector_hybrid.tar.gz new file mode 100644 index 0000000..d977014 Binary files /dev/null and b/tig-algorithms/lib/vector_search/invector_hybrid.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector_hybrid_adp.tar.gz b/tig-algorithms/lib/vector_search/invector_hybrid_adp.tar.gz new file mode 100644 index 0000000..7306a22 Binary files /dev/null and 
b/tig-algorithms/lib/vector_search/invector_hybrid_adp.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/invector_revisited_s.tar.gz b/tig-algorithms/lib/vector_search/invector_revisited_s.tar.gz new file mode 100644 index 0000000..b894c73 Binary files /dev/null and b/tig-algorithms/lib/vector_search/invector_revisited_s.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/is_adp_optimal.tar.gz b/tig-algorithms/lib/vector_search/is_adp_optimal.tar.gz new file mode 100644 index 0000000..fd083bc Binary files /dev/null and b/tig-algorithms/lib/vector_search/is_adp_optimal.tar.gz differ diff --git a/tig-algorithms/lib/vector_search/optimax_gpu.tar.gz b/tig-algorithms/lib/vector_search/optimax_gpu.tar.gz new file mode 100644 index 0000000..103a6f3 Binary files /dev/null and b/tig-algorithms/lib/vector_search/optimax_gpu.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/advanced_cw_adp.tar.gz b/tig-algorithms/lib/vehicle_routing/advanced_cw_adp.tar.gz new file mode 100644 index 0000000..5509473 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/advanced_cw_adp.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/advanced_cw_opt.tar.gz b/tig-algorithms/lib/vehicle_routing/advanced_cw_opt.tar.gz new file mode 100644 index 0000000..d6298b9 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/advanced_cw_opt.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/advanced_heuristics.tar.gz b/tig-algorithms/lib/vehicle_routing/advanced_heuristics.tar.gz new file mode 100644 index 0000000..2eb73ee Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/advanced_heuristics.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/advanced_routing.tar.gz b/tig-algorithms/lib/vehicle_routing/advanced_routing.tar.gz new file mode 100644 index 0000000..7c30259 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/advanced_routing.tar.gz differ diff --git 
a/tig-algorithms/lib/vehicle_routing/better_routing.tar.gz b/tig-algorithms/lib/vehicle_routing/better_routing.tar.gz new file mode 100644 index 0000000..19dc139 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/better_routing.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/clarke_wright.tar.gz b/tig-algorithms/lib/vehicle_routing/clarke_wright.tar.gz new file mode 100644 index 0000000..c09e160 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/clarke_wright.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/clarke_wright_super.tar.gz b/tig-algorithms/lib/vehicle_routing/clarke_wright_super.tar.gz new file mode 100644 index 0000000..d6b4306 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/clarke_wright_super.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/cw_heuristic.tar.gz b/tig-algorithms/lib/vehicle_routing/cw_heuristic.tar.gz new file mode 100644 index 0000000..bbd66b4 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/cw_heuristic.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/enhanced_cw.tar.gz b/tig-algorithms/lib/vehicle_routing/enhanced_cw.tar.gz new file mode 100644 index 0000000..4d481f4 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/enhanced_cw.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/enhanced_heuristics.tar.gz b/tig-algorithms/lib/vehicle_routing/enhanced_heuristics.tar.gz new file mode 100644 index 0000000..e872ce9 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/enhanced_heuristics.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/enhanced_routing.tar.gz b/tig-algorithms/lib/vehicle_routing/enhanced_routing.tar.gz new file mode 100644 index 0000000..d48e128 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/enhanced_routing.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/enhanced_solomon.tar.gz 
b/tig-algorithms/lib/vehicle_routing/enhanced_solomon.tar.gz new file mode 100644 index 0000000..a680613 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/enhanced_solomon.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/native_routing.tar.gz b/tig-algorithms/lib/vehicle_routing/native_routing.tar.gz new file mode 100644 index 0000000..2f28d71 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/native_routing.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/new_enhanced_cw.tar.gz b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw.tar.gz new file mode 100644 index 0000000..16a6ae9 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_low.tar.gz b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_low.tar.gz new file mode 100644 index 0000000..c698e61 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_low.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_opt.tar.gz b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_opt.tar.gz new file mode 100644 index 0000000..78ec7f1 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/new_enhanced_cw_opt.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/sausage.tar.gz b/tig-algorithms/lib/vehicle_routing/sausage.tar.gz new file mode 100644 index 0000000..8057ee5 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/sausage.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/simple_ls_zero.tar.gz b/tig-algorithms/lib/vehicle_routing/simple_ls_zero.tar.gz new file mode 100644 index 0000000..2e4d9c9 Binary files /dev/null and b/tig-algorithms/lib/vehicle_routing/simple_ls_zero.tar.gz differ diff --git a/tig-algorithms/lib/vehicle_routing/vrptw_ultimate.tar.gz b/tig-algorithms/lib/vehicle_routing/vrptw_ultimate.tar.gz new file mode 100644 index 0000000..5eb53a0 Binary files 
/dev/null and b/tig-algorithms/lib/vehicle_routing/vrptw_ultimate.tar.gz differ diff --git a/tig-algorithms/src/hypergraph/hyper_cluster/README.md b/tig-algorithms/src/hypergraph/hyper_cluster/README.md new file mode 100644 index 0000000..6290952 --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_cluster/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** hypergraph +* **Algorithm Name:** hyper_cluster +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/hypergraph/hyper_cluster/kernels.cu b/tig-algorithms/src/hypergraph/hyper_cluster/kernels.cu new file mode 100644 index 0000000..bda42ab --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_cluster/kernels.cu @@ -0,0 +1,343 @@ +/*!Copyright 2025 Rootz + +Identity of Submitter Rootz + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ +#include +#include + +extern "C" __global__ void hyperedge_clustering( + const int num_hyperedges, + const int num_clusters, + const int *hyperedge_nodes, + const int *hyperedge_offsets, + int *hyperedge_clusters +) { + int hedge = blockIdx.x * blockDim.x + threadIdx.x; + + if (hedge < num_hyperedges) { + int start = hyperedge_offsets[hedge]; + int end = hyperedge_offsets[hedge + 1]; + int hedge_size = end - start; + + int quarter_clusters = num_clusters >> 2; + int cluster_mask = quarter_clusters - 1; + + int cluster; + if (hedge_size <= 2) { + cluster = hedge & cluster_mask; + } else if (hedge_size <= 4) { + cluster = quarter_clusters + (hedge & cluster_mask); + } else if (hedge_size <= 8) { + cluster = (quarter_clusters << 1) + (hedge & cluster_mask); + } else { + cluster = (quarter_clusters * 3) + (hedge & cluster_mask); + } + + hyperedge_clusters[hedge] = cluster; + } +} + +extern "C" __global__ void compute_node_preferences( + const int num_nodes, + const int num_parts, + const int num_hedge_clusters, + const int *node_hyperedges, + const int *node_offsets, + const int *hyperedge_clusters, + const int *hyperedge_offsets, + int *pref_nodes, + int *pref_parts, + int *pref_gains, + int *pref_priorities +) { + int node = blockIdx.x * blockDim.x + threadIdx.x; + + if (node < num_nodes) { + int start = node_offsets[node]; + int end = node_offsets[node + 1]; + int node_degree = end - start; + + int cluster_votes[256]; + int max_clusters = min(num_hedge_clusters, 256); + for (int i = 0; i < max_clusters; i++) { + cluster_votes[i] = 0; + } + + int max_votes = 0; + int best_cluster = 0; + + for (int j = start; j < end; j++) { + int hyperedge = node_hyperedges[j]; + int cluster = hyperedge_clusters[hyperedge]; + + if (cluster >= 0 && cluster < max_clusters) { + int hedge_start = hyperedge_offsets[hyperedge]; + int hedge_end = hyperedge_offsets[hyperedge + 1]; + int hedge_size = hedge_end - hedge_start; + int weight = (hedge_size <= 3) ? 3 : (hedge_size <= 6) ? 
2 : 1; + + cluster_votes[cluster] += weight; + + if (cluster_votes[cluster] > max_votes || + (cluster_votes[cluster] == max_votes && cluster < best_cluster)) { + max_votes = cluster_votes[cluster]; + best_cluster = cluster; + } + } + } + + int target_partition; + if (node_degree <= 3) { + target_partition = (best_cluster + node) % num_parts; + } else if (node_degree <= 8) { + target_partition = (best_cluster + node_degree + node) % num_parts; + } else { + target_partition = (best_cluster * 2 + node_degree + node) % num_parts; + } + + pref_nodes[node] = node; + pref_parts[node] = target_partition; + pref_gains[node] = max_votes; + pref_priorities[node] = (max_votes << 16) + (num_parts - (node % num_parts)); + } +} + +extern "C" __global__ void execute_node_assignments( + const int num_nodes, + const int num_parts, + const int max_part_size, + const int *sorted_nodes, + const int *sorted_parts, + int *partition, + int *nodes_in_part +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int i = 0; i < num_nodes; i++) { + int node = sorted_nodes[i]; + int preferred_part = sorted_parts[i]; + + if (node >= 0 && node < num_nodes && preferred_part >= 0 && preferred_part < num_parts) { + bool assigned = false; + for (int attempt = 0; attempt < num_parts; attempt++) { + int try_part = (preferred_part + attempt) % num_parts; + if (nodes_in_part[try_part] < max_part_size) { + partition[node] = try_part; + nodes_in_part[try_part]++; + assigned = true; + break; + } + } + + if (!assigned) { + int fallback_part = node % num_parts; + partition[node] = fallback_part; + nodes_in_part[fallback_part]++; + } + } + } + } +} + +extern "C" __global__ void compute_refinement_moves( + const int num_nodes, + const int num_parts, + const int max_part_size, + const int num_hyperedges, + const int *node_hyperedges, + const int *node_offsets, + const int *hyperedge_nodes, + const int *hyperedge_offsets, + const int *partition, + const int *nodes_in_part, + int *move_nodes, + int *move_parts, + 
int *move_gains, + int *move_priorities, + int *num_valid_moves, + const int round, + unsigned long long *global_edge_flags_buffer +) { + int node = blockIdx.x * blockDim.x + threadIdx.x; + + if (node < num_nodes) { + move_nodes[node] = node; + move_parts[node] = partition[node]; + move_gains[node] = 0; + move_priorities[node] = 0; + + int current_part = partition[node]; + if (current_part < 0 || current_part >= num_parts || nodes_in_part[current_part] <= 1) return; + + int start = node_offsets[node]; + int end = node_offsets[node + 1]; + int node_degree = end - start; + + if (node_degree > 4000) return; + + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + unsigned long long *edge_flags = &global_edge_flags_buffer[thread_id * 4000]; + + for (int j = 0; j < node_degree; j++) { + edge_flags[j] = 0; + int hyperedge = node_hyperedges[start + j]; + int hedge_start = hyperedge_offsets[hyperedge]; + int hedge_end = hyperedge_offsets[hyperedge + 1]; + + for (int k = hedge_start; k < hedge_end; k++) { + int other_node = hyperedge_nodes[k]; + if (other_node != node && other_node >= 0 && other_node < num_nodes) { + int part = partition[other_node]; + if (part >= 0 && part < min(num_parts, 64)) { + edge_flags[j] |= 1ULL << part; + } + } + } + } + + int original_cost = 0; + for (int j = 0; j < node_degree; j++) { + int lambda = __popcll(edge_flags[j] | (1ULL << current_part)); + if (lambda > 1) { + original_cost += (lambda - 1); + } + } + + int best_gain = 0; + int best_target = current_part; + + for (int offset = 0; offset < num_parts; offset++) { + int target_part = (node + round + offset) % num_parts; + if (target_part == current_part) continue; + if (target_part < 0 || target_part >= num_parts) continue; + if (nodes_in_part[target_part] >= max_part_size) continue; + + int new_cost = 0; + for (int j = 0; j < node_degree; j++) { + int lambda = __popcll(edge_flags[j] | (1ULL << target_part)); + if (lambda > 1) { + new_cost += (lambda - 1); + } + } + + int basic_gain = 
original_cost - new_cost; + + int current_size = nodes_in_part[current_part]; + int target_size = nodes_in_part[target_part]; + int balance_bonus = 0; + if (current_size > target_size + 2) { + balance_bonus = (num_hyperedges < 50000) ? 2 : 4; + } + + int total_gain = basic_gain + balance_bonus; + + if (total_gain > best_gain || + (total_gain == best_gain && target_part < best_target)) { + best_gain = total_gain; + best_target = target_part; + } + } + + if (best_gain > 0 && best_target != current_part) { + move_parts[node] = best_target; + move_gains[node] = best_gain; + move_priorities[node] = (best_gain << 16) + (num_parts - (node % num_parts)); + atomicAdd(num_valid_moves, 1); + } + } +} + +extern "C" __global__ void execute_refinement_moves( + const int num_valid_moves, + const int *sorted_nodes, + const int *sorted_parts, + const int max_part_size, + int *partition, + int *nodes_in_part, + int *moves_executed +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int i = 0; i < num_valid_moves; i++) { + int node = sorted_nodes[i]; + int target_part = sorted_parts[i]; + + if (node >= 0 && target_part >= 0) { + int current_part = partition[node]; + + if (current_part >= 0 && + nodes_in_part[target_part] < max_part_size && + nodes_in_part[current_part] > 1 && + partition[node] == current_part) { + + partition[node] = target_part; + nodes_in_part[current_part]--; + nodes_in_part[target_part]++; + (*moves_executed)++; + } + } + } + } +} + +extern "C" __global__ void balance_final( + const int num_nodes, + const int num_parts, + const int min_part_size, + const int max_part_size, + int *partition, + int *nodes_in_part +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int part = 0; part < num_parts; part++) { + while (nodes_in_part[part] < min_part_size) { + bool moved = false; + for (int other_part = 0; other_part < num_parts && !moved; other_part++) { + if (other_part != part && nodes_in_part[other_part] > min_part_size) { + for (int node = 0; node < 
num_nodes; node++) { + if (partition[node] == other_part) { + partition[node] = part; + nodes_in_part[other_part]--; + nodes_in_part[part]++; + moved = true; + break; + } + } + } + } + if (!moved) break; + } + } + + for (int part = 0; part < num_parts; part++) { + while (nodes_in_part[part] > max_part_size) { + bool moved = false; + for (int other_part = 0; other_part < num_parts && !moved; other_part++) { + if (other_part != part && nodes_in_part[other_part] < max_part_size) { + for (int node = 0; node < num_nodes; node++) { + if (partition[node] == part) { + partition[node] = other_part; + nodes_in_part[part]--; + nodes_in_part[other_part]++; + moved = true; + break; + } + } + } + } + if (!moved) break; + } + } + } +} diff --git a/tig-algorithms/src/hypergraph/hyper_cluster/mod.rs b/tig-algorithms/src/hypergraph/hyper_cluster/mod.rs new file mode 100644 index 0000000..d7bbafe --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_cluster/mod.rs @@ -0,0 +1,217 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::hypergraph::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let block_size = std::cmp::min(256, prop.maxThreadsPerBlock as u32); + + let hyperedge_cluster_kernel = module.load_function("hyperedge_clustering")?; + let compute_preferences_kernel = module.load_function("compute_node_preferences")?; + let execute_assignments_kernel = module.load_function("execute_node_assignments")?; + let compute_moves_kernel = module.load_function("compute_refinement_moves")?; + let execute_moves_kernel = module.load_function("execute_refinement_moves")?; + let balance_kernel = module.load_function("balance_final")?; + + let cfg = LaunchConfig { + 
grid_dim: ((challenge.num_nodes as u32 + block_size - 1) / block_size, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: 0, + }; + + let one_thread_cfg = LaunchConfig { + grid_dim: (1, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + }; + + let num_hedge_clusters = std::cmp::min(challenge.num_parts as usize * 4, 128); + + let mut d_hyperedge_clusters = stream.alloc_zeros::(challenge.difficulty.num_hyperedges as usize)?; + let mut d_partition = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_nodes_in_part = stream.alloc_zeros::(challenge.num_parts as usize)?; + + let mut d_pref_nodes = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_parts = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_gains = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_priorities = stream.alloc_zeros::(challenge.num_nodes as usize)?; + + let mut d_move_nodes = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_parts = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_gains = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_priorities = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_num_valid_moves = stream.alloc_zeros::(1)?; + + let num_threads = ((challenge.num_nodes as u32 + block_size - 1) / block_size) * block_size; + let buffer_size = (num_threads * 4000) as usize; + let mut d_global_edge_flags = stream.alloc_zeros::(buffer_size)?; + + unsafe { + stream.launch_builder(&hyperedge_cluster_kernel) + .arg(&(challenge.difficulty.num_hyperedges as i32)) + .arg(&(num_hedge_clusters as i32)) + .arg(&challenge.d_hyperedge_nodes) + .arg(&challenge.d_hyperedge_offsets) + .arg(&mut d_hyperedge_clusters) + .launch(LaunchConfig { + grid_dim: ((challenge.difficulty.num_hyperedges as u32 + block_size - 1) / block_size, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: 0, + })?; + } + stream.synchronize()?; + + unsafe { 
+ stream.launch_builder(&compute_preferences_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(num_hedge_clusters as i32)) + .arg(&challenge.d_node_hyperedges) + .arg(&challenge.d_node_offsets) + .arg(&d_hyperedge_clusters) + .arg(&challenge.d_hyperedge_offsets) + .arg(&mut d_pref_nodes) + .arg(&mut d_pref_parts) + .arg(&mut d_pref_gains) + .arg(&mut d_pref_priorities) + .launch(cfg.clone())?; + } + stream.synchronize()?; + + let pref_nodes = stream.memcpy_dtov(&d_pref_nodes)?; + let pref_parts = stream.memcpy_dtov(&d_pref_parts)?; + let pref_priorities = stream.memcpy_dtov(&d_pref_priorities)?; + + let mut indices: Vec = (0..challenge.num_nodes as usize).collect(); + indices.sort_by(|&a, &b| pref_priorities[b].cmp(&pref_priorities[a])); + + let sorted_nodes: Vec = indices.iter().map(|&i| pref_nodes[i]).collect(); + let sorted_parts: Vec = indices.iter().map(|&i| pref_parts[i]).collect(); + + let d_sorted_nodes = stream.memcpy_stod(&sorted_nodes)?; + let d_sorted_parts = stream.memcpy_stod(&sorted_parts)?; + + unsafe { + stream.launch_builder(&execute_assignments_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(challenge.max_part_size as i32)) + .arg(&d_sorted_nodes) + .arg(&d_sorted_parts) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let mut valid_moves: Vec<(i32, i32, i32)> = Vec::with_capacity(challenge.num_nodes as usize); + let mut sorted_move_nodes: Vec = Vec::with_capacity(challenge.num_nodes as usize); + let mut sorted_move_parts: Vec = Vec::with_capacity(challenge.num_nodes as usize); + + for round in 0..100 { + unsafe { + stream.launch_builder(&compute_moves_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(challenge.max_part_size as i32)) + .arg(&(challenge.difficulty.num_hyperedges as i32)) + .arg(&challenge.d_node_hyperedges) + 
.arg(&challenge.d_node_offsets) + .arg(&challenge.d_hyperedge_nodes) + .arg(&challenge.d_hyperedge_offsets) + .arg(&d_partition) + .arg(&d_nodes_in_part) + .arg(&mut d_move_nodes) + .arg(&mut d_move_parts) + .arg(&mut d_move_gains) + .arg(&mut d_move_priorities) + .arg(&mut d_num_valid_moves) + .arg(&round) + .arg(&mut d_global_edge_flags) + .launch(cfg.clone())?; + } + stream.synchronize()?; + + let num_valid_moves = stream.memcpy_dtov(&d_num_valid_moves)?[0]; + if num_valid_moves == 0 { + break; + } + + let move_gains = stream.memcpy_dtov(&d_move_gains)?; + let valid_indices: Vec = move_gains.iter().enumerate() + .filter(|(_, &gain)| gain > 0) + .map(|(i, _)| i) + .collect(); + + if valid_indices.is_empty() { + break; + } + + let move_nodes = stream.memcpy_dtov(&d_move_nodes)?; + let move_parts = stream.memcpy_dtov(&d_move_parts)?; + let move_priorities = stream.memcpy_dtov(&d_move_priorities)?; + + valid_moves.clear(); + for &i in &valid_indices { + valid_moves.push((move_nodes[i], move_parts[i], move_priorities[i])); + } + + valid_moves.sort_by(|a, b| b.2.cmp(&a.2)); + + sorted_move_nodes.clear(); + sorted_move_parts.clear(); + sorted_move_nodes.extend(valid_moves.iter().map(|&(node, _, _)| node)); + sorted_move_parts.extend(valid_moves.iter().map(|&(_, part, _)| part)); + + let d_sorted_move_nodes = stream.memcpy_stod(&sorted_move_nodes)?; + let d_sorted_move_parts = stream.memcpy_stod(&sorted_move_parts)?; + let mut d_moves_executed = stream.alloc_zeros::(1)?; + + unsafe { + stream.launch_builder(&execute_moves_kernel) + .arg(&(sorted_move_nodes.len() as i32)) + .arg(&d_sorted_move_nodes) + .arg(&d_sorted_move_parts) + .arg(&(challenge.max_part_size as i32)) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .arg(&mut d_moves_executed) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let moves_executed = stream.memcpy_dtov(&d_moves_executed)?[0]; + if moves_executed == 0 { + break; + } + } + + unsafe { + 
stream.launch_builder(&balance_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&1) + .arg(&(challenge.max_part_size as i32)) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let partition = stream.memcpy_dtov(&d_partition)?; + let partition_u32: Vec = partition.iter().map(|&x| x as u32).collect(); + + let _ = save_solution(&Solution { partition: partition_u32 }); + return Ok(()); +} diff --git a/tig-algorithms/src/hypergraph/hyper_improved/README.md b/tig-algorithms/src/hypergraph/hyper_improved/README.md new file mode 100644 index 0000000..e530ff3 --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_improved/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** hypergraph +* **Algorithm Name:** hyper_improved +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/hypergraph/hyper_improved/kernels.cu b/tig-algorithms/src/hypergraph/hyper_improved/kernels.cu new file mode 100644 index 0000000..5042647 --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_improved/kernels.cu @@ -0,0 +1,395 @@ +/*!Copyright 2025 Rootz + +Identity of Submitter Rootz + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. 
You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ +#include +#include + +extern "C" __global__ void hyperedge_clustering( + const int num_hyperedges, + const int num_clusters, + const int *hyperedge_nodes, + const int *hyperedge_offsets, + int *hyperedge_clusters +) { + int hedge = blockIdx.x * blockDim.x + threadIdx.x; + + if (hedge < num_hyperedges) { + int start = hyperedge_offsets[hedge]; + int end = hyperedge_offsets[hedge + 1]; + int hedge_size = end - start; + + int quarter_clusters = num_clusters >> 2; + int cluster_mask = quarter_clusters - 1; + + int cluster; + if (hedge_size <= 2) { + cluster = hedge & cluster_mask; + } else if (hedge_size <= 4) { + cluster = quarter_clusters + (hedge & cluster_mask); + } else if (hedge_size <= 8) { + cluster = (quarter_clusters << 1) + (hedge & cluster_mask); + } else { + cluster = (quarter_clusters * 3) + (hedge & cluster_mask); + } + + hyperedge_clusters[hedge] = cluster; + } +} + +extern "C" __global__ void compute_node_preferences( + const int num_nodes, + const int num_parts, + const int num_hedge_clusters, + const int *node_hyperedges, + const int *node_offsets, + const int *hyperedge_clusters, + const int *hyperedge_offsets, + int *pref_nodes, + int *pref_parts, + int *pref_gains, + int *pref_priorities +) { + int node = blockIdx.x * blockDim.x + threadIdx.x; + + if (node < num_nodes) { + int start = node_offsets[node]; + int end = node_offsets[node + 1]; + int node_degree = end - start; + + int cluster_votes[256]; + int max_clusters = min(num_hedge_clusters, 256); + for (int i = 0; i < max_clusters; i++) { + cluster_votes[i] = 0; + } + + 
int max_votes = 0; + int best_cluster = 0; + + for (int j = start; j < end; j++) { + int hyperedge = node_hyperedges[j]; + int cluster = hyperedge_clusters[hyperedge]; + + if (cluster >= 0 && cluster < max_clusters) { + int hedge_start = hyperedge_offsets[hyperedge]; + int hedge_end = hyperedge_offsets[hyperedge + 1]; + int hedge_size = hedge_end - hedge_start; + int weight = (hedge_size <= 3) ? 4 : (hedge_size <= 6) ? 2 : 1; + + cluster_votes[cluster] += weight; + + if (cluster_votes[cluster] > max_votes || + (cluster_votes[cluster] == max_votes && cluster < best_cluster)) { + max_votes = cluster_votes[cluster]; + best_cluster = cluster; + } + } + } + + int target_partition; + if (node_degree <= 3) { + target_partition = (best_cluster + node) % num_parts; + } else if (node_degree <= 8) { + target_partition = (best_cluster + node_degree + node) % num_parts; + } else { + target_partition = (best_cluster * 2 + node_degree + node) % num_parts; + } + + pref_nodes[node] = node; + pref_parts[node] = target_partition; + pref_gains[node] = max_votes; + pref_priorities[node] = (max_votes << 16) + (num_parts - (node % num_parts)); + } +} + +extern "C" __global__ void execute_node_assignments( + const int num_nodes, + const int num_parts, + const int max_part_size, + const int *sorted_nodes, + const int *sorted_parts, + int *partition, + int *nodes_in_part +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int i = 0; i < num_nodes; i++) { + int node = sorted_nodes[i]; + int preferred_part = sorted_parts[i]; + + if (node >= 0 && node < num_nodes && preferred_part >= 0 && preferred_part < num_parts) { + bool assigned = false; + for (int attempt = 0; attempt < num_parts; attempt++) { + int try_part = (preferred_part + attempt) % num_parts; + if (nodes_in_part[try_part] < max_part_size) { + partition[node] = try_part; + nodes_in_part[try_part]++; + assigned = true; + break; + } + } + + if (!assigned) { + int fallback_part = node % num_parts; + partition[node] = 
fallback_part; + nodes_in_part[fallback_part]++; + } + } + } + } +} + +extern "C" __global__ void compute_refinement_moves( + const int num_nodes, + const int num_parts, + const int max_part_size, + const int num_hyperedges, + const int *node_hyperedges, + const int *node_offsets, + const int *hyperedge_nodes, + const int *hyperedge_offsets, + const int *partition, + const int *nodes_in_part, + int *move_nodes, + int *move_parts, + int *move_gains, + int *move_priorities, + int *num_valid_moves, + const int round, + unsigned long long *global_edge_flags_low, + unsigned long long *global_edge_flags_high +) { + int node = blockIdx.x * blockDim.x + threadIdx.x; + + if (node < num_nodes) { + move_nodes[node] = node; + move_parts[node] = partition[node]; + move_gains[node] = 0; + move_priorities[node] = 0; + + int current_part = partition[node]; + if (current_part < 0 || current_part >= num_parts || nodes_in_part[current_part] <= 1) return; + + int start = node_offsets[node]; + int end = node_offsets[node + 1]; + int node_degree = end - start; + + if (node_degree > 3000) return; + + bool use_dual_buffer = (num_parts > 64); + + unsigned long long *edge_flags_low = &global_edge_flags_low[node * 3000]; + unsigned long long *edge_flags_high = &global_edge_flags_high[node * 3000]; + + for (int j = 0; j < node_degree; j++) { + edge_flags_low[j] = 0; + if (use_dual_buffer) { + edge_flags_high[j] = 0; + } + + int hyperedge = node_hyperedges[start + j]; + int hedge_start = hyperedge_offsets[hyperedge]; + int hedge_end = hyperedge_offsets[hyperedge + 1]; + + for (int k = hedge_start; k < hedge_end; k++) { + int other_node = hyperedge_nodes[k]; + if (other_node != node && other_node >= 0 && other_node < num_nodes) { + int part = partition[other_node]; + if (part >= 0 && part < num_parts) { + if (use_dual_buffer) { + if (part < 64) { + edge_flags_low[j] |= 1ULL << part; + } else { + edge_flags_high[j] |= 1ULL << (part - 64); + } + } else { + if (part < min(num_parts, 64)) { + 
edge_flags_low[j] |= 1ULL << part; + } + } + } + } + } + } + + int original_cost = 0; + for (int j = 0; j < node_degree; j++) { + unsigned long long current_low = edge_flags_low[j]; + unsigned long long current_high = use_dual_buffer ? edge_flags_high[j] : 0; + + int lambda; + if (use_dual_buffer) { + if (current_part < 64) { + current_low |= 1ULL << current_part; + } else { + current_high |= 1ULL << (current_part - 64); + } + lambda = __popcll(current_low) + __popcll(current_high); + } else { + lambda = __popcll(current_low | (1ULL << current_part)); + } + + if (lambda > 1) { + original_cost += (lambda - 1); + } + } + + int best_gain = 0; + int best_target = current_part; + + for (int offset = 0; offset < num_parts; offset++) { + int target_part = (node + round + offset) % num_parts; + if (target_part == current_part) continue; + if (target_part < 0 || target_part >= num_parts) continue; + if (nodes_in_part[target_part] >= max_part_size) continue; + + int new_cost = 0; + for (int j = 0; j < node_degree; j++) { + unsigned long long target_low = edge_flags_low[j]; + unsigned long long target_high = use_dual_buffer ? 
edge_flags_high[j] : 0; + + int lambda; + if (use_dual_buffer) { + if (target_part < 64) { + target_low |= 1ULL << target_part; + } else { + target_high |= 1ULL << (target_part - 64); + } + lambda = __popcll(target_low) + __popcll(target_high); + } else { + lambda = __popcll(target_low | (1ULL << target_part)); + } + + if (lambda > 1) { + new_cost += (lambda - 1); + } + } + + int basic_gain = original_cost - new_cost; + + int current_size = nodes_in_part[current_part]; + int target_size = nodes_in_part[target_part]; + int balance_bonus = 0; + + if (current_size > target_size + 1) { + if (num_parts >= 120) { + balance_bonus = 2; + } else if (num_parts >= 100) { + balance_bonus = 3; + } else { + balance_bonus = 4; + } + } + + int total_gain = basic_gain + balance_bonus; + + if (total_gain > best_gain || + (total_gain == best_gain && target_part < best_target)) { + best_gain = total_gain; + best_target = target_part; + } + } + + if (best_gain > 0 && best_target != current_part) { + move_parts[node] = best_target; + move_gains[node] = best_gain; + move_priorities[node] = (best_gain << 16) + (num_parts - (node % num_parts)); + atomicAdd(num_valid_moves, 1); + } + } +} + +extern "C" __global__ void execute_refinement_moves( + const int num_valid_moves, + const int *sorted_nodes, + const int *sorted_parts, + const int max_part_size, + int *partition, + int *nodes_in_part, + int *moves_executed +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int i = 0; i < num_valid_moves; i++) { + int node = sorted_nodes[i]; + int target_part = sorted_parts[i]; + + if (node >= 0 && target_part >= 0) { + int current_part = partition[node]; + + if (current_part >= 0 && + nodes_in_part[target_part] < max_part_size && + nodes_in_part[current_part] > 1 && + partition[node] == current_part) { + + partition[node] = target_part; + nodes_in_part[current_part]--; + nodes_in_part[target_part]++; + (*moves_executed)++; + } + } + } + } +} + +extern "C" __global__ void balance_final( + const 
int num_nodes, + const int num_parts, + const int min_part_size, + const int max_part_size, + int *partition, + int *nodes_in_part +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int part = 0; part < num_parts; part++) { + while (nodes_in_part[part] < min_part_size) { + bool moved = false; + for (int other_part = 0; other_part < num_parts && !moved; other_part++) { + if (other_part != part && nodes_in_part[other_part] > min_part_size) { + for (int node = 0; node < num_nodes; node++) { + if (partition[node] == other_part) { + partition[node] = part; + nodes_in_part[other_part]--; + nodes_in_part[part]++; + moved = true; + break; + } + } + } + } + if (!moved) break; + } + } + + for (int part = 0; part < num_parts; part++) { + while (nodes_in_part[part] > max_part_size) { + bool moved = false; + for (int other_part = 0; other_part < num_parts && !moved; other_part++) { + if (other_part != part && nodes_in_part[other_part] < max_part_size) { + for (int node = 0; node < num_nodes; node++) { + if (partition[node] == part) { + partition[node] = other_part; + nodes_in_part[part]--; + nodes_in_part[other_part]++; + moved = true; + break; + } + } + } + } + if (!moved) break; + } + } + } +} diff --git a/tig-algorithms/src/hypergraph/hyper_improved/mod.rs b/tig-algorithms/src/hypergraph/hyper_improved/mod.rs new file mode 100644 index 0000000..d30d6f2 --- /dev/null +++ b/tig-algorithms/src/hypergraph/hyper_improved/mod.rs @@ -0,0 +1,236 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::hypergraph::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let block_size = std::cmp::min(256, prop.maxThreadsPerBlock as u32); + + let hyperedge_cluster_kernel 
= module.load_function("hyperedge_clustering")?; + let compute_preferences_kernel = module.load_function("compute_node_preferences")?; + let execute_assignments_kernel = module.load_function("execute_node_assignments")?; + let compute_moves_kernel = module.load_function("compute_refinement_moves")?; + let execute_moves_kernel = module.load_function("execute_refinement_moves")?; + let balance_kernel = module.load_function("balance_final")?; + + let cfg = LaunchConfig { + grid_dim: ((challenge.num_nodes as u32 + block_size - 1) / block_size, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: 0, + }; + + let one_thread_cfg = LaunchConfig { + grid_dim: (1, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + }; + + let num_hedge_clusters = if challenge.difficulty.num_hyperedges < 50000 { + let thousands = challenge.difficulty.num_hyperedges / 1000; + match thousands { + 5 => 2, 6 => 2, 7 => 2, 8 => 2, 9 => 2, 10 => 2, 11 => 4, 12 => 6, 13 => 2, 14 => 2, + 15 => 6, 16 => 2, 17 => 2, 18 => 2, 19 => 4, 20 => 2, 21 => 4, 22 => 4, 23 => 4, 24 => 2, + 25 => 6, 26 => 2, 27 => 2, 28 => 2, 29 => 2, 30 => 2, 31 => 2, 32 => 2, 33 => 8, 34 => 8, + 35 => 8, 36 => 4, 37 => 8, 38 => 4, 39 => 6, 40 => 2, 41 => 2, 42 => 2, 43 => 6, 44 => 2, + 45 => 2, 46 => 2, 47 => 2, 48 => 2, 49 => 2, + _ => if thousands < 5 { 2 } else { 8 } + } + } else { + std::env::var("CLUSTER_SIZE").unwrap_or_else(|_| "8".to_string()).parse::().unwrap_or(8) + }; + + let mut d_hyperedge_clusters = stream.alloc_zeros::(challenge.difficulty.num_hyperedges as usize)?; + let mut d_partition = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_nodes_in_part = stream.alloc_zeros::(challenge.num_parts as usize)?; + + let mut d_pref_nodes = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_parts = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_gains = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_pref_priorities = 
stream.alloc_zeros::(challenge.num_nodes as usize)?; + + let mut d_move_nodes = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_parts = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_gains = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_move_priorities = stream.alloc_zeros::(challenge.num_nodes as usize)?; + let mut d_num_valid_moves = stream.alloc_zeros::(1)?; + + let num_threads = ((challenge.num_nodes as u32 + block_size - 1) / block_size) * block_size; + let buffer_size = (num_threads * 3000) as usize; + let mut d_global_edge_flags_low = stream.alloc_zeros::(buffer_size)?; + + let mut d_global_edge_flags_high = if challenge.num_parts > 64 { + stream.alloc_zeros::(buffer_size)? + } else { + stream.alloc_zeros::(1)? + }; + + unsafe { + stream.launch_builder(&hyperedge_cluster_kernel) + .arg(&(challenge.difficulty.num_hyperedges as i32)) + .arg(&(num_hedge_clusters as i32)) + .arg(&challenge.d_hyperedge_nodes) + .arg(&challenge.d_hyperedge_offsets) + .arg(&mut d_hyperedge_clusters) + .launch(LaunchConfig { + grid_dim: ((challenge.difficulty.num_hyperedges as u32 + block_size - 1) / block_size, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: 0, + })?; + } + stream.synchronize()?; + + unsafe { + stream.launch_builder(&compute_preferences_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(num_hedge_clusters as i32)) + .arg(&challenge.d_node_hyperedges) + .arg(&challenge.d_node_offsets) + .arg(&d_hyperedge_clusters) + .arg(&challenge.d_hyperedge_offsets) + .arg(&mut d_pref_nodes) + .arg(&mut d_pref_parts) + .arg(&mut d_pref_gains) + .arg(&mut d_pref_priorities) + .launch(cfg.clone())?; + } + stream.synchronize()?; + + let pref_nodes = stream.memcpy_dtov(&d_pref_nodes)?; + let pref_parts = stream.memcpy_dtov(&d_pref_parts)?; + let pref_priorities = stream.memcpy_dtov(&d_pref_priorities)?; + + let mut indices: Vec = (0..challenge.num_nodes as 
usize).collect(); + indices.sort_by(|&a, &b| pref_priorities[b].cmp(&pref_priorities[a])); + + let sorted_nodes: Vec = indices.iter().map(|&i| pref_nodes[i]).collect(); + let sorted_parts: Vec = indices.iter().map(|&i| pref_parts[i]).collect(); + + let d_sorted_nodes = stream.memcpy_stod(&sorted_nodes)?; + let d_sorted_parts = stream.memcpy_stod(&sorted_parts)?; + + unsafe { + stream.launch_builder(&execute_assignments_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(challenge.max_part_size as i32)) + .arg(&d_sorted_nodes) + .arg(&d_sorted_parts) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let mut valid_moves: Vec<(i32, i32, i32)> = Vec::with_capacity(challenge.num_nodes as usize); + let mut sorted_move_nodes: Vec = Vec::with_capacity(challenge.num_nodes as usize); + let mut sorted_move_parts: Vec = Vec::with_capacity(challenge.num_nodes as usize); + + for round in 0..100 { + unsafe { + stream.launch_builder(&compute_moves_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&(challenge.max_part_size as i32)) + .arg(&(challenge.difficulty.num_hyperedges as i32)) + .arg(&challenge.d_node_hyperedges) + .arg(&challenge.d_node_offsets) + .arg(&challenge.d_hyperedge_nodes) + .arg(&challenge.d_hyperedge_offsets) + .arg(&d_partition) + .arg(&d_nodes_in_part) + .arg(&mut d_move_nodes) + .arg(&mut d_move_parts) + .arg(&mut d_move_gains) + .arg(&mut d_move_priorities) + .arg(&mut d_num_valid_moves) + .arg(&round) + .arg(&mut d_global_edge_flags_low) + .arg(&mut d_global_edge_flags_high) + .launch(cfg.clone())?; + } + stream.synchronize()?; + + let num_valid_moves = stream.memcpy_dtov(&d_num_valid_moves)?[0]; + if num_valid_moves == 0 { + break; + } + + let move_gains = stream.memcpy_dtov(&d_move_gains)?; + let valid_indices: Vec = move_gains.iter().enumerate() + .filter(|(_, &gain)| gain > 0) + .map(|(i, _)| i) + 
.collect(); + + if valid_indices.is_empty() { + break; + } + + let move_nodes = stream.memcpy_dtov(&d_move_nodes)?; + let move_parts = stream.memcpy_dtov(&d_move_parts)?; + let move_priorities = stream.memcpy_dtov(&d_move_priorities)?; + + valid_moves.clear(); + for &i in &valid_indices { + valid_moves.push((move_nodes[i], move_parts[i], move_priorities[i])); + } + + valid_moves.sort_by(|a, b| b.2.cmp(&a.2)); + + sorted_move_nodes.clear(); + sorted_move_parts.clear(); + sorted_move_nodes.extend(valid_moves.iter().map(|&(node, _, _)| node)); + sorted_move_parts.extend(valid_moves.iter().map(|&(_, part, _)| part)); + + let d_sorted_move_nodes = stream.memcpy_stod(&sorted_move_nodes)?; + let d_sorted_move_parts = stream.memcpy_stod(&sorted_move_parts)?; + let mut d_moves_executed = stream.alloc_zeros::(1)?; + + unsafe { + stream.launch_builder(&execute_moves_kernel) + .arg(&(sorted_move_nodes.len() as i32)) + .arg(&d_sorted_move_nodes) + .arg(&d_sorted_move_parts) + .arg(&(challenge.max_part_size as i32)) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .arg(&mut d_moves_executed) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let moves_executed = stream.memcpy_dtov(&d_moves_executed)?[0]; + if moves_executed == 0 { + break; + } + } + + unsafe { + stream.launch_builder(&balance_kernel) + .arg(&(challenge.num_nodes as i32)) + .arg(&(challenge.num_parts as i32)) + .arg(&1) + .arg(&(challenge.max_part_size as i32)) + .arg(&mut d_partition) + .arg(&mut d_nodes_in_part) + .launch(one_thread_cfg.clone())?; + } + stream.synchronize()?; + + let partition = stream.memcpy_dtov(&d_partition)?; + let partition_u32: Vec = partition.iter().map(|&x| x as u32).collect(); + + let _ = save_solution(&Solution { partition: partition_u32 }); + return Ok(()); +} diff --git a/tig-algorithms/src/hypergraph/mod.rs b/tig-algorithms/src/hypergraph/mod.rs index 9beec59..bc03753 100644 --- a/tig-algorithms/src/hypergraph/mod.rs +++ 
b/tig-algorithms/src/hypergraph/mod.rs @@ -1,8 +1,10 @@ -// c005_a001 +pub mod hyper_cluster; +pub use hyper_cluster as c005_a001; // c005_a002 -// c005_a003 +pub mod hyper_improved; +pub use hyper_improved as c005_a003; // c005_a004 diff --git a/tig-algorithms/src/knapsack/classic_quadkp/README.md b/tig-algorithms/src/knapsack/classic_quadkp/README.md new file mode 100644 index 0000000..ec14bd7 --- /dev/null +++ b/tig-algorithms/src/knapsack/classic_quadkp/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** classic_quadkp +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/classic_quadkp/mod.rs b/tig-algorithms/src/knapsack/classic_quadkp/mod.rs new file mode 100644 index 0000000..3350446 --- /dev/null +++ b/tig-algorithms/src/knapsack/classic_quadkp/mod.rs @@ -0,0 +1,161 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: 
Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let vertex_count = challenge.weights.len(); + + let mut edge_costs: Vec<(usize, f32)> = (0..vertex_count) + .map(|flow_index| { + let total_flow = challenge.values[flow_index] as i32 + + challenge.interaction_values[flow_index].iter().sum::(); + let cost = total_flow as f32 / challenge.weights[flow_index] as f32; + (flow_index, cost) + }) + .collect(); + + edge_costs.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + + let mut coloring = Vec::with_capacity(vertex_count); + let mut uncolored = Vec::with_capacity(vertex_count); + let mut current_entropy = 0; + let mut current_temperature = 0; + + for &(flow_index, _) in &edge_costs { + if current_entropy + challenge.weights[flow_index] <= challenge.max_weight { + current_entropy += challenge.weights[flow_index]; + current_temperature += challenge.values[flow_index] as i32; + + for &colored in &coloring { + current_temperature += challenge.interaction_values[flow_index][colored]; + } + coloring.push(flow_index); + } else { + uncolored.push(flow_index); + } + } + + let mut mutation_rates = vec![0; vertex_count]; + for flow_index in 0..vertex_count { + mutation_rates[flow_index] = challenge.values[flow_index] as i32; + for &colored in &coloring { + mutation_rates[flow_index] += challenge.interaction_values[flow_index][colored]; + } + } + + let max_generations = 100; + let mut cooling_schedule = vec![0; vertex_count]; + + for _ in 0..max_generations { + let mut best_mutation = 0; + let mut best_crossover = None; + + for uncolored_index in 0..uncolored.len() { + let mutant = uncolored[uncolored_index]; + if cooling_schedule[mutant] > 0 { + continue; + } + + unsafe { + let mutant_fitness = 
*mutation_rates.get_unchecked(mutant); + let min_entropy_reduction = *challenge.weights.get_unchecked(mutant) as i32 - (challenge.max_weight as i32 - current_entropy as i32); + + if mutant_fitness < 0 { + continue; + } + + for colored_index in 0..coloring.len() { + let gene_to_remove = *coloring.get_unchecked(colored_index); + if *cooling_schedule.get_unchecked(gene_to_remove) > 0 { + continue; + } + + if min_entropy_reduction > 0 { + let removed_entropy = *challenge.weights.get_unchecked(gene_to_remove) as i32; + if removed_entropy < min_entropy_reduction { + continue; + } + } + + let fitness_change = mutant_fitness - *mutation_rates.get_unchecked(gene_to_remove) + - *challenge.interaction_values.get_unchecked(mutant).get_unchecked(gene_to_remove); + + if fitness_change > best_mutation { + best_mutation = fitness_change; + best_crossover = Some((uncolored_index, colored_index)); + } + } + } + } + + if let Some((uncolored_index, colored_index)) = best_crossover { + let gene_to_add = uncolored[uncolored_index]; + let gene_to_remove = coloring[colored_index]; + + coloring.swap_remove(colored_index); + uncolored.swap_remove(uncolored_index); + coloring.push(gene_to_add); + uncolored.push(gene_to_remove); + + current_temperature += best_mutation; + current_entropy = current_entropy + challenge.weights[gene_to_add] - challenge.weights[gene_to_remove]; + + unsafe { + for flow_index in 0..vertex_count { + *mutation_rates.get_unchecked_mut(flow_index) += + challenge.interaction_values.get_unchecked(flow_index).get_unchecked(gene_to_add) - + challenge.interaction_values.get_unchecked(flow_index).get_unchecked(gene_to_remove); + } + } + + cooling_schedule[gene_to_add] = 3; + cooling_schedule[gene_to_remove] = 3; + } else { + break; + } + + if current_temperature as u32 >= challenge.baseline_value { + return Ok(Some(SubSolution { items: coloring })); + } + + for cooling_rate in cooling_schedule.iter_mut() { + *cooling_rate = if *cooling_rate > 0 { *cooling_rate - 1 } else { 0 
}; + } + } + + if current_temperature as u32 >= challenge.baseline_value { + Ok(Some(SubSolution { items: coloring })) + } else { + Ok(None) + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/dynamic/README.md b/tig-algorithms/src/knapsack/dynamic/README.md new file mode 100644 index 0000000..2962f15 --- /dev/null +++ b/tig-algorithms/src/knapsack/dynamic/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** dynamic +* **Copyright:** 2024 Uncharted Trading Limited +* **Identity of Submitter:** Uncharted Trading Limited +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/dynamic/mod.rs b/tig-algorithms/src/knapsack/dynamic/mod.rs new file mode 100644 index 0000000..4d70966 --- /dev/null +++ b/tig-algorithms/src/knapsack/dynamic/mod.rs @@ -0,0 +1,89 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match 
solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let max_weight = challenge.max_weight; + let baseline_value = challenge.baseline_value; + let num_items = challenge.difficulty.num_items; + + // Sort items by value-to-weight ratio in descending order + let mut sorted_items: Vec = (0..num_items).collect(); + sorted_items.sort_by(|&a, &b| { + let ratio_a = challenge.values[a] as f64 / challenge.weights[a] as f64; + let ratio_b = challenge.values[b] as f64 / challenge.weights[b] as f64; + ratio_b.partial_cmp(&ratio_a).unwrap() + }); + + // Initialize combinations with a single empty combo + let mut combinations: Vec<(Vec, u32, u32)> = vec![(vec![false; num_items], 0, 0)]; + + let mut items = Vec::new(); + for &item in &sorted_items { + // Create new combos with the current item + let mut new_combinations: Vec<(Vec, u32, u32)> = combinations + .iter() + .map(|(combo, value, weight)| { + let mut new_combo = combo.clone(); + new_combo[item] = true; + ( + new_combo, + value + challenge.values[item], + weight + challenge.weights[item], + ) + }) + .filter(|&(_, _, weight)| weight <= max_weight) // Keep only combos within weight limit + .collect(); + + // Check if any new combination meets the minimum value requirement + if let Some((combo, _, _)) = new_combinations + .iter() + .find(|&&(_, value, _)| value >= baseline_value) + { + items = combo + .iter() + .enumerate() + .filter_map(|(i, &included)| if included { Some(i) } else { None }) + .collect(); + break; + } + + // Merge new_combinations with existing combinations + combinations.append(&mut new_combinations); + + // Deduplicate combinations by keeping the highest value for each weight + combinations.sort_by(|a, b| a.2.cmp(&b.2).then_with(|| b.1.cmp(&a.1))); // Sort by weight, then by value + combinations.dedup_by(|a, b| a.2 == b.2 
&& a.1 <= b.1); // Deduplicate by weight, keeping highest value + } + + Ok(Some(SubSolution { items })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/fast_and_fun/README.md b/tig-algorithms/src/knapsack/fast_and_fun/README.md new file mode 100644 index 0000000..60db0f8 --- /dev/null +++ b/tig-algorithms/src/knapsack/fast_and_fun/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** fast_and_fun +* **Copyright:** 2025 Thibaut Vidal +* **Identity of Submitter:** Thibaut Vidal +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/fast_and_fun/mod.rs b/tig-algorithms/src/knapsack/fast_and_fun/mod.rs new file mode 100644 index 0000000..9dfb550 --- /dev/null +++ b/tig-algorithms/src/knapsack/fast_and_fun/mod.rs @@ -0,0 +1,452 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use std::cmp::Ordering; +use tig_challenges::knapsack::*; + +#[derive(Clone, Copy)] +struct Params { + diff_lim: usize, + core_half_dp: usize, + core_half_ls: usize, + n_maxils: usize, +} +impl Default for Params { + fn default() -> Self { + Self { + diff_lim: 3, + core_half_dp: 30, + core_half_ls: 50, + n_maxils: 3, + } + } +} + +#[inline] +fn weight_of(ch: &Challenge, items: &[usize]) -> i64 { + items.iter().map(|&i| ch.weights[i] as i64).sum() +} + +fn round0_scores(ch: &Challenge, out: &mut [i32]) { + let n = ch.difficulty.num_items; + for i in 0..n { + let row_sum: i32 = 
ch.interaction_values[i].iter().sum(); + out[i] = ch.values[i] as i32 + row_sum; + } +} + +struct State<'a> { + ch: &'a Challenge, + selected_bit: Vec, + contrib: Vec, + total_value: i64, + total_weight: i64, +} + +impl<'a> State<'a> { + fn new_empty(ch: &'a Challenge) -> Self { + let n = ch.difficulty.num_items; + let mut contrib = vec![0i32; n]; + for i in 0..n { + contrib[i] = ch.values[i] as i32; + } + Self { + ch, + selected_bit: vec![false; n], + contrib, + total_value: 0, + total_weight: 0, + } + } + fn selected_items(&self) -> Vec { + (0..self.ch.difficulty.num_items) + .filter(|&i| self.selected_bit[i]) + .collect() + } + #[inline] + fn capacity(&self) -> i64 { + self.ch.max_weight as i64 + } + #[inline] + fn slack(&self) -> i64 { + self.capacity() - self.total_weight + } + fn add_item(&mut self, i: usize) { + self.selected_bit[i] = true; + self.total_value += self.contrib[i] as i64; + self.total_weight += self.ch.weights[i] as i64; + let n = self.ch.difficulty.num_items; + for k in 0..n { + self.contrib[k] += self.ch.interaction_values[k][i] as i32; + } + } + fn remove_item(&mut self, j: usize) { + self.total_value -= self.contrib[j] as i64; + self.total_weight -= self.ch.weights[j] as i64; + let n = self.ch.difficulty.num_items; + for k in 0..n { + self.contrib[k] -= self.ch.interaction_values[k][j] as i32; + } + self.selected_bit[j] = false; + } + fn replace_item(&mut self, rm: usize, cand: usize) { + let w_c = self.ch.weights[cand] as i64; + if self.slack() >= w_c { + self.add_item(cand); + self.remove_item(rm); + } else { + self.remove_item(rm); + self.add_item(cand); + } + } + fn restore_snapshot( + &mut self, + snapshot_sel: &[usize], + snapshot_contrib: Vec, + snap_value: i64, + ) { + self.selected_bit.fill(false); + for &i in snapshot_sel { + self.selected_bit[i] = true; + } + self.contrib = snapshot_contrib; + self.total_value = snap_value; + self.total_weight = weight_of(self.ch, snapshot_sel); + } + #[inline] + fn remove_from_vec(v: &mut Vec, 
x: usize) { + if let Some(pos) = v.iter().position(|&y| y == x) { + v.swap_remove(pos); + } + } +} + +fn build_initial_solution(state: &mut State, order_scores: &[i32]) { + let n = state.ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (order_scores[a] as f64) / (state.ch.weights[a] as f64); + let db = (order_scores[b] as f64) / (state.ch.weights[b] as f64); + db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + for &i in &order { + let w = state.ch.weights[i] as i64; + if state.total_weight + w <= state.capacity() { + state.add_item(i); + } + } +} + +fn integer_core_target(ch: &Challenge, contrib: &[i32], core_half_dp: usize) -> Vec { + let n = ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (contrib[a] as f64) / (ch.weights[a] as f64); + let db = (contrib[b] as f64) / (ch.weights[b] as f64); + db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + let mut pref_w: i64 = 0; + let mut break_idx: usize = order.len().saturating_sub(1); + for (pos, &i) in order.iter().enumerate() { + let w = ch.weights[i] as i64; + if pref_w + w > ch.max_weight as i64 { + break_idx = pos; + break; + } + pref_w += w; + } + let left = break_idx.saturating_sub(core_half_dp); + let right = (break_idx + core_half_dp + 1).min(n); + let locked = &order[..left]; + let core = &order[left..right]; + let used_locked: i64 = locked.iter().map(|&i| ch.weights[i] as i64).sum(); + let rem_cap = ((ch.max_weight as i64) - used_locked).max(0) as usize; + let myw = rem_cap; + let myk = core.len(); + let mut dp: Vec = vec![i64::MIN / 4; myw + 1]; + dp[0] = 0; + let mut choose: Vec = vec![0u8; myk * (myw + 1)]; + let mut w_hi: usize = 0; + for (t, &it) in core.iter().enumerate() { + let wt = ch.weights[it] as usize; + if wt > myw { + continue; + } + let val = contrib[it] as i64; + let new_hi = (w_hi + wt).min(myw); + for w in (wt..=new_hi).rev() { + let cand = dp[w - wt] + val; 
+ if cand > dp[w] { + dp[w] = cand; + choose[t * (myw + 1) + w] = 1; + } + } + w_hi = new_hi; + } + let mut selected: Vec = locked.to_vec(); + let mut w_star = (0..=myw).max_by_key(|&w| dp[w]).unwrap_or(0); + for t in (0..myk).rev() { + let it = core[t]; + let wt = ch.weights[it] as usize; + if wt <= w_star && choose[t * (myw + 1) + w_star] == 1 { + selected.push(it); + w_star -= wt; + } + } + selected.sort_unstable(); + selected +} + +fn apply_dp_target_via_ops(state: &mut State, target_sel: &[usize]) { + let n = state.ch.difficulty.num_items; + let mut in_target = vec![false; n]; + for &i in target_sel { + in_target[i] = true; + } + let mut to_remove: Vec = Vec::new(); + for i in 0..n { + if state.selected_bit[i] && !in_target[i] { + to_remove.push(i); + } + } + let mut to_add: Vec = Vec::new(); + for &i in target_sel { + if !state.selected_bit[i] { + to_add.push(i); + } + } + for &r in &to_remove { + state.remove_item(r); + } + for &a in &to_add { + state.add_item(a); + } +} + +fn build_ls_windows(state: &State, core_half_ls: usize) -> (Vec, Vec) { + let n = state.ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (state.contrib[a] as f64) / (state.ch.weights[a] as f64); + let db = (state.contrib[b] as f64) / (state.ch.weights[b] as f64); + db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + let mut best_unused = Vec::with_capacity(core_half_ls); + for &i in &order { + if !state.selected_bit[i] { + best_unused.push(i); + if best_unused.len() >= core_half_ls { + break; + } + } + } + let mut worst_used = Vec::with_capacity(core_half_ls); + for &i in order.iter().rev() { + if state.selected_bit[i] { + worst_used.push(i); + if worst_used.len() >= core_half_ls { + break; + } + } + } + (best_unused, worst_used) +} + +fn apply_best_add_windowed( + state: &mut State, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let slack = state.slack(); + if slack <= 0 { + return false; + } + let mut 
best: Option<(usize, i64)> = None; + for &cand in &*best_unused { + let w = state.ch.weights[cand] as i64; + if w > slack { + continue; + } + let delta = state.contrib[cand] as i64; + if delta > 0 && best.map_or(true, |(_, bd)| delta > bd) { + best = Some((cand, delta)); + } + } + if let Some((cand, _)) = best { + state.add_item(cand); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap11_equal_windowed( + state: &mut State, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let mut best: Option<(usize, usize, i64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm]; + for &cand in &*best_unused { + if state.ch.weights[cand] != w_rm { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 && best.map_or(true, |(_, _, bd)| delta > bd) { + best = Some((cand, rm, delta)); + } + } + } + if let Some((cand, rm, _)) = best { + state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap_diff_reduce_windowed( + state: &mut State, + params: &Params, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let mut best: Option<(usize, usize, i64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &*best_unused { + let w_c = state.ch.weights[cand] as i64; + if w_c >= w_rm { + continue; + } + let dw = (w_rm - w_c) as usize; + if dw == 0 || dw > params.diff_lim { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 && best.map_or(true, |(_, _, bd)| delta > bd) { + best = Some((cand, rm, delta)); + } + } + } + if let Some((cand, rm, _)) = best { + 
state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap_diff_increase_windowed( + state: &mut State, + params: &Params, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + if state.slack() <= 0 { + return false; + } + let mut best: Option<(usize, usize, f64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &*best_unused { + let w_c = state.ch.weights[cand] as i64; + if w_c <= w_rm { + continue; + } + let dw = (w_c - w_rm) as i64; + if dw as usize > params.diff_lim { + continue; + } + if state.slack() < dw { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 { + let ratio = (delta as f64) / (dw as f64); + if best.map_or(true, |(_, _, br)| ratio > br) { + best = Some((cand, rm, ratio)); + } + } + } + } + if let Some((cand, rm, _)) = best { + state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn local_search_vnd(state: &mut State, params: &Params) { + let (mut best_unused, mut worst_used) = build_ls_windows(state, params.core_half_ls); + loop { + if apply_best_add_windowed(state, &mut best_unused, &mut worst_used) { + continue; + } + if apply_best_swap_diff_reduce_windowed(state, params, &mut best_unused, &mut worst_used) { + continue; + } + if apply_best_swap11_equal_windowed(state, &mut best_unused, &mut worst_used) { + continue; + } + if apply_best_swap_diff_increase_windowed(state, params, &mut best_unused, &mut worst_used) + { + continue; + } + break; + } +} + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: 
&Option>, +) -> anyhow::Result<()> { + let params = Params::default(); + let n = challenge.difficulty.num_items; + let mut build_scores = vec![0i32; n]; + round0_scores(challenge, &mut build_scores); + + let mut state = State::new_empty(challenge); + build_initial_solution(&mut state, &build_scores); + local_search_vnd(&mut state, ¶ms); + + for _it in 0..params.n_maxils { + let prev_sel = state.selected_items(); + let prev_val = state.total_value; + let prev_contrib = state.contrib.clone(); + let target = integer_core_target(challenge, &state.contrib, params.core_half_dp); + apply_dp_target_via_ops(&mut state, &target); + local_search_vnd(&mut state, ¶ms); + if state.total_value <= prev_val { + state.restore_snapshot(&prev_sel, prev_contrib, prev_val); + break; + } + } + + let mut items = state.selected_items(); + items.sort_unstable(); + let _ = save_solution(&Solution { items }); + Ok(()) +} diff --git a/tig-algorithms/src/knapsack/knap_one/README.md b/tig-algorithms/src/knapsack/knap_one/README.md new file mode 100644 index 0000000..c801eab --- /dev/null +++ b/tig-algorithms/src/knapsack/knap_one/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** knap_one +* **Copyright:** 2024 VNX +* **Identity of Submitter:** VNX +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knap_one/mod.rs b/tig-algorithms/src/knapsack/knap_one/mod.rs new file mode 100644 index 0000000..7222691 --- /dev/null +++ 
b/tig-algorithms/src/knapsack/knap_one/mod.rs @@ -0,0 +1,193 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use tig_challenges::knapsack::*; + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + const WAIT_ITERATIONS: usize = 5; + const MAX_STAGNANT_ITERATIONS: usize = 5; + + let num_items = challenge.weights.len(); + let mut selected_items = vec![false; num_items]; + let mut total_value: i32 = 0; + let mut total_weight: u32 = 0; + let mut wait_map = vec![None; num_items]; + let values: Vec = challenge.values.iter().map(|&v| v as i32).collect(); + let weights: Vec = challenge.weights.iter().map(|&w| w as f64).collect(); + + let mut items_by_ratio: Vec<(usize, f64)> = (0..num_items) + .map(|i| { + let adjusted_value = values[i]; + let ratio = adjusted_value as f64 / weights[i]; + (i, ratio) + }) + .collect(); + items_by_ratio.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + let mut interaction_gains = vec![0; num_items]; + + let mut iteration_count = 0; + let mut stagnant_iterations = 0; + let mut max_total_value = total_value; + + loop { + iteration_count += 1; + + for entry in &mut wait_map { + if let Some(iter) = entry { + if *iter <= iteration_count { + *entry = None; + } + } + } + + let mut available_items: Vec<_> = 
items_by_ratio + .iter() + .filter(|(i, _)| !selected_items[*i] && wait_map[*i].is_none()) + .collect(); + + let mut improvement_found = false; + let mut index = 0; + + while index < available_items.len() { + let (i, _) = available_items[index]; + let individual_value = values[*i]; + let interaction_gain = interaction_gains[*i]; + let gain = individual_value + interaction_gain; + if gain >= individual_value { + selected_items[*i] = true; + total_value += gain; + total_weight += challenge.weights[*i]; + + for j in 0..num_items { + interaction_gains[j] += challenge.interaction_values[*i][j]; + } + + improvement_found = true; + available_items.remove(index); + } else { + index += 1; + } + } + + if !improvement_found { + for &(i, _) in &available_items { + let new_item_value = values[*i] + interaction_gains[*i]; + let new_item_weight = challenge.weights[*i]; + + if new_item_value <= values[*i] { + continue; + } + + for j in 0..num_items { + if selected_items[j] { + let removal_loss = values[j] + interaction_gains[j]; + if total_value + new_item_value - removal_loss > total_value { + for k in 0..num_items { + interaction_gains[k] -= challenge.interaction_values[j][k]; + } + selected_items[j] = false; + total_value -= removal_loss; + total_weight -= challenge.weights[j]; + + selected_items[*i] = true; + total_value += new_item_value; + total_weight += new_item_weight; + + for k in 0..num_items { + interaction_gains[k] += challenge.interaction_values[*i][k]; + } + + wait_map[j] = Some(iteration_count + WAIT_ITERATIONS); + improvement_found = true; + break; + } + } + } + + if improvement_found { + break; + } else { + return Ok(None); + } + } + } + + if total_weight > challenge.max_weight { + let mut item_loss_ratios = Vec::new(); + for i in 0..num_items { + if selected_items[i] { + let loss = values[i] + interaction_gains[i]; + let ratio = weights[i] / (loss as f64).max(1.0); + item_loss_ratios.push((ratio, i)); + } + } + item_loss_ratios.sort_unstable_by(|a, b| 
a.0.partial_cmp(&b.0).unwrap()); + + while total_weight > challenge.max_weight { + if let Some((_, item)) = item_loss_ratios.pop() { + for k in 0..num_items { + interaction_gains[k] -= challenge.interaction_values[item][k]; + } + selected_items[item] = false; + total_weight -= challenge.weights[item]; + total_value -= values[item] + interaction_gains[item]; + wait_map[item] = Some(iteration_count + WAIT_ITERATIONS); + } else { + break; + } + } + } + + if total_value >= challenge.baseline_value as i32 && total_weight <= challenge.max_weight { + let result_items: Vec = selected_items + .iter() + .enumerate() + .filter(|&(_, &is_selected)| is_selected) + .map(|(i, _)| i) + .collect(); + + return Ok(Some(SubSolution { + items: result_items, + })); + } + + if total_value > max_total_value { + max_total_value = total_value; + stagnant_iterations = 0; + } else { + stagnant_iterations += 1; + } + + if stagnant_iterations >= MAX_STAGNANT_ITERATIONS { + return Ok(None); + } + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapheudp/README.md b/tig-algorithms/src/knapsack/knapheudp/README.md new file mode 100644 index 0000000..00d580a --- /dev/null +++ b/tig-algorithms/src/knapsack/knapheudp/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** knapheudp +* **Copyright:** 2024 AllFather +* **Identity of Submitter:** AllFather +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git 
a/tig-algorithms/src/knapsack/knapheudp/mod.rs b/tig-algorithms/src/knapsack/knapheudp/mod.rs new file mode 100644 index 0000000..470f8e5 --- /dev/null +++ b/tig-algorithms/src/knapsack/knapheudp/mod.rs @@ -0,0 +1,104 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let max_weight = challenge.max_weight as usize; + let baseline_value = challenge.baseline_value as usize; + let num_items = challenge.difficulty.num_items; + + let weights: Vec = challenge.weights.iter().map(|&w| w as usize).collect(); + let values: Vec = challenge.values.iter().map(|&v| v as usize).collect(); + + let mut sorted_items: Vec<(usize, f64)> = (0..num_items) + .map(|i| (i, values[i] as f64 / weights[i] as f64)) + .collect(); + sorted_items.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + + let mut upper_bound = 0; + let mut remaining_weight = max_weight; + for &(item_index, ratio) in &sorted_items { + let item_weight = weights[item_index]; + let item_value = values[item_index]; + + if item_weight <= remaining_weight { + upper_bound += item_value; + remaining_weight -= item_weight; + } else { + upper_bound += (ratio * remaining_weight as f64).floor() as usize; + break; + } + } + + if 
upper_bound < baseline_value { + return Ok(None); + } + + let mut dp = vec![0; max_weight + 1]; + let mut selected = vec![vec![false; max_weight + 1]; num_items]; + + for (i, &(item_index, _)) in sorted_items.iter().enumerate() { + let weight = weights[item_index]; + let value = values[item_index]; + + for w in (weight..=max_weight).rev() { + let new_value = dp[w - weight] + value; + if new_value > dp[w] { + dp[w] = new_value; + selected[i][w] = true; + } + } + + if dp[max_weight] >= baseline_value { + break; + } + } + + if dp[max_weight] < baseline_value { + return Ok(None); + } + + let mut items = Vec::new(); + let mut w = max_weight; + for i in (0..num_items).rev() { + if selected[i][w] { + let item_index = sorted_items[i].0; + items.push(item_index); + w -= weights[item_index]; + } + if w == 0 { + break; + } + } + + Ok(Some(SubSolution { items })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapmaxxing/README.md b/tig-algorithms/src/knapsack/knapmaxxing/README.md new file mode 100644 index 0000000..8447e81 --- /dev/null +++ b/tig-algorithms/src/knapsack/knapmaxxing/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** knapmaxxing +* **Copyright:** 2024 Dominic Kennedy +* **Identity of Submitter:** Dominic Kennedy +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapmaxxing/mod.rs b/tig-algorithms/src/knapsack/knapmaxxing/mod.rs new file mode 100644 index 
0000000..54bd893 --- /dev/null +++ b/tig-algorithms/src/knapsack/knapmaxxing/mod.rs @@ -0,0 +1,109 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let max_weight = challenge.max_weight as usize; + let baseline_value = challenge.baseline_value as usize; + let num_items = challenge.difficulty.num_items; + + let max_weight_plus_one = max_weight + 1; + + let weights: Vec = challenge.weights.iter().map(|weight| *weight as usize).collect(); + let values: Vec = challenge.values.iter().map(|value| *value as usize).collect(); + + let mut sorted_items: Vec<(usize, f64)> = (0..num_items) + .map(|i| (i, values[i] as f64 / weights[i] as f64)) + .collect(); + sorted_items.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + + let mut upper_bound = 0; + let mut remaining_weight = max_weight; + for &(item_index, ratio) in &sorted_items { + let item_weight = weights[item_index]; + let item_value = values[item_index]; + + if item_weight <= remaining_weight { + upper_bound += item_value; + remaining_weight -= item_weight; + } else { + upper_bound += (ratio * remaining_weight as f64).floor() as usize; + break; + } + } + + if upper_bound < baseline_value { + return Ok(None); + } + + 
let num_states = (num_items + 1) * (max_weight_plus_one); + let mut dp = vec![0; num_states]; + + for i in 1..=num_items { + let (item_index, _) = sorted_items[i - 1]; + let item_weight = weights[item_index]; + let item_value = values[item_index]; + + let i_minus_one_times_max_weight_plus_one = (i - 1) * max_weight_plus_one; + let i_times_max_weight_plus_one = i * max_weight_plus_one; + for w in (item_weight..=max_weight).rev() { + let prev_state = i_minus_one_times_max_weight_plus_one + w; + let curr_state = i_times_max_weight_plus_one + w; + dp[curr_state] = dp[prev_state].max(dp[prev_state - item_weight] + item_value); + } + } + + let mut items = Vec::with_capacity(num_items); + let mut i = num_items; + let mut w = max_weight; + let mut total_value = 0; + while i > 0 && total_value < baseline_value { + let (item_index, _) = sorted_items[i - 1]; + let item_weight = weights[item_index]; + let item_value = values[item_index]; + + let prev_state = (i - 1) * (max_weight_plus_one) + w; + let curr_state = i * (max_weight_plus_one) + w; + if dp[curr_state] != dp[prev_state] { + items.push(item_index); + w -= item_weight; + total_value += item_value; + } + i -= 1; + } + + if total_value >= baseline_value { + Ok(Some(SubSolution { items })) + } else { + Ok(None) + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapsack_redone/README.md b/tig-algorithms/src/knapsack/knapsack_redone/README.md new file mode 100644 index 0000000..a089866 --- /dev/null +++ b/tig-algorithms/src/knapsack/knapsack_redone/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** knapsack_redone +* **Copyright:** 2025 frogmarch +* **Identity of Submitter:** frogmarch +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial 
License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapsack_redone/mod.rs b/tig-algorithms/src/knapsack/knapsack_redone/mod.rs new file mode 100644 index 0000000..6e15b1f --- /dev/null +++ b/tig-algorithms/src/knapsack/knapsack_redone/mod.rs @@ -0,0 +1,393 @@ +use anyhow::Result; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + +fn calculate_density_variance(item_densities: &[(usize, f32)]) -> f32 { + if item_densities.len() < 2 { + return 0.5; + } + + let mut sum = 0.0; + for (_, density) in item_densities { + sum += *density; + } + let mean = sum / item_densities.len() as f32; + + let mut variance_sum = 0.0; + for (_, density) in item_densities { + let diff = *density - mean; + variance_sum += diff * diff; + } + let variance = variance_sum / item_densities.len() as f32; + + (variance.sqrt() / mean.abs()).clamp(0.1, 1.0) +} + +fn compute_solution( + challenge: &Challenge, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, +) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let mut inv_weights: Vec = Vec::with_capacity(challenge.weights.len()); + for &w in &challenge.weights { + inv_weights.push(1.0 / w as f32); + } + + let rcl_max = if challenge.difficulty.num_items <= 165 { + 9 + } else { + 10 + }; + + let mut item_densities: Vec<(usize, f32)> = Vec::with_capacity(unselected_items.len()); + for &idx in unselected_items.iter() { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + item_densities.push((idx, ratio)); + } + + let density_variance = calculate_density_variance(&item_densities); + let adaptive_exponent = 1.4 + (density_variance - 
0.6).clamp(-0.4, 0.2); + + let mut probs: Vec = Vec::with_capacity(rcl_max); + for rank in 0..rcl_max { + probs.push(1.0 / ((rank + 1) as f32).powf(adaptive_exponent)); + } + + let mut acc_probs: Vec = Vec::with_capacity(rcl_max); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + + let mut max_item_weight = 0; + for &w in &challenge.weights { + if w > max_item_weight { + max_item_weight = w; + } + } + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(rcl_max); + let total_prob = if actual_rcl_size == rcl_max { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size] + .binary_search_by(|prob| prob.partial_cmp(&random_threshold).unwrap()) + { + Ok(i) | Err(i) => i, + }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + + let selected_item; + if selected_rank < list_size + && !selected_items.is_empty() + && top_ranks[selected_rank] < item_densities.len() + { + selected_rank = top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities + .select_nth_unstable_by(selected_rank, |a, b| b.1.partial_cmp(&a.1).unwrap()); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight + && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items 
{ + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(density.0); + density.1 += interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + } + } + unselected_items.clear(); + for i in 0..challenge.difficulty.num_items { + unselected_items.push(i); + } + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + let mut weight_item_pairs: Vec<(u32, usize)> = Vec::with_capacity(unselected_items.len()); + for &idx in unselected_items.iter() { + weight_item_pairs.push((challenge.weights[idx], idx)); + } + weight_item_pairs.sort_unstable_by_key(|&(weight, _)| weight); + + unselected_items.clear(); + for (_, idx) in weight_item_pairs { + unselected_items.push(idx); + } + + let local_search_iterations = if challenge.difficulty.num_items <= 165 { + 60 + } else { + 100 + }; + let mut feasible_adds = Vec::with_capacity(50); + let mut feasible_swaps = Vec::with_capacity(100); + + for _ in 0..local_search_iterations { + let mut improved = false; + + if total_weight < challenge.max_weight { + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + if new_w > 
challenge.max_weight { + break; + } + let new_val = total_value + contribution_list[cand]; + if new_val > total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item); + } + } + } + feasible_adds.clear(); + } + + let free_capacity = challenge.max_weight as i32 - total_weight as i32; + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + let cand_w = challenge.weights[cand_item] as i32; + if rem_w + free_capacity < cand_w { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff > 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + + let new_item_weight = challenge.weights[new_item]; + let remove_item_weight = challenge.weights[remove_item]; + + let current_pos = unsel_idx; + let mut target_pos = current_pos; + if new_item_weight != remove_item_weight { + target_pos = unselected_items + .binary_search_by(|&probe| challenge.weights[probe].cmp(&remove_item_weight)) + .unwrap_or_else(|e| e); + } + if current_pos != target_pos { + unsafe 
{ + let ptr = unselected_items.as_mut_ptr(); + if target_pos < current_pos { + std::ptr::copy( + ptr.add(target_pos), + ptr.add(target_pos + 1), + current_pos - target_pos, + ); + } else { + target_pos = target_pos - 1; + std::ptr::copy( + ptr.add(current_pos + 1), + ptr.add(current_pos), + target_pos - current_pos, + ); + } + } + } + unselected_items[target_pos] = remove_item; + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = + total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item) + - *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(remove_item); + } + } + } + feasible_swaps.clear(); + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some(( + Solution { + items: selected_items, + }, + total_value, + ))) + } +} + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let num_iterations: i32 = 5; + let mut rng = + StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..num_iterations { + let mut best_local_solution: Option = None; + let mut best_local_value = 0; + + let k = 5; + for _ in 0..k { + let mut unselected_items: Vec = + Vec::with_capacity(challenge.difficulty.num_items); + for i in 0..challenge.difficulty.num_items { + unselected_items.push(i); + } + + let mut contribution_list: Vec = Vec::with_capacity(challenge.values.len()); + for &v in &challenge.values { + contribution_list.push(v as i32); + } + + let sol_result = 
compute_solution( + challenge, + &mut contribution_list, + &mut unselected_items, + &mut rng, + )?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_local_value { + best_local_value = value; + best_local_solution = Some(Solution { + items: solution.items.clone(), + }); + } + } + + if let Some(local_solution) = best_local_solution { + if best_local_value > best_value { + best_value = best_local_value; + best_solution = Some(local_solution); + } + } + } + + if let Some(solution) = best_solution { + let _ = save_solution(&solution); + } + Ok(()) +} diff --git a/tig-algorithms/src/knapsack/knapsplatt/README.md b/tig-algorithms/src/knapsack/knapsplatt/README.md new file mode 100644 index 0000000..f5c5f80 --- /dev/null +++ b/tig-algorithms/src/knapsack/knapsplatt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** knapsplatt +* **Copyright:** 2025 Jeeperz +* **Identity of Submitter:** Jeeperz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/knapsplatt/mod.rs b/tig-algorithms/src/knapsack/knapsplatt/mod.rs new file mode 100644 index 0000000..6613bed --- /dev/null +++ b/tig-algorithms/src/knapsack/knapsplatt/mod.rs @@ -0,0 +1,674 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use std::cmp::Ordering; +use tig_challenges::knapsack::*; + +#[derive(Clone, Copy)] +struct Params { + diff_lim: usize, + core_half_dp: usize, + core_half_ls: usize, + 
n_maxils: usize, + polish_k: usize, +} +impl Params { + fn for_problem_size(num_items: usize) -> Self { + let n_maxils = if num_items <= 600 { + 3 + } else if num_items <= 800 { + 4 + } else { + 5 + }; + + Self { + diff_lim: 4, + core_half_dp: 30, + core_half_ls: 35, + n_maxils, + polish_k: 10, + } + } +} +impl Default for Params { + fn default() -> Self { + Self::for_problem_size(400) + } +} + +#[inline] +fn weight_of(ch: &Challenge, items: &[usize]) -> i64 { + items.iter().map(|&i| ch.weights[i] as i64).sum() +} + +fn round0_scores(ch: &Challenge, out: &mut [i32]) { + let n = ch.difficulty.num_items; + for i in 0..n { + let row_sum: i32 = ch.interaction_values[i].iter().sum(); + out[i] = ch.values[i] as i32 + row_sum; + } +} + +struct State<'a> { + ch: &'a Challenge, + selected_bit: Vec, + contrib: Vec, + total_value: i64, + total_weight: i64, +} + +impl<'a> State<'a> { + fn new_empty(ch: &'a Challenge) -> Self { + let n = ch.difficulty.num_items; + let mut contrib = vec![0i32; n]; + for i in 0..n { + contrib[i] = ch.values[i] as i32; + } + Self { + ch, + selected_bit: vec![false; n], + contrib, + total_value: 0, + total_weight: 0, + } + } + fn selected_items(&self) -> Vec { + (0..self.ch.difficulty.num_items) + .filter(|&i| self.selected_bit[i]) + .collect() + } + #[inline] + fn capacity(&self) -> i64 { + self.ch.max_weight as i64 + } + #[inline] + fn slack(&self) -> i64 { + self.capacity() - self.total_weight + } + fn add_item(&mut self, i: usize) { + self.selected_bit[i] = true; + self.total_value += self.contrib[i] as i64; + self.total_weight += self.ch.weights[i] as i64; + let n = self.ch.difficulty.num_items; + for k in 0..n { + self.contrib[k] += self.ch.interaction_values[k][i] as i32; + } + } + fn remove_item(&mut self, j: usize) { + self.total_value -= self.contrib[j] as i64; + self.total_weight -= self.ch.weights[j] as i64; + let n = self.ch.difficulty.num_items; + for k in 0..n { + self.contrib[k] -= self.ch.interaction_values[k][j] as i32; + } + 
self.selected_bit[j] = false; + } + fn replace_item(&mut self, rm: usize, cand: usize) { + let w_c = self.ch.weights[cand] as i64; + if self.slack() >= w_c { + self.add_item(cand); + self.remove_item(rm); + } else { + self.remove_item(rm); + self.add_item(cand); + } + } + fn restore_snapshot( + &mut self, + snapshot_sel: &[usize], + snapshot_contrib: Vec, + snap_value: i64, + ) { + self.selected_bit.fill(false); + for &i in snapshot_sel { + self.selected_bit[i] = true; + } + self.contrib = snapshot_contrib; + self.total_value = snap_value; + self.total_weight = weight_of(self.ch, snapshot_sel); + } + #[inline] + fn remove_from_vec(v: &mut Vec, x: usize) { + if let Some(pos) = v.iter().position(|&y| y == x) { + v.swap_remove(pos); + } + } +} + +fn build_initial_solution(state: &mut State, order_scores: &[i32]) { + let n = state.ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (order_scores[a] as f64) / (state.ch.weights[a] as f64); + let db = (order_scores[b] as f64) / (state.ch.weights[b] as f64); + db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + for &i in &order { + let w = state.ch.weights[i] as i64; + if state.total_weight + w <= state.capacity() { + state.add_item(i); + } + } +} + +fn integer_core_target(ch: &Challenge, contrib: &[i32], core_half_dp: usize) -> Vec { + let n = ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (contrib[a] as f64) / (ch.weights[a] as f64); + let db = (contrib[b] as f64) / (ch.weights[b] as f64); + db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + let mut pref_w: i64 = 0; + let mut break_idx: usize = order.len().saturating_sub(1); + for (pos, &i) in order.iter().enumerate() { + let w = ch.weights[i] as i64; + if pref_w + w > ch.max_weight as i64 { + break_idx = pos; + break; + } + pref_w += w; + } + let left = break_idx.saturating_sub(core_half_dp); + let right = (break_idx + core_half_dp + 
1).min(n); + let locked = &order[..left]; + let core = &order[left..right]; + let used_locked: i64 = locked.iter().map(|&i| ch.weights[i] as i64).sum(); + let rem_cap = ((ch.max_weight as i64) - used_locked).max(0) as usize; + let myw = rem_cap; + let myk = core.len(); + let mut dp: Vec = vec![i64::MIN / 4; myw + 1]; + dp[0] = 0; + let mut choose: Vec = vec![0u8; myk * (myw + 1)]; + let mut w_hi: usize = 0; + for (t, &it) in core.iter().enumerate() { + let wt = ch.weights[it] as usize; + if wt > myw { + continue; + } + let val = contrib[it] as i64; + let new_hi = (w_hi + wt).min(myw); + for w in (wt..=new_hi).rev() { + let cand = dp[w - wt] + val; + if cand > dp[w] { + dp[w] = cand; + choose[t * (myw + 1) + w] = 1; + } + } + w_hi = new_hi; + } + let mut selected: Vec = locked.to_vec(); + let mut w_star = (0..=myw).max_by_key(|&w| dp[w]).unwrap_or(0); + for t in (0..myk).rev() { + let it = core[t]; + let wt = ch.weights[it] as usize; + if wt <= w_star && choose[t * (myw + 1) + w_star] == 1 { + selected.push(it); + w_star -= wt; + } + } + selected.sort_unstable(); + selected +} + +fn apply_dp_target_via_ops(state: &mut State, target_sel: &[usize]) { + let n = state.ch.difficulty.num_items; + let mut in_target = vec![false; n]; + for &i in target_sel { + in_target[i] = true; + } + let mut to_remove: Vec = Vec::new(); + for i in 0..n { + if state.selected_bit[i] && !in_target[i] { + to_remove.push(i); + } + } + let mut to_add: Vec = Vec::new(); + for &i in target_sel { + if !state.selected_bit[i] { + to_add.push(i); + } + } + for &r in &to_remove { + state.remove_item(r); + } + for &a in &to_add { + state.add_item(a); + } +} + +fn build_ls_windows(state: &State, core_half_ls: usize) -> (Vec, Vec) { + let n = state.ch.difficulty.num_items; + let mut order: Vec = (0..n).collect(); + order.sort_unstable_by(|&a, &b| { + let da = (state.contrib[a] as f64) / (state.ch.weights[a] as f64); + let db = (state.contrib[b] as f64) / (state.ch.weights[b] as f64); + 
db.partial_cmp(&da).unwrap_or(Ordering::Equal) + }); + let mut best_unused = Vec::with_capacity(core_half_ls); + for &i in &order { + if !state.selected_bit[i] { + best_unused.push(i); + if best_unused.len() >= core_half_ls { + break; + } + } + } + let mut worst_used = Vec::with_capacity(core_half_ls); + for &i in order.iter().rev() { + if state.selected_bit[i] { + worst_used.push(i); + if worst_used.len() >= core_half_ls { + break; + } + } + } + (best_unused, worst_used) +} + +fn apply_best_add_windowed( + state: &mut State, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let slack = state.slack(); + if slack <= 0 { + return false; + } + let mut best: Option<(usize, i64)> = None; + for &cand in &*best_unused { + let w = state.ch.weights[cand] as i64; + if w > slack { + continue; + } + let delta = state.contrib[cand] as i64; + if delta > 0 && best.map_or(true, |(_, bd)| delta > bd) { + best = Some((cand, delta)); + } + } + if let Some((cand, _)) = best { + state.add_item(cand); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap11_equal_windowed( + state: &mut State, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let mut best: Option<(usize, usize, i64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm]; + for &cand in &*best_unused { + if state.ch.weights[cand] != w_rm { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 && best.map_or(true, |(_, _, bd)| delta > bd) { + best = Some((cand, rm, delta)); + } + } + } + if let Some((cand, rm, _)) = best { + state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap_diff_reduce_windowed( + state: &mut State, + params: &Params, + 
best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + let mut best: Option<(usize, usize, i64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &*best_unused { + let w_c = state.ch.weights[cand] as i64; + if w_c >= w_rm { + continue; + } + let dw = (w_rm - w_c) as usize; + if dw == 0 || dw > params.diff_lim { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 && best.map_or(true, |(_, _, bd)| delta > bd) { + best = Some((cand, rm, delta)); + } + } + } + if let Some((cand, rm, _)) = best { + state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn apply_best_swap_diff_increase_windowed( + state: &mut State, + params: &Params, + best_unused: &mut Vec, + worst_used: &mut Vec, +) -> bool { + if state.slack() <= 0 { + return false; + } + let mut best: Option<(usize, usize, f64)> = None; + for &rm in &*worst_used { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &*best_unused { + let w_c = state.ch.weights[cand] as i64; + if w_c <= w_rm { + continue; + } + let dw = (w_c - w_rm) as i64; + if dw as usize > params.diff_lim { + continue; + } + if state.slack() < dw { + continue; + } + let delta = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if delta > 0 { + let ratio = (delta as f64) / (dw as f64); + if best.map_or(true, |(_, _, br)| ratio > br) { + best = Some((cand, rm, ratio)); + } + } + } + } + if let Some((cand, rm, _)) = best { + state.replace_item(rm, cand); + State::remove_from_vec(worst_used, rm); + best_unused.push(rm); + State::remove_from_vec(best_unused, cand); + worst_used.push(cand); + true + } else { + false + } +} + +fn polish_once(state: &mut State, params: &Params) { + let n = 
state.ch.difficulty.num_items; + let k = params.polish_k.min(n).max(16); + let mut idx: Vec = (0..n).collect(); + idx.sort_unstable_by(|&a, &b| { + let ra = (state.contrib[a] as f64) / (state.ch.weights[a] as f64); + let rb = (state.contrib[b] as f64) / (state.ch.weights[b] as f64); + rb.partial_cmp(&ra).unwrap_or(Ordering::Equal) + }); + let mut unused_top: Vec = Vec::new(); + for &i in &idx { + if !state.selected_bit[i] { + unused_top.push(i); + if unused_top.len() >= k { + break; + } + } + } + let mut used_worst: Vec = Vec::new(); + for &i in idx.iter().rev() { + if state.selected_bit[i] { + used_worst.push(i); + if used_worst.len() >= k { + break; + } + } + } + + let mut best_add: Option<(usize, i64)> = None; + let slack0 = state.slack(); + if slack0 > 0 { + for &i in &unused_top { + let w = state.ch.weights[i] as i64; + if w <= slack0 { + let d = state.contrib[i] as i64; + if d > 0 && best_add.map_or(true, |(_, bd)| d > bd) { + best_add = Some((i, d)); + } + } + } + } + if let Some((i, _)) = best_add { + state.add_item(i); + return; + } + + let mut best_swap: Option<(usize, usize, i64)> = None; + for &rm in &used_worst { + for &cand in &unused_top { + if state.ch.weights[cand] != state.ch.weights[rm] { + continue; + } + let d = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if d > 0 && best_swap.map_or(true, |(_, _, bd)| d > bd) { + best_swap = Some((cand, rm, d)); + } + } + } + if let Some((cand, rm, _)) = best_swap { + state.replace_item(rm, cand); + return; + } + + let mut best_swap_red: Option<(usize, usize, i64)> = None; + for &rm in &used_worst { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &unused_top { + let w_c = state.ch.weights[cand] as i64; + if w_c >= w_rm { + continue; + } + let dw = (w_rm - w_c) as usize; + if dw == 0 || dw > params.diff_lim { + continue; + } + let d = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - 
(state.ch.interaction_values[cand][rm] as i64); + if d > 0 && best_swap_red.map_or(true, |(_, _, bd)| d > bd) { + best_swap_red = Some((cand, rm, d)); + } + } + } + if let Some((cand, rm, _)) = best_swap_red { + state.replace_item(rm, cand); + return; + } + + if state.slack() > 0 { + let mut best_swap_inc: Option<(usize, usize, f64, i64)> = None; + for &rm in &used_worst { + let w_rm = state.ch.weights[rm] as i64; + for &cand in &unused_top { + let w_c = state.ch.weights[cand] as i64; + if w_c <= w_rm { + continue; + } + let dw = w_c - w_rm; + if dw as usize > params.diff_lim { + continue; + } + if state.slack() < dw { + continue; + } + let d = (state.contrib[cand] as i64) + - (state.contrib[rm] as i64) + - (state.ch.interaction_values[cand][rm] as i64); + if d > 0 { + let r = (d as f64) / (dw as f64); + if best_swap_inc.map_or(true, |(_, _, br, bd)| d > bd || (d == bd && r > br)) { + best_swap_inc = Some((cand, rm, r, d)); + } + } + } + } + if let Some((cand, rm, _, _)) = best_swap_inc { + state.replace_item(rm, cand); + return; + } + } +} + +fn strategic_perturb_and_rebuild(state: &mut State, params: &Params) { + let sel = state.selected_items(); + let m = sel.len(); + if m == 0 { + return; + } + let mut bad = sel.clone(); + bad.sort_unstable_by(|&a, &b| { + let ra = (state.contrib[a] as f64) / (state.ch.weights[a] as f64); + let rb = (state.contrib[b] as f64) / (state.ch.weights[b] as f64); + ra.partial_cmp(&rb).unwrap_or(Ordering::Equal) + }); + let mut rem = (m / 10).max(1); + if rem > 10 { + rem = 10; + } + rem = rem.min(bad.len()); + for i in 0..rem { + let r = bad[i]; + if state.selected_bit[r] { + state.remove_item(r); + } + } + let (best_unused, _) = build_ls_windows(state, params.core_half_ls); + for &cand in &best_unused { + let w = state.ch.weights[cand] as i64; + if w <= state.slack() && (state.contrib[cand] as i64) > 0 { + state.add_item(cand); + } + } +} + +fn local_search_vnd(state: &mut State, params: &Params) { + let (mut best_unused, mut 
worst_used) = build_ls_windows(state, params.core_half_ls); + loop { + if apply_best_add_windowed(state, &mut best_unused, &mut worst_used) { + continue; + } + + if state.slack() > 0 { + let mut best_pair: Option<(usize, usize, i64)> = None; + let slack = state.slack(); + let bu_len = best_unused.len(); + for a_i in 0..bu_len { + let i = best_unused[a_i]; + let wi = state.ch.weights[i] as i64; + if wi >= slack { + continue; + } + let ci = state.contrib[i] as i64; + for a_j in (a_i + 1)..bu_len { + let j = best_unused[a_j]; + let wj = state.ch.weights[j] as i64; + let wsum = wi + wj; + if wsum > slack { + continue; + } + let cj = state.contrib[j] as i64; + let syn = state.ch.interaction_values[i][j] as i64; + let delta = ci + cj + syn; + if delta > 0 && best_pair.map_or(true, |(_, _, bd)| delta > bd) { + best_pair = Some((i, j, delta)); + } + } + } + if let Some((i, j, _)) = best_pair { + state.add_item(i); + state.add_item(j); + State::remove_from_vec(&mut best_unused, i); + State::remove_from_vec(&mut best_unused, j); + worst_used.push(i); + worst_used.push(j); + continue; + } + } + + if apply_best_swap_diff_reduce_windowed(state, params, &mut best_unused, &mut worst_used) { + continue; + } + if apply_best_swap11_equal_windowed(state, &mut best_unused, &mut worst_used) { + continue; + } + if apply_best_swap_diff_increase_windowed(state, params, &mut best_unused, &mut worst_used) + { + continue; + } + break; + } +} + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let n = challenge.difficulty.num_items; + let params = Params::for_problem_size(n); + let mut build_scores = vec![0i32; n]; + round0_scores(challenge, &mut build_scores); + + let mut state = State::new_empty(challenge); + build_initial_solution(&mut state, &build_scores); + local_search_vnd(&mut state, ¶ms); + polish_once(&mut state, ¶ms); + + for _it in 0..params.n_maxils { + let 
prev_sel = state.selected_items(); + let prev_val = state.total_value; + let prev_contrib = state.contrib.clone(); + + let target = integer_core_target(challenge, &state.contrib, params.core_half_dp); + apply_dp_target_via_ops(&mut state, &target); + local_search_vnd(&mut state, ¶ms); + polish_once(&mut state, ¶ms); + + if state.total_value > prev_val { + continue; + } + + state.restore_snapshot(&prev_sel, prev_contrib.clone(), prev_val); + strategic_perturb_and_rebuild(&mut state, ¶ms); + local_search_vnd(&mut state, ¶ms); + polish_once(&mut state, ¶ms); + + if state.total_value <= prev_val { + state.restore_snapshot(&prev_sel, prev_contrib, prev_val); + break; + } + } + + let mut items = state.selected_items(); + items.sort_unstable(); + let _ = save_solution(&Solution { items }); + Ok(()) +} diff --git a/tig-algorithms/src/knapsack/mod.rs b/tig-algorithms/src/knapsack/mod.rs index 33ddf59..61c27aa 100644 --- a/tig-algorithms/src/knapsack/mod.rs +++ b/tig-algorithms/src/knapsack/mod.rs @@ -1,4 +1,5 @@ -// c003_a001 +pub mod dynamic; +pub use dynamic as c003_a001; // c003_a002 @@ -10,7 +11,8 @@ // c003_a006 -// c003_a007 +pub mod knapmaxxing; +pub use knapmaxxing as c003_a007; // c003_a008 @@ -34,7 +36,8 @@ // c003_a018 -// c003_a019 +pub mod knapheudp; +pub use knapheudp as c003_a019; // c003_a020 @@ -98,13 +101,15 @@ // c003_a050 -// c003_a051 +pub mod classic_quadkp; +pub use classic_quadkp as c003_a051; // c003_a052 // c003_a053 -// c003_a054 +pub mod quadkp_improved; +pub use quadkp_improved as c003_a054; // c003_a055 @@ -116,7 +121,8 @@ // c003_a059 -// c003_a060 +pub mod knap_one; +pub use knap_one as c003_a060; // c003_a061 @@ -124,9 +130,11 @@ // c003_a063 -// c003_a064 +pub mod quadkp_maximize; +pub use quadkp_maximize as c003_a064; -// c003_a065 +pub mod relative_quad_fast; +pub use relative_quad_fast as c003_a065; // c003_a066 @@ -134,7 +142,8 @@ // c003_a068 -// c003_a069 +pub mod new_relative_ultra; +pub use new_relative_ultra as c003_a069; // 
c003_a070 @@ -146,13 +155,16 @@ // c003_a074 -// c003_a075 +pub mod relative_opt_fast; +pub use relative_opt_fast as c003_a075; -// c003_a076 +pub mod relative_opt_mid; +pub use relative_opt_mid as c003_a076; // c003_a077 -// c003_a078 +pub mod relative_opt_optima; +pub use relative_opt_optima as c003_a078; // c003_a079 @@ -174,7 +186,8 @@ // c003_a088 -// c003_a089 +pub mod relative_raw_ultra; +pub use relative_raw_ultra as c003_a089; // c003_a090 @@ -182,17 +195,21 @@ // c003_a092 -// c003_a093 +pub mod knapsack_redone; +pub use knapsack_redone as c003_a093; -// c003_a094 +pub mod native_knapsack; +pub use native_knapsack as c003_a094; // c003_a095 // c003_a096 -// c003_a097 +pub mod fast_and_fun; +pub use fast_and_fun as c003_a097; -// c003_a098 +pub mod knapsplatt; +pub use knapsplatt as c003_a098; // c003_a099 diff --git a/tig-algorithms/src/knapsack/native_knapsack/README.md b/tig-algorithms/src/knapsack/native_knapsack/README.md new file mode 100644 index 0000000..a67f85c --- /dev/null +++ b/tig-algorithms/src/knapsack/native_knapsack/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** native_knapsack +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/native_knapsack/mod.rs b/tig-algorithms/src/knapsack/native_knapsack/mod.rs new file mode 100644 index 0000000..0d4ac33 --- /dev/null +++ 
b/tig-algorithms/src/knapsack/native_knapsack/mod.rs @@ -0,0 +1,396 @@ +use anyhow::Result; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + +#[inline] +fn calculate_density_variance(item_densities: &[(usize, f32)]) -> f32 { + if item_densities.len() < 2 { + return 0.5; + } + + let mut sum = 0.0; + let mut variance_sum = 0.0; + let len = item_densities.len() as f32; + + for (_, density) in item_densities { + sum += *density; + } + let mean = sum / len; + + for (_, density) in item_densities { + let diff = *density - mean; + variance_sum += diff * diff; + } + let variance = variance_sum / len; + + (variance.sqrt() / mean.abs()).clamp(0.1, 1.0) +} + +fn compute_solution( + challenge: &Challenge, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, +) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let inv_weights: Vec = challenge.weights.iter().map(|&w| 1.0 / w as f32).collect(); + let rcl_max = 10; + let max_item_weight = *challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = Vec::with_capacity(unselected_items.len()); + for &idx in unselected_items.iter() { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + item_densities.push((idx, ratio)); + } + + let density_variance = calculate_density_variance(&item_densities); + + let num_items = challenge.difficulty.num_items as f32; + + let mut densities: Vec = item_densities.iter().map(|(_, d)| *d).collect(); + densities.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + let adaptive_range = if num_items >= 500.0 { + if densities.len() >= 10 { + let p10_idx = densities.len() / 10; + let p90_idx = densities.len() * 9 / 10; + let p10 = densities[p10_idx]; + let p90 = densities[p90_idx]; + let density_range = (p90 - p10).abs(); + (density_range * 0.3).clamp(0.8, 2.2) + } else { + 1.5 + } + } else { + 1.5 + }; + + let 
base_exponent = if num_items >= 500.0 { + 1.4 - ((num_items - 500.0) / 200.0).sqrt() * 0.2 + } else { + 1.4 + }; + let adaptive_exponent = + base_exponent + (density_variance - 0.6).clamp(-adaptive_range, adaptive_range); + + let mut probs: Vec = Vec::with_capacity(rcl_max); + for rank in 0..rcl_max { + probs.push(1.0 / ((rank + 1) as f32).powf(adaptive_exponent)); + } + + let mut acc_probs: Vec = Vec::with_capacity(rcl_max); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(rcl_max); + let total_prob = if actual_rcl_size == rcl_max { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size] + .binary_search_by(|prob| prob.partial_cmp(&random_threshold).unwrap()) + { + Ok(i) | Err(i) => i, + }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + + let selected_item; + if selected_rank < list_size + && !selected_items.is_empty() + && top_ranks[selected_rank] < item_densities.len() + { + selected_rank = top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities + .select_nth_unstable_by(selected_rank, |a, b| b.1.partial_cmp(&a.1).unwrap()); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight + && *idx != selected_item + }); + } else { + 
item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(density.0); + density.1 += interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + } + } + + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + selected_items.sort_unstable_by(|a, b| b.cmp(a)); + for &selected in &selected_items { + unselected_items.swap_remove(selected); + } + + unselected_items.sort_unstable_by_key(|&idx| challenge.weights[idx]); + + let mut feasible_adds = Vec::with_capacity(100); + let mut feasible_swaps = Vec::with_capacity(200); + + for _ in 0..50 { + let mut improved = false; + + if total_weight < challenge.max_weight { + feasible_adds.clear(); + for (i, &cand) in unselected_items.iter().enumerate() { + if total_weight + challenge.weights[cand] > challenge.max_weight { + break; + } + let new_val = total_value + contribution_list[cand]; + if new_val > total_value { + feasible_adds.push(i); + } + } + + if !feasible_adds.is_empty() { + let add_idx = feasible_adds[rng.gen_range(0..feasible_adds.len())]; + let new_item = unselected_items[add_idx]; + + unselected_items.remove(add_idx); + selected_items.push(new_item); + total_weight += 
challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item); + } + } + } + } + + if !improved { + feasible_swaps.clear(); + let free_capacity = challenge.max_weight - total_weight; + + 'outer: for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item]; + let available_weight = free_capacity + rem_w; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + if challenge.weights[cand_item] > available_weight { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff > 0 { + feasible_swaps.push((i, j)); + if feasible_swaps.len() >= 50 { + break 'outer; + } + } + } + } + + if !feasible_swaps.is_empty() { + let (unsel_idx, sel_idx) = feasible_swaps[rng.gen_range(0..feasible_swaps.len())]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + unselected_items.remove(unsel_idx); + + let insert_pos = unselected_items + .binary_search_by_key(&challenge.weights[remove_item], |&idx| { + challenge.weights[idx] + }) + .unwrap_or_else(|e| e); + unselected_items.insert(insert_pos, remove_item); + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = + total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item) + - *challenge + .interaction_values + .get_unchecked(x) + 
.get_unchecked(remove_item); + } + } + } + } + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some(( + Solution { + items: selected_items, + }, + total_value, + ))) + } +} + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let num_iterations = 11; + let mut rng = + StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let mut best_solution: Option = None; + let mut best_value = 0; + + let mut unselected_items: Vec = Vec::with_capacity(challenge.difficulty.num_items); + let mut contribution_list: Vec = Vec::with_capacity(challenge.values.len()); + + for _outer_iter in 0..num_iterations { + let mut best_local_solution: Option = None; + let mut best_local_value = 0; + + let k = 2; + for _ in 0..k { + unselected_items.clear(); + unselected_items.reserve(challenge.difficulty.num_items); + for i in 0..challenge.difficulty.num_items { + unselected_items.push(i); + } + + contribution_list.clear(); + contribution_list.reserve(challenge.values.len()); + for &v in &challenge.values { + contribution_list.push(v as i32); + } + + let sol_result = compute_solution( + challenge, + &mut contribution_list, + &mut unselected_items, + &mut rng, + )?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_local_value { + best_local_value = value; + best_local_solution = Some(Solution { + items: solution.items.clone(), + }); + } + } + + if let Some(local_solution) = best_local_solution { + if best_local_value > best_value { + best_value = best_local_value; + best_solution = Some(local_solution); + } + } + } + + if let Some(solution) = best_solution { + let _ = save_solution(&solution); + } + Ok(()) +} + +#[cfg(feature = "cuda")] +mod gpu_optimisation { + use super::*; + use cudarc::driver::*; + use std::{collections::HashMap, sync::Arc}; 
+ use tig_challenges::CudaKernel; + + pub const KERNEL: Option = None; + + pub fn cuda_solve_challenge( + challenge: &Challenge, + dev: &Arc, + mut funcs: HashMap<&'static str, CudaFunction>, + ) -> anyhow::Result> { + solve_challenge(challenge) + } +} +#[cfg(feature = "cuda")] +pub use gpu_optimisation::{cuda_solve_challenge, KERNEL}; diff --git a/tig-algorithms/src/knapsack/new_relative_ultra/README.md b/tig-algorithms/src/knapsack/new_relative_ultra/README.md new file mode 100644 index 0000000..bb03b49 --- /dev/null +++ b/tig-algorithms/src/knapsack/new_relative_ultra/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** new_relative_ultra +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/new_relative_ultra/mod.rs b/tig-algorithms/src/knapsack/new_relative_ultra/mod.rs new file mode 100644 index 0000000..16bef6e --- /dev/null +++ b/tig-algorithms/src/knapsack/new_relative_ultra/mod.rs @@ -0,0 +1,279 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use rand::{rngs::StdRng, 
Rng, SeedableRng}; + use tig_challenges::knapsack::*; + + fn compute_solution( + challenge: &SubInstance, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, + ) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 / challenge.weights[idx] as f32; + (idx, ratio) + }) + .collect(); + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size].binary_search_by(|prob| { + prob.partial_cmp(&random_threshold).unwrap() + }) { Ok(i) | Err(i) => i }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + + item_densities.select_nth_unstable_by(selected_rank, |a, b| { + b.1.partial_cmp(&a.1).unwrap() + }); + let selected_item = item_densities[selected_rank].0; + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(selected_item); + } + } + + if total_weight + 
max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for density in item_densities.iter_mut() { + let interaction = *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(density.0); + let w = *challenge.weights.get_unchecked(density.0) as f32; + density.1 += interaction as f32 / w; + } + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + let local_search_iterations = 150; + for _ in 0..local_search_iterations { + let mut improved = false; + + let mut feasible_adds = Vec::new(); + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w <= challenge.max_weight && new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.swap_remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item); + } + } + } + + let mut feasible_swaps = Vec::new(); + for (i, &cand_item) in unselected_items.iter().enumerate() { + let min_needed = + challenge.weights[cand_item] as i32 - (challenge.max_weight as i32 - total_weight as i32); + for (j, &rem_item) in 
selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + if rem_w < min_needed { + continue; + } + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + unselected_items.swap_remove(unsel_idx); + selected_items.push(new_item); + unselected_items.push(remove_item); + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item) - + *challenge.interaction_values.get_unchecked(x).get_unchecked(remove_item); + } + } + } + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some((SubSolution { items: selected_items }, total_value))) + } + } + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes( + challenge.seed[..8].try_into().unwrap(), + )); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..50 { + let mut unselected_items: Vec = (0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = + compute_solution(challenge, &mut contribution_list, &mut unselected_items, &mut rng)?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_value { + best_value = value; + best_solution = Some(SubSolution { items: solution.items.clone() }); + } + + let threshold = lookup_threshold(challenge.difficulty.num_items); + if (challenge.baseline_value as f32) * (1.0 - threshold * 0.008) >= best_value as f32 { + return Ok(None); + } + else if challenge.baseline_value <= best_value as u32 { + return Ok(best_solution); + } + } + + Ok(best_solution) + } + + fn lookup_threshold(num_items: usize) -> f32 { + let points = vec![ + (100, 1.071), (105, 1.015), (110, 0.973), (120, 0.882), + (125, 0.791), (130, 0.770), (135, 0.760), (140, 0.749), + (145, 0.700), (150, 0.616), (155, 0.574), (160, 0.532), + (165, 0.511), (170, 0.494), (175, 0.485), (180, 0.476), + (190, 0.448), (195, 0.434), (200, 0.427), (205, 0.420), + (210, 0.420), (215, 0.385), (220, 0.350), (225, 0.347), + (230, 0.343), (235, 0.343), (240, 0.338), (245, 0.334), + (250, 0.329) + ]; + + points.iter() + .filter(|&&(x, _)| x <= num_items) + .max_by_key(|&&(x, _)| x) + .unwrap() + .1 + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/quadkp_improved/README.md b/tig-algorithms/src/knapsack/quadkp_improved/README.md new file mode 100644 index 
0000000..f112184 --- /dev/null +++ b/tig-algorithms/src/knapsack/quadkp_improved/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** quadkp_improved +* **Copyright:** 2024 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/quadkp_improved/mod.rs b/tig-algorithms/src/knapsack/quadkp_improved/mod.rs new file mode 100644 index 0000000..ebb26ea --- /dev/null +++ b/tig-algorithms/src/knapsack/quadkp_improved/mod.rs @@ -0,0 +1,184 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + // TIG's UI uses the pattern `tig_challenges::` to automatically detect your algorithm's challenge + use anyhow::Result; + use rand::{SeedableRng, Rng, rngs::StdRng}; + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let vertex_count = challenge.weights.len(); + + let mut item_scores: Vec<(usize, f32)> = (0..vertex_count) + .map(|index| { + let interaction_sum: i32 = challenge.interaction_values[index].iter().sum(); + let secondary_score = challenge.values[index] as f32 / challenge.weights[index] as f32; + let combined_score = (challenge.values[index] as f32 * 0.75 + interaction_sum as f32 * 0.15 + secondary_score * 0.1) + / challenge.weights[index] as f32; + (index, combined_score) + }) + .collect(); + + item_scores.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + + let mut selected_items = Vec::with_capacity(vertex_count); + let mut unselected_items = Vec::with_capacity(vertex_count); + let mut current_weight = 0; + let mut current_value = 0; + + for &(index, _) in &item_scores { + if current_weight + challenge.weights[index] <= challenge.max_weight { + current_weight += challenge.weights[index]; + current_value += challenge.values[index] as i32; + + for &selected in &selected_items { + current_value += challenge.interaction_values[index][selected]; + } + selected_items.push(index); + } else { + unselected_items.push(index); + } + } + + let mut mutation_rates = vec![0; vertex_count]; + for index in 0..vertex_count { + mutation_rates[index] = challenge.values[index] as i32; + for &selected in &selected_items { + mutation_rates[index] += challenge.interaction_values[index][selected]; + } + } + + let max_generations = 150; + let mut cooling_schedule = vec![0; vertex_count]; + let mut rng = StdRng::seed_from_u64(challenge.seed[0] as u64); + + for generation in 0..max_generations { + let mut best_gain = 0; + let mut best_swap = None; + + for (u_index, &mutant) in unselected_items.iter().enumerate() { + if cooling_schedule[mutant] > 0 { + continue; + } + + let mutant_fitness = 
mutation_rates[mutant]; + let extra_weight = challenge.weights[mutant] as i32 - (challenge.max_weight as i32 - current_weight as i32); + + if mutant_fitness < 0 { + continue; + } + + for (c_index, &selected) in selected_items.iter().enumerate() { + if cooling_schedule[selected] > 0 { + continue; + } + + if extra_weight > 0 && (challenge.weights[selected] as i32) < extra_weight { + continue; + } + + let interaction_penalty = (challenge.interaction_values[mutant][selected] as f32 * 0.3) as i32; + let fitness_gain = mutant_fitness - mutation_rates[selected] - interaction_penalty; + + if fitness_gain > best_gain { + best_gain = fitness_gain; + best_swap = Some((u_index, c_index)); + } + } + } + + if let Some((u_index, c_index)) = best_swap { + let added_item = unselected_items[u_index]; + let removed_item = selected_items[c_index]; + + selected_items.swap_remove(c_index); + unselected_items.swap_remove(u_index); + selected_items.push(added_item); + unselected_items.push(removed_item); + + current_value += best_gain; + current_weight = current_weight + challenge.weights[added_item] - challenge.weights[removed_item]; + + if current_weight > challenge.max_weight { + continue; + } + + for index in 0..vertex_count { + mutation_rates[index] += challenge.interaction_values[index][added_item] + - challenge.interaction_values[index][removed_item]; + } + + cooling_schedule[added_item] = 3; + cooling_schedule[removed_item] = 3; + } + + if current_value as u32 >= challenge.baseline_value { + return Ok(Some(SubSolution { items: selected_items })); + } + + for cooling_rate in cooling_schedule.iter_mut() { + *cooling_rate = if *cooling_rate > 0 { *cooling_rate - 1 } else { 0 }; + } + + if current_value as u32 > (challenge.baseline_value * 9 / 10) { + let high_potential_items: Vec = unselected_items + .iter() + .filter(|&&i| challenge.values[i] as i32 > (challenge.baseline_value as i32 / 4)) + .copied() + .collect(); + + for &item in high_potential_items.iter().take(2) { + if 
current_weight + challenge.weights[item] <= challenge.max_weight { + selected_items.push(item); + unselected_items.retain(|&x| x != item); + current_weight += challenge.weights[item]; + current_value += challenge.values[item] as i32; + + for &selected in &selected_items { + if selected != item { + current_value += challenge.interaction_values[item][selected]; + } + } + + if current_value as u32 >= challenge.baseline_value { + return Ok(Some(SubSolution { items: selected_items })); + } + } + } + } + } + + if current_value as u32 >= challenge.baseline_value && current_weight <= challenge.max_weight { + Ok(Some(SubSolution { items: selected_items })) + } else { + Ok(None) + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/quadkp_maximize/README.md b/tig-algorithms/src/knapsack/quadkp_maximize/README.md new file mode 100644 index 0000000..acb97cc --- /dev/null +++ b/tig-algorithms/src/knapsack/quadkp_maximize/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** quadkp_maximize +* **Copyright:** 2024 codes_r_us +* **Identity of Submitter:** codes_r_us +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/quadkp_maximize/mod.rs b/tig-algorithms/src/knapsack/quadkp_maximize/mod.rs new file mode 100644 index 0000000..c0cb604 --- /dev/null +++ b/tig-algorithms/src/knapsack/quadkp_maximize/mod.rs @@ -0,0 +1,211 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use 
tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use tig_challenges::knapsack::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + const WAIT_ITERATIONS: usize = 5; + const MAX_STAGNANT_ITERATIONS: usize = 15; + + let num_items = challenge.weights.len(); + let mut selected_items = vec![false; num_items]; + let mut total_value: i32 = 0; + let mut total_weight: u32 = 0; + let mut wait_map = vec![None; num_items]; + let values: Vec = challenge.values.iter().map(|&v| v as i32).collect(); + + let mut items_by_ratio: Vec<(usize, f64)> = (0..num_items) + .map(|i| { + let ratio = values[i] as f64 / challenge.weights[i] as f64; + (i, ratio) + }) + .collect(); + items_by_ratio.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); + + let mut interaction_gains = vec![0; num_items]; + let mut weight_reduction_candidates = Vec::with_capacity(num_items); + let mut available_items = Vec::with_capacity(num_items); + + let mut iteration_count = 0; + let mut stagnant_iterations = 0; + let mut max_total_value = total_value; + let weight_threshold = challenge.max_weight * 85 / 100; + let baseline_value = challenge.baseline_value as i32; + let max_weight = challenge.max_weight; + + let interaction_rows: Vec<&[i32]> = challenge + .interaction_values + .iter() + .map(|row| row.as_slice()) + .collect(); + + loop { + 
iteration_count += 1; + + for entry in &mut wait_map { + if let Some(iter) = entry { + if *iter <= iteration_count { + *entry = None; + } + } + } + + available_items.clear(); + available_items.extend( + items_by_ratio.iter() + .filter(|&&(i, _)| !selected_items[i] && wait_map[i].is_none()) + .copied() + ); + + let mut improvement_found = false; + let mut index = 0; + + while index < available_items.len() { + let (i, _) = available_items[index]; + let individual_value = values[i]; + let interaction_gain = interaction_gains[i]; + let gain = individual_value + interaction_gain; + let potential_weight = total_weight + challenge.weights[i]; + + if gain >= individual_value || + (gain >= individual_value - 2 && potential_weight <= weight_threshold) { + selected_items[i] = true; + total_value += gain; + total_weight = potential_weight; + + let interaction_row = interaction_rows[i]; + for (j, gain) in interaction_gains.iter_mut().enumerate() { + *gain += interaction_row[j]; + } + + improvement_found = true; + available_items.remove(index); + } else { + index += 1; + } + } + + if !improvement_found { + for &(i, _) in &available_items { + let new_item_value = values[i] + interaction_gains[i]; + let new_item_weight = challenge.weights[i]; + + if new_item_value <= values[i] { + continue; + } + + for j in 0..num_items { + if selected_items[j] { + let removal_loss = values[j] + interaction_gains[j]; + if total_value + new_item_value - removal_loss > total_value { + let remove_row = interaction_rows[j]; + let add_row = interaction_rows[i]; + + for k in 0..num_items { + interaction_gains[k] = interaction_gains[k] - remove_row[k] + add_row[k]; + } + + selected_items[j] = false; + total_value -= removal_loss; + total_weight -= challenge.weights[j]; + + selected_items[i] = true; + total_value += new_item_value; + total_weight += new_item_weight; + + wait_map[j] = Some(iteration_count + WAIT_ITERATIONS); + improvement_found = true; + break; + } + } + } + + if improvement_found { + 
break; + } else { + return Ok(None); + } + } + } + + if total_weight > max_weight { + weight_reduction_candidates.clear(); + for i in 0..num_items { + if selected_items[i] { + let loss = values[i] + interaction_gains[i]; + let ratio = challenge.weights[i] as f64 / (loss as f64).max(1.0); + weight_reduction_candidates.push((ratio, i)); + } + } + weight_reduction_candidates.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + while total_weight > max_weight { + if let Some((_, item)) = weight_reduction_candidates.pop() { + let remove_row = interaction_rows[item]; + for k in 0..num_items { + interaction_gains[k] -= remove_row[k]; + } + selected_items[item] = false; + total_weight -= challenge.weights[item]; + total_value -= values[item] + interaction_gains[item]; + wait_map[item] = Some(iteration_count + WAIT_ITERATIONS); + } else { + break; + } + } + } + + if total_value >= baseline_value && total_weight <= max_weight { + let result_items: Vec = selected_items + .iter() + .enumerate() + .filter(|&(_, &is_selected)| is_selected) + .map(|(i, _)| i) + .collect(); + + return Ok(Some(SubSolution { + items: result_items, + })); + } + + if total_value > max_total_value { + max_total_value = total_value; + stagnant_iterations = 0; + } else { + stagnant_iterations += 1; + } + + if stagnant_iterations >= MAX_STAGNANT_ITERATIONS { + return Ok(None); + } + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_fast/README.md b/tig-algorithms/src/knapsack/relative_opt_fast/README.md new file mode 100644 index 0000000..4593c2c --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_fast/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** relative_opt_fast +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The 
files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_fast/mod.rs b/tig-algorithms/src/knapsack/relative_opt_fast/mod.rs new file mode 100644 index 0000000..daba0c5 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_fast/mod.rs @@ -0,0 +1,352 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use tig_challenges::knapsack::*; + + fn compute_solution( + challenge: &SubInstance, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, + ) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let inv_weights : Vec = challenge.weights.iter().map(|&w| 1.0 / w as f32).collect(); + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + (idx, ratio) + }) + 
.collect(); + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + let mut top_densities = vec![f32::MIN; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size].binary_search_by(|prob| { + prob.partial_cmp(&random_threshold).unwrap() + }) { Ok(i) | Err(i) => i }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + let mut selected_item = 0; + if selected_rank < list_size && !selected_items.is_empty() { + selected_rank = top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities.select_nth_unstable_by(selected_rank, |a, b| { + b.1.partial_cmp(&a.1).unwrap() + }); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = unsafe { + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(density.0) + }; + density.1 
+= interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + top_densities[0] = first_density; + top_densities[1] = second_density; + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + unselected_items.sort_unstable_by_key(|&idx| challenge.weights[idx]); + + let local_search_iterations = 150; + let mut feasible_adds = Vec::new(); + let mut feasible_swaps = Vec::new(); + for _ in 0..local_search_iterations { + let mut improved = false; + + if total_weight < challenge.max_weight { + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w > challenge.max_weight { + break; + } + + if new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item); + } + } + } + feasible_adds.clear(); + } + + let free_capacity = challenge.max_weight as i32 - total_weight as 
i32; + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + let cand_w = challenge.weights[cand_item] as i32; + if rem_w + free_capacity < cand_w { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + + + let new_item_weight = challenge.weights[new_item]; + let remove_item_weight = challenge.weights[remove_item]; + + let current_pos = unsel_idx; + let mut target_pos = current_pos; + if new_item_weight != remove_item_weight { + target_pos = unselected_items + .binary_search_by(|&probe| challenge.weights[probe].cmp(&remove_item_weight)) + .unwrap_or_else(|e| e); + } + if current_pos != target_pos { + unsafe { + let ptr = unselected_items.as_mut_ptr(); + if target_pos < current_pos { + std::ptr::copy( + ptr.add(target_pos), + ptr.add(target_pos + 1), + current_pos - target_pos + ); + } else { + target_pos = target_pos - 1; + std::ptr::copy( + ptr.add(current_pos + 1), + ptr.add(current_pos), + target_pos - current_pos + ); + } + } + } + unselected_items[target_pos] = remove_item; + + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + 
*challenge.interaction_values.get_unchecked(x).get_unchecked(new_item) - + *challenge.interaction_values.get_unchecked(x).get_unchecked(remove_item); + } + } + } + feasible_swaps.clear(); + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some((SubSolution { items: selected_items }, total_value))) + } + } + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes( + challenge.seed[..8].try_into().unwrap(), + )); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..200 { + let mut unselected_items: Vec = (0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = + compute_solution(challenge, &mut contribution_list, &mut unselected_items, &mut rng)?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_value { + best_value = value; + best_solution = Some(SubSolution { items: solution.items.clone() }); + } + + let threshold = lookup_threshold(challenge.difficulty.num_items); + if (challenge.baseline_value as f32) * (1.0 - threshold * 0.008) >= best_value as f32 { + return Ok(None); + } + else if challenge.baseline_value <= best_value as u32 { + return Ok(best_solution); + } + } + + Ok(best_solution) + } + + fn lookup_threshold(num_items: usize) -> f32 { + let points = vec![ + (100, 1.071), (105, 1.015), (110, 0.973), (120, 0.882), + (125, 0.791), (130, 0.770), (135, 0.760), (140, 0.749), + (145, 0.700), 
(150, 0.616), (155, 0.574), (160, 0.532), + (165, 0.511), (170, 0.494), (175, 0.485), (180, 0.476), + (190, 0.448), (195, 0.434), (200, 0.427), (205, 0.420), + (210, 0.420), (215, 0.385), (220, 0.350), (225, 0.347), + (230, 0.343), (235, 0.343), (240, 0.338), (245, 0.334), + (250, 0.329) + ]; + + points.iter() + .filter(|&&(x, _)| x <= num_items) + .max_by_key(|&&(x, _)| x) + .unwrap() + .1 + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_mid/README.md b/tig-algorithms/src/knapsack/relative_opt_mid/README.md new file mode 100644 index 0000000..7ed8318 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_mid/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** relative_opt_mid +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_mid/mod.rs b/tig-algorithms/src/knapsack/relative_opt_mid/mod.rs new file mode 100644 index 0000000..d93ecef --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_mid/mod.rs @@ -0,0 +1,352 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible 
+#[cfg(none)] +mod dead_code { + use anyhow::Result; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use tig_challenges::knapsack::*; + + fn compute_solution( + challenge: &SubInstance, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, + ) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let inv_weights : Vec = challenge.weights.iter().map(|&w| 1.0 / w as f32).collect(); + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + (idx, ratio) + }) + .collect(); + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + let mut top_densities = vec![f32::MIN; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size].binary_search_by(|prob| { + prob.partial_cmp(&random_threshold).unwrap() + }) { Ok(i) | Err(i) => i }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + let mut selected_item = 0; + if selected_rank < list_size && !selected_items.is_empty() { + selected_rank = top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities.select_nth_unstable_by(selected_rank, |a, 
b| { + b.1.partial_cmp(&a.1).unwrap() + }); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = unsafe { + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(density.0) + }; + density.1 += interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + top_densities[0] = first_density; + top_densities[1] = second_density; + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + unselected_items.sort_unstable_by_key(|&idx| challenge.weights[idx]); + + let local_search_iterations = 150; + let mut feasible_adds = Vec::new(); + let mut feasible_swaps = Vec::new(); + for _ in 
0..local_search_iterations { + let mut improved = false; + + if total_weight < challenge.max_weight { + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w > challenge.max_weight { + break; + } + + if new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item); + } + } + } + feasible_adds.clear(); + } + + let free_capacity = challenge.max_weight as i32 - total_weight as i32; + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + let cand_w = challenge.weights[cand_item] as i32; + if rem_w + free_capacity < cand_w { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + + + let new_item_weight = challenge.weights[new_item]; + let remove_item_weight = challenge.weights[remove_item]; + + let current_pos = unsel_idx; + let mut target_pos = current_pos; + 
if new_item_weight != remove_item_weight { + target_pos = unselected_items + .binary_search_by(|&probe| challenge.weights[probe].cmp(&remove_item_weight)) + .unwrap_or_else(|e| e); + } + if current_pos != target_pos { + unsafe { + let ptr = unselected_items.as_mut_ptr(); + if target_pos < current_pos { + std::ptr::copy( + ptr.add(target_pos), + ptr.add(target_pos + 1), + current_pos - target_pos + ); + } else { + target_pos = target_pos - 1; + std::ptr::copy( + ptr.add(current_pos + 1), + ptr.add(current_pos), + target_pos - current_pos + ); + } + } + } + unselected_items[target_pos] = remove_item; + + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item) - + *challenge.interaction_values.get_unchecked(x).get_unchecked(remove_item); + } + } + } + feasible_swaps.clear(); + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some((SubSolution { items: selected_items }, total_value))) + } + } + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes( + challenge.seed[..8].try_into().unwrap(), + )); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..200 { + let mut unselected_items: Vec = (0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = + compute_solution(challenge, &mut contribution_list, &mut unselected_items, &mut rng)?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_value { + best_value = value; + best_solution = Some(SubSolution { items: solution.items.clone() }); + } + + let threshold = lookup_threshold(challenge.difficulty.num_items); + if (challenge.baseline_value as f32) * (1.0 - threshold * 0.01) >= best_value as f32 { + return Ok(None); + } + else if challenge.baseline_value <= best_value as u32 { + return Ok(best_solution); + } + } + + Ok(best_solution) + } + + fn lookup_threshold(num_items: usize) -> f32 { + let points = vec![ + (100, 1.071), (105, 1.015), (110, 0.973), (120, 0.882), + (125, 0.791), (130, 0.770), (135, 0.760), (140, 0.749), + (145, 0.700), (150, 0.616), (155, 0.574), (160, 0.532), + (165, 0.511), (170, 0.494), (175, 0.485), (180, 0.476), + (190, 0.448), (195, 0.434), (200, 0.427), (205, 0.420), + (210, 0.420), (215, 0.385), (220, 0.350), (225, 0.347), + (230, 0.343), (235, 0.343), (240, 0.338), (245, 0.334), + (250, 0.329) + ]; + + points.iter() + .filter(|&&(x, _)| x <= num_items) + .max_by_key(|&&(x, _)| x) + .unwrap() + .1 + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_optima/README.md b/tig-algorithms/src/knapsack/relative_opt_optima/README.md new file mode 100644 index 
0000000..896a253 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_optima/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** relative_opt_optima +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_opt_optima/mod.rs b/tig-algorithms/src/knapsack/relative_opt_optima/mod.rs new file mode 100644 index 0000000..5c05d22 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_opt_optima/mod.rs @@ -0,0 +1,352 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use tig_challenges::knapsack::*; + + fn compute_solution( + challenge: &SubInstance, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, + ) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let inv_weights : Vec = challenge.weights.iter().map(|&w| 1.0 / w as f32).collect(); + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as 
f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + (idx, ratio) + }) + .collect(); + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + let mut top_densities = vec![f32::MIN; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size].binary_search_by(|prob| { + prob.partial_cmp(&random_threshold).unwrap() + }) { Ok(i) | Err(i) => i }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + let mut selected_item = 0; + if selected_rank < list_size && !selected_items.is_empty() { + selected_rank = top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities.select_nth_unstable_by(selected_rank, |a, b| { + b.1.partial_cmp(&a.1).unwrap() + }); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items { + 
*contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = unsafe { + *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(density.0) + }; + density.1 += interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + top_densities[0] = first_density; + top_densities[1] = second_density; + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + unselected_items.sort_unstable_by_key(|&idx| challenge.weights[idx]); + + let local_search_iterations = 150; + let mut feasible_adds = Vec::new(); + let mut feasible_swaps = Vec::new(); + for _ in 0..local_search_iterations { + let mut improved = false; + + if total_weight < challenge.max_weight { + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w > challenge.max_weight { + break; + } + + if new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + 
unselected_items.remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item); + } + } + } + feasible_adds.clear(); + } + + let free_capacity = challenge.max_weight as i32 - total_weight as i32; + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + let cand_w = challenge.weights[cand_item] as i32; + if rem_w + free_capacity < cand_w { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + + + let new_item_weight = challenge.weights[new_item]; + let remove_item_weight = challenge.weights[remove_item]; + + let current_pos = unsel_idx; + let mut target_pos = current_pos; + if new_item_weight != remove_item_weight { + target_pos = unselected_items + .binary_search_by(|&probe| challenge.weights[probe].cmp(&remove_item_weight)) + .unwrap_or_else(|e| e); + } + if current_pos != target_pos { + unsafe { + let ptr = unselected_items.as_mut_ptr(); + if target_pos < current_pos { + std::ptr::copy( + ptr.add(target_pos), + ptr.add(target_pos + 1), + current_pos - target_pos + ); + } else { + target_pos = target_pos - 1; + std::ptr::copy( + ptr.add(current_pos + 1), + ptr.add(current_pos), + target_pos - current_pos + ); + } + 
} + } + unselected_items[target_pos] = remove_item; + + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item) - + *challenge.interaction_values.get_unchecked(x).get_unchecked(remove_item); + } + } + } + feasible_swaps.clear(); + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some((SubSolution { items: selected_items }, total_value))) + } + } + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> Result> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes( + challenge.seed[..8].try_into().unwrap(), + )); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..1000 { + let mut unselected_items: Vec = (0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = + compute_solution(challenge, &mut contribution_list, &mut unselected_items, &mut rng)?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_value { + best_value = value; + best_solution = Some(SubSolution { items: solution.items.clone() }); + } + + let threshold = lookup_threshold(challenge.difficulty.num_items); + if 
(challenge.baseline_value as f32) * (1.0 - threshold * 0.01) >= best_value as f32 { + return Ok(None); + } + else if challenge.baseline_value <= best_value as u32 { + return Ok(best_solution); + } + } + + Ok(best_solution) + } + + fn lookup_threshold(num_items: usize) -> f32 { + let points = vec![ + (100, 1.071), (105, 1.015), (110, 0.973), (120, 0.882), + (125, 0.791), (130, 0.770), (135, 0.760), (140, 0.749), + (145, 0.700), (150, 0.616), (155, 0.574), (160, 0.532), + (165, 0.511), (170, 0.494), (175, 0.485), (180, 0.476), + (190, 0.448), (195, 0.434), (200, 0.427), (205, 0.420), + (210, 0.420), (215, 0.385), (220, 0.350), (225, 0.347), + (230, 0.343), (235, 0.343), (240, 0.338), (245, 0.334), + (250, 0.329) + ]; + + points.iter() + .filter(|&&(x, _)| x <= num_items) + .max_by_key(|&&(x, _)| x) + .unwrap() + .1 + } +} \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_quad_fast/README.md b/tig-algorithms/src/knapsack/relative_quad_fast/README.md new file mode 100644 index 0000000..2936812 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_quad_fast/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** relative_quad_fast +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_quad_fast/mod.rs b/tig-algorithms/src/knapsack/relative_quad_fast/mod.rs new file mode 100644 index 0000000..2139c9c --- 
/dev/null +++ b/tig-algorithms/src/knapsack/relative_quad_fast/mod.rs @@ -0,0 +1,232 @@ +use anyhow::Result; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + + fn compute_solution( + challenge: &Challenge, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, +) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 / challenge.weights[idx] as f32; + (idx, ratio) + }) + .collect(); + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size].binary_search_by(|prob| { + prob.partial_cmp(&random_threshold).unwrap() + }) { Ok(i) | Err(i) => i }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + + item_densities.select_nth_unstable_by(selected_rank, |a, b| { + b.1.partial_cmp(&a.1).unwrap() + }); + let selected_item = item_densities[selected_rank].0; + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + unsafe { + for x in 0..challenge.difficulty.num_items { + 
*contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(selected_item); + } + } + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for density in item_densities.iter_mut() { + let interaction = *challenge.interaction_values.get_unchecked(selected_item).get_unchecked(density.0); + let w = *challenge.weights.get_unchecked(density.0) as f32; + density.1 += interaction as f32 / w; + } + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + let local_search_iterations = 150; + for _ in 0..local_search_iterations { + let mut improved = false; + + let mut feasible_adds = Vec::new(); + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w <= challenge.max_weight && new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.swap_remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item); + } + } + } + + let mut feasible_swaps = Vec::new(); + for (i, &cand_item) in 
unselected_items.iter().enumerate() { + let min_needed = + challenge.weights[cand_item] as i32 - (challenge.max_weight as i32 - total_weight as i32); + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + if rem_w < min_needed { + continue; + } + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + unselected_items.swap_remove(unsel_idx); + selected_items.push(new_item); + unselected_items.push(remove_item); + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += + *challenge.interaction_values.get_unchecked(x).get_unchecked(new_item) - + *challenge.interaction_values.get_unchecked(x).get_unchecked(remove_item); + } + } + } + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some((Solution { items: selected_items }, total_value))) + } + } + + + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes( + challenge.seed[..8].try_into().unwrap(), + )); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..2 { + let mut unselected_items: Vec = 
(0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = + compute_solution(challenge, &mut contribution_list, &mut unselected_items, &mut rng)?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > best_value { + best_value = value; + best_solution = Some(Solution { items: solution.items.clone() }); + } + } + + if let Some(s) = best_solution { + let _ = save_solution(&s); + } + return Ok(()); + } \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_raw_ultra/README.md b/tig-algorithms/src/knapsack/relative_raw_ultra/README.md new file mode 100644 index 0000000..6809ebd --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_raw_ultra/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** knapsack +* **Algorithm Name:** relative_raw_ultra +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/knapsack/relative_raw_ultra/mod.rs b/tig-algorithms/src/knapsack/relative_raw_ultra/mod.rs new file mode 100644 index 0000000..ee44f22 --- /dev/null +++ b/tig-algorithms/src/knapsack/relative_raw_ultra/mod.rs @@ -0,0 +1,440 @@ +use anyhow::Result; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use serde_json::{Map, Value}; +use tig_challenges::knapsack::*; + +pub fn solve_challenge( + challenge: &Challenge, 
+ save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + fn compute_solution( + challenge: &SubInstance, + contribution_list: &mut [i32], + unselected_items: &mut Vec, + rng: &mut StdRng, + ) -> Result> { + let mut selected_items = Vec::new(); + let mut total_weight = 0; + let mut total_value = 0; + + let inv_weights: Vec = challenge.weights.iter().map(|&w| 1.0 / w as f32).collect(); + + const RCL_MAX: usize = 10; + + let probs: Vec = (0..RCL_MAX) + .map(|rank| 1.0 / ((rank + 1) as f32).exp()) + .collect(); + + let mut acc_probs: Vec = Vec::with_capacity(RCL_MAX); + let mut sum = 0.0; + for &prob in &probs { + sum += prob; + acc_probs.push(sum); + } + let total_prob_max = sum; + let max_item_weight = challenge.weights.iter().max().unwrap(); + + let mut item_densities: Vec<(usize, f32)> = unselected_items + .iter() + .map(|&idx| { + let ratio = contribution_list[idx] as f32 * inv_weights[idx]; + (idx, ratio) + }) + .collect(); + + let list_size = 2; + let mut top_ranks = vec![0; list_size]; + let mut top_densities = vec![f32::MIN; list_size]; + + while !item_densities.is_empty() { + let num_candidates = item_densities.len(); + if num_candidates < 2 { + break; + } + + let actual_rcl_size = num_candidates.min(RCL_MAX); + let total_prob = if actual_rcl_size == RCL_MAX { + total_prob_max + } else { + acc_probs[actual_rcl_size - 1] + }; + + let random_threshold = rng.gen_range(0.0..total_prob); + let mut selected_rank = match acc_probs[..actual_rcl_size] + .binary_search_by(|prob| prob.partial_cmp(&random_threshold).unwrap()) + { + Ok(i) | Err(i) => i, + }; + if selected_rank >= actual_rcl_size { + selected_rank = actual_rcl_size - 1; + } + let mut selected_item = 0; + if selected_rank < list_size && !selected_items.is_empty() { + selected_rank = 
top_ranks[selected_rank]; + selected_item = item_densities[selected_rank].0; + } else { + item_densities + .select_nth_unstable_by(selected_rank, |a, b| b.1.partial_cmp(&a.1).unwrap()); + selected_item = item_densities[selected_rank].0; + } + + selected_items.push(selected_item); + total_weight += challenge.weights[selected_item]; + total_value += contribution_list[selected_item]; + + if total_weight + max_item_weight > challenge.max_weight { + item_densities.retain(|(idx, _)| { + total_weight + challenge.weights[*idx] <= challenge.max_weight + && *idx != selected_item + }); + } else { + item_densities.swap_remove(selected_rank); + } + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(x); + } + + let mut first_density = f32::MIN; + let mut first_rank = 0; + let mut second_density = f32::MIN; + let mut second_rank = 0; + + for (i, density) in item_densities.iter_mut().enumerate() { + let interaction = unsafe { + *challenge + .interaction_values + .get_unchecked(selected_item) + .get_unchecked(density.0) + }; + density.1 += interaction as f32 * inv_weights[density.0]; + let current_density = density.1; + + if current_density > first_density { + second_density = first_density; + second_rank = first_rank; + first_density = current_density; + first_rank = i; + } else if current_density > second_density { + second_density = current_density; + second_rank = i; + } + } + + top_ranks[0] = first_rank; + top_ranks[1] = second_rank; + top_densities[0] = first_density; + top_densities[1] = second_density; + } + } + unselected_items.clear(); + unselected_items.extend(0..challenge.difficulty.num_items); + + let mut sorted_selected = selected_items.clone(); + sorted_selected.sort_unstable_by(|a, b| b.cmp(a)); + + for &selected in &sorted_selected { + unselected_items.swap_remove(selected); + } + + unselected_items.sort_unstable_by_key(|&idx| 
challenge.weights[idx]); + + let local_search_iterations = 150; + let mut feasible_adds = Vec::new(); + let mut feasible_swaps = Vec::new(); + for _ in 0..local_search_iterations { + let mut improved = false; + + if total_weight < challenge.max_weight { + for (i, &cand) in unselected_items.iter().enumerate() { + let new_w = total_weight + challenge.weights[cand]; + let new_val = total_value + contribution_list[cand]; + if new_w > challenge.max_weight { + break; + } + + if new_val >= total_value { + feasible_adds.push(i); + } + } + if !feasible_adds.is_empty() { + let pick = rng.gen_range(0..feasible_adds.len()); + let add_idx = feasible_adds[pick]; + let new_item = unselected_items[add_idx]; + + unselected_items.remove(add_idx); + selected_items.push(new_item); + + total_weight += challenge.weights[new_item]; + total_value += contribution_list[new_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item); + } + } + } + feasible_adds.clear(); + } + + let free_capacity = challenge.max_weight as i32 - total_weight as i32; + for (j, &rem_item) in selected_items.iter().enumerate() { + let rem_w = challenge.weights[rem_item] as i32; + + for (i, &cand_item) in unselected_items.iter().enumerate() { + let cand_w = challenge.weights[cand_item] as i32; + if rem_w + free_capacity < cand_w { + break; + } + + let val_diff = contribution_list[cand_item] + - contribution_list[rem_item] + - challenge.interaction_values[cand_item][rem_item]; + if val_diff >= 0 { + feasible_swaps.push((i, j)); + } + } + } + + if !feasible_swaps.is_empty() { + let pick = rng.gen_range(0..feasible_swaps.len()); + let (unsel_idx, sel_idx) = feasible_swaps[pick]; + let new_item = unselected_items[unsel_idx]; + let remove_item = selected_items[sel_idx]; + + selected_items.swap_remove(sel_idx); + selected_items.push(new_item); + + let new_item_weight = 
challenge.weights[new_item]; + let remove_item_weight = challenge.weights[remove_item]; + + let current_pos = unsel_idx; + let mut target_pos = current_pos; + if new_item_weight != remove_item_weight { + target_pos = unselected_items + .binary_search_by(|&probe| { + challenge.weights[probe].cmp(&remove_item_weight) + }) + .unwrap_or_else(|e| e); + } + if current_pos != target_pos { + unsafe { + let ptr = unselected_items.as_mut_ptr(); + if target_pos < current_pos { + std::ptr::copy( + ptr.add(target_pos), + ptr.add(target_pos + 1), + current_pos - target_pos, + ); + } else { + target_pos = target_pos - 1; + std::ptr::copy( + ptr.add(current_pos + 1), + ptr.add(current_pos), + target_pos - current_pos, + ); + } + } + } + unselected_items[target_pos] = remove_item; + + total_value += contribution_list[new_item] + - contribution_list[remove_item] + - challenge.interaction_values[new_item][remove_item]; + total_weight = + total_weight + challenge.weights[new_item] - challenge.weights[remove_item]; + improved = true; + + unsafe { + for x in 0..challenge.difficulty.num_items { + *contribution_list.get_unchecked_mut(x) += *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(new_item) + - *challenge + .interaction_values + .get_unchecked(x) + .get_unchecked(remove_item); + } + } + } + feasible_swaps.clear(); + + if !improved { + break; + } + } + + if selected_items.is_empty() { + Ok(None) + } else { + Ok(Some(( + SubSolution { + items: selected_items, + }, + total_value, + ))) + } + } + + pub fn solve_challenge(challenge: &Challenge) -> Result> { + let mut solution = Solution { + sub_solutions: Vec::with_capacity(challenge.sub_instances.len()), + }; + + for _ in 0..challenge.sub_instances.len() { + solution + .sub_solutions + .push(SubSolution { items: Vec::new() }); + } + + let mut ratio_indices: Vec<(f64, usize)> = Vec::new(); + + for (index, sub_instance) in challenge.sub_instances.iter().enumerate() { + match solve_sub_instance(sub_instance, 1)? 
{ + Some((sub_solution, best_value)) => { + let upper_ratio = best_value as f64 + * (1.0 + 0.011 * lookup_threshold(challenge.difficulty.num_items) as f64) + / sub_instance.baseline_value as f64; + + ratio_indices.push((upper_ratio, index)); + } + None => return Ok(None), + } + } + + ratio_indices.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal)); + + let mut ratio_threshold = (1.0 + challenge.difficulty.better_than_baseline as f64 / 1000.0); + let average_ratio_sqr = 16.0 * ratio_threshold * ratio_threshold; + let mut sum_of_ratios_sqr = 0.0; + let mut instance_i = 0; + + for &(upper_ratio, index) in &ratio_indices { + let ratio_threshold_sqr = + (average_ratio_sqr - sum_of_ratios_sqr) / (16.0 - instance_i as f64); + ratio_threshold = ratio_threshold_sqr.sqrt(); + + if upper_ratio < ratio_threshold { + //return Ok(None); + } + + let sub_instance = &challenge.sub_instances[index]; + + match solve_sub_instance(sub_instance, 20)? { + Some((sub_solution, best_value)) => { + let ratio = best_value as f64 / sub_instance.baseline_value as f64; + let ratio_sqr = ratio * ratio; + sum_of_ratios_sqr += ratio_sqr; + + solution.sub_solutions[index] = sub_solution; + } + None => return Ok(None), + } + + instance_i += 1; + } + + Ok(Some(solution)) + } + + fn solve_sub_instance( + challenge: &SubInstance, + num_iterations: i32, + ) -> Result> { + let mut rng = + StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let mut best_solution: Option = None; + let mut best_value = 0; + + for _outer_iter in 0..num_iterations { + let mut unselected_items: Vec = (0..challenge.difficulty.num_items).collect(); + let mut contribution_list = challenge + .values + .iter() + .map(|&v| v as i32) + .collect::>(); + + let sol_result = compute_solution( + challenge, + &mut contribution_list, + &mut unselected_items, + &mut rng, + )?; + + let (solution, value) = match sol_result { + Some(x) => x, + None => continue, + }; + + if value > 
best_value { + best_value = value; + best_solution = Some(SubSolution { + items: solution.items.clone(), + }); + } + } + + match best_solution { + Some(solution) => Ok(Some((solution, best_value))), + None => Ok(None), + } + } + + fn lookup_threshold(num_items: usize) -> f32 { + let points = vec![ + (100, 1.071), + (105, 1.015), + (110, 0.973), + (120, 0.882), + (125, 0.791), + (130, 0.770), + (135, 0.760), + (140, 0.749), + (145, 0.700), + (150, 0.616), + (155, 0.574), + (160, 0.532), + (165, 0.511), + (170, 0.494), + (175, 0.485), + (180, 0.476), + (190, 0.448), + (195, 0.434), + (200, 0.427), + (205, 0.420), + (210, 0.420), + (215, 0.385), + (220, 0.350), + (225, 0.347), + (230, 0.343), + (235, 0.343), + (240, 0.338), + (245, 0.334), + (250, 0.329), + ]; + + points + .iter() + .filter(|&&(x, _)| x <= num_items) + .max_by_key(|&&(x, _)| x) + .unwrap() + .1 + } +} diff --git a/tig-algorithms/src/satisfiability/better_sat/README.md b/tig-algorithms/src/satisfiability/better_sat/README.md new file mode 100644 index 0000000..c04665c --- /dev/null +++ b/tig-algorithms/src/satisfiability/better_sat/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** better_sat +* **Copyright:** 2025 frogmarch +* **Identity of Submitter:** frogmarch +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/better_sat/mod.rs b/tig-algorithms/src/satisfiability/better_sat/mod.rs new file mode 100644 index 0000000..6466d01 
--- /dev/null +++ b/tig-algorithms/src/satisfiability/better_sat/mod.rs @@ -0,0 +1,368 @@ +use rand::{rngs::SmallRng, SeedableRng, Rng}; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut clauses = challenge.clauses.clone(); + let mut i = clauses.len(); + while i > 0 { + i -= 1; + let clause = &mut clauses[i]; + + if clause.len() > 1 { + let mut j = 1; + while j < clause.len() { + if clause[..j].contains(&clause[j]) { + clause.swap_remove(j); + } else { + j += 1; + } + } + } + + let mut is_tautology = false; + for &lit in clause.iter() { + if clause.contains(&-lit) { + is_tautology = true; + break; + } + } + + if is_tautology { + clauses.swap_remove(i); + } + } + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + let mut clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let density = num_clauses as f64 / num_variables as f64; + let avg_clause_size = clauses.iter().map(|c| c.len()).sum::() as f64 / num_clauses as f64; + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + + let is_target_problem = num_variables >= 4500 && num_variables <= 5500 + && clauses_ratio >= 400.0 && clauses_ratio <= 450.0; + + let use_lower_variable_strategy = num_variables <= 7000; + + let nad = if use_lower_variable_strategy { 1.28 } else { 1.0 }; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f64 / 
num_n as f64; + } + + if use_lower_variable_strategy { + if vad <= nad { + variables[v] = rng.gen::() < 0.001; + } else { + let prob = (num_p as f64 + 0.5) / ((num_p + num_n) as f64 + 1.0); + variables[v] = rng.gen_bool(prob); + } + } else { + if vad <= nad { + variables[v] = rng.gen::() < 0.003; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob); + } + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + + let check_interval = (50.0 * (1.0 + (density / 3.0).ln().max(0.0))).max(20.0) as usize; + let mut last_check_residual = residual_.len(); + + let max_fuel = 10_000_000_000.0; + let difficulty_factor = density * avg_clause_size.sqrt(); + let base_fuel = (2000.0 + 100.0 * difficulty_factor) * (num_variables as f64).sqrt(); + let flip_fuel = 200.0 + difficulty_factor; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + let mut rounds = 0; + + unsafe { + loop { + if rounds >= max_num_rounds { + return Ok(()); + } + + if rounds % check_interval == 0 && rounds > 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual.max(1) as f64; + + let progress_threshold = 0.15 + 0.05 * (density / 3.0).min(1.0); + + if progress <= 0 { + let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual.max(1) as f64).min(1.0); + current_prob = (current_prob + prob_adjustment).min(0.9); + } else if progress_ratio > progress_threshold { + current_prob = 
base_prob; + } else { + current_prob = current_prob * 0.8 + base_prob * 0.2; + } + + last_check_residual = residual_.len(); + } + + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = residual_[id]; + if num_good_so_far[i] > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let c = clauses.get_unchecked_mut(i); + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + let mut min_weight = usize::MAX; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + + for &c_idx in clauses_to_check { + if *num_good_so_far.get_unchecked(c_idx) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + let appearances = p_clauses[abs_l].len() + n_clauses[abs_l].len(); + + let combined_weight = if is_target_problem { + sad + } else if use_lower_variable_strategy { + sad * 100 + (appearances / 10) + } else { + sad * 1000 + appearances + }; + + if combined_weight < min_weight { + min_sad = sad; + min_weight = combined_weight; + v_min_sad = abs_l; + } + + if min_sad <= 1 { + break; 
+ } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/fast_walk_sat/README.md b/tig-algorithms/src/satisfiability/fast_walk_sat/README.md new file mode 100644 index 0000000..c62ee44 --- /dev/null +++ b/tig-algorithms/src/satisfiability/fast_walk_sat/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** fast_walk_sat +* **Copyright:** 2024 Dominic Kennedy +* **Identity of Submitter:** Dominic Kennedy +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/fast_walk_sat/mod.rs b/tig-algorithms/src/satisfiability/fast_walk_sat/mod.rs new file mode 100644 index 0000000..4a714f6 --- /dev/null +++ 
b/tig-algorithms/src/satisfiability/fast_walk_sat/mod.rs @@ -0,0 +1,219 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashSet; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![vec![]; num_variables]; + let mut n_clauses: Vec> = vec![vec![]; num_variables]; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + let mut num_good_so_far: Vec = vec![0; num_clauses]; + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + if variables[var] { + num_good_so_far[i] += 1 + } + } else { + n_clauses[var].push(i); + if !variables[var] { + num_good_so_far[i] += 1 + } + } + } + } + + let mut residual = HashSet::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual.insert(i); + } + } + + let mut attempts = 0; + loop { + if attempts >= num_variables * 25 { + return Ok(()); + } + if let Some(&i) = residual.iter().next() { + let mut min_sad = clauses.len(); + let mut v_min_sad = vec![]; + let c = &clauses[i]; + for &l in c { + let mut sad = 0 as usize; + if variables[(l.abs() - 1) as usize] { + for &c in &p_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } else { + for &c in &n_clauses[(l.abs() - 1) as usize] { + 
if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = vec![(l.abs() - 1) as usize]; + } else if sad == min_sad { + v_min_sad.push((l.abs() - 1) as usize); + } + } + let v = if min_sad == 0 { + if v_min_sad.len() == 1 { + v_min_sad[0] + } else { + v_min_sad[rng.gen_range(0..v_min_sad.len())] + } + } else { + if rng.gen_bool(0.5) { + let l = c[rng.gen_range(0..c.len())]; + (l.abs() - 1) as usize + } else { + v_min_sad[rng.gen_range(0..v_min_sad.len())] + } + }; + + for &c in &n_clauses[v] { + if variables[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + residual.remove(&c); + } + } else { + if num_good_so_far[c] == 1 { + residual.insert(c); + } + num_good_so_far[c] -= 1; + } + } + for &c in &p_clauses[v] { + if variables[v] { + if num_good_so_far[c] == 1 { + residual.insert(c); + } + num_good_so_far[c] -= 1; + } else { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + residual.remove(&c); + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + attempts += 1; + } + + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/inbound/README.md b/tig-algorithms/src/satisfiability/inbound/README.md new file mode 100644 index 0000000..32a5f65 --- /dev/null +++ b/tig-algorithms/src/satisfiability/inbound/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** inbound +* **Copyright:** 2024 Clifford Algueraz +* **Identity of Submitter:** Clifford Algueraz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* 
TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/inbound/mod.rs b/tig-algorithms/src/satisfiability/inbound/mod.rs new file mode 100644 index 0000000..e755b98 --- /dev/null +++ b/tig-algorithms/src/satisfiability/inbound/mod.rs @@ -0,0 +1,236 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![vec![]; num_variables]; + let mut n_clauses: Vec> = vec![vec![]; num_variables]; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + let mut num_good_so_far: Vec = vec![0; num_clauses]; + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + if variables[var] { + num_good_so_far[i] += 1 + } + } else { + n_clauses[var].push(i); + if !variables[var] { + num_good_so_far[i] += 1 + } + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = HashMap::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices.insert(i, residual_.len() - 1); + } + } + + let mut attempts = 0; + loop { + if attempts >= num_variables * 25 { + return Ok(()); + } + if !residual_.is_empty() { + let i = residual_[0]; + let mut min_sad = clauses.len(); + let mut v_min_sad = vec![]; + let c = &clauses[i]; + for &l in c { + let mut sad = 0 as usize; + if variables[(l.abs() - 1) as usize] { + for &c in &p_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] 
== 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } else { + for &c in &n_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = vec![(l.abs() - 1) as usize]; + } else if sad == min_sad { + v_min_sad.push((l.abs() - 1) as usize); + } + } + let v = if min_sad == 0 { + if v_min_sad.len() == 1 { + v_min_sad[0] + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + } else { + if rng.gen_bool(0.5) { + let l = c[rng.gen_range(0..(c.len() as u32)) as usize]; + (l.abs() - 1) as usize + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + attempts += 1; + } + + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/mod.rs b/tig-algorithms/src/satisfiability/mod.rs index e6bcd84..570e4fa 100644 --- a/tig-algorithms/src/satisfiability/mod.rs +++ 
b/tig-algorithms/src/satisfiability/mod.rs @@ -7,7 +7,8 @@ pub use schnoing as c001_a001; // c001_a004 -// c001_a005 +pub mod walk_sat; +pub use walk_sat as c001_a005; // c001_a006 @@ -19,9 +20,11 @@ pub use schnoing as c001_a001; // c001_a010 -// c001_a011 +pub mod fast_walk_sat; +pub use fast_walk_sat as c001_a011; -// c001_a012 +pub mod sprint_sat; +pub use sprint_sat as c001_a012; // c001_a013 @@ -33,7 +36,8 @@ pub use schnoing as c001_a001; // c001_a017 -// c001_a018 +pub mod inbound; +pub use inbound as c001_a018; // c001_a019 @@ -43,7 +47,8 @@ pub use schnoing as c001_a001; // c001_a022 -// c001_a023 +pub mod sat_allocd; +pub use sat_allocd as c001_a023; // c001_a024 @@ -59,13 +64,15 @@ pub use schnoing as c001_a001; // c001_a030 -// c001_a031 +pub mod sat_optima; +pub use sat_optima as c001_a031; // c001_a032 // c001_a033 -// c001_a034 +pub mod sat_global; +pub use sat_global as c001_a034; // c001_a035 @@ -79,21 +86,26 @@ pub use schnoing as c001_a001; // c001_a040 -// c001_a041 +pub mod sat_global_opt; +pub use sat_global_opt as c001_a041; -// c001_a042 +pub mod sat_adaptive; +pub use sat_adaptive as c001_a042; // c001_a043 // c001_a044 -// c001_a045 +pub mod sat_adaptive_opt_un; +pub use sat_adaptive_opt_un as c001_a045; // c001_a046 -// c001_a047 +pub mod sat_separate; +pub use sat_separate as c001_a047; -// c001_a048 +pub mod sat_separate_prob; +pub use sat_separate_prob as c001_a048; // c001_a049 @@ -103,11 +115,13 @@ pub use schnoing as c001_a001; // c001_a052 -// c001_a053 +pub mod sat_separate_opt; +pub use sat_separate_opt as c001_a053; // c001_a054 -// c001_a055 +pub mod sat_separate_opt_p; +pub use sat_separate_opt_p as c001_a055; // c001_a056 @@ -115,11 +129,14 @@ pub use schnoing as c001_a001; // c001_a058 -// c001_a059 +pub mod sat_unified; +pub use sat_unified as c001_a059; -// c001_a060 +pub mod sat_unified_opt; +pub use sat_unified_opt as c001_a060; -// c001_a061 +pub mod better_sat; +pub use better_sat as c001_a061; // c001_a062 @@ -129,7 
+146,8 @@ pub use schnoing as c001_a001; // c001_a065 -// c001_a066 +pub mod sat_suma; +pub use sat_suma as c001_a066; // c001_a067 diff --git a/tig-algorithms/src/satisfiability/sat_adaptive/README.md b/tig-algorithms/src/satisfiability/sat_adaptive/README.md new file mode 100644 index 0000000..c5cfe96 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_adaptive/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_adaptive +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_adaptive/mod.rs b/tig-algorithms/src/satisfiability/sat_adaptive/mod.rs new file mode 100644 index 0000000..30e5397 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_adaptive/mod.rs @@ -0,0 +1,284 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut 
clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + 
} else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = vec![None; num_clauses]; + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices[i] = Some(residual_.len() - 1); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + loop { + if !residual_.is_empty() { + + let rand_val = rng.gen::(); + + let i = residual_[rand_val % residual_.len()]; + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = &mut clauses[i]; + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if variables[abs_l] { &p_clauses[abs_l] } else { &n_clauses[abs_l] }; + + let mut sad 
= 0; + for &c in clauses_to_check { + if num_good_so_far[c] == 1 { + sad += 1; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + + if rounds % check_interval == 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual as f64; + + let progress_threshold = 0.2 + 0.1 * f64::min(1.0, (clauses_ratio - 410.0) / 15.0); + + if progress <= 0 { + let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual as f64).min(1.0); + current_prob = (current_prob + prob_adjustment).min(0.9); + } else if progress_ratio > progress_threshold { + current_prob = base_prob; + } else { + current_prob = current_prob * 0.8 + base_prob * 0.2; + } + + last_check_residual = residual_.len(); + } + + let v = if min_sad == 0 { + v_min_sad + } else if rng.gen_bool(current_prob) { + c[0].abs() as usize - 1 + } else { + v_min_sad + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c].take().unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = Some(i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices[c] = Some(residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices[c] = Some(residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c].take().unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = Some(i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + if rounds >= max_num_rounds { + return Ok(()); + } + } + let _ = 
save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/README.md b/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/README.md new file mode 100644 index 0000000..a9bd954 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_adaptive_opt_un +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/mod.rs b/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/mod.rs new file mode 100644 index 0000000..99b3f5a --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_adaptive_opt_un/mod.rs @@ -0,0 +1,286 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let 
mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + 
p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = vec![usize::MAX; num_clauses]; + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices[i] = residual_.len() - 1; + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + loop { + if !residual_.is_empty() { + + let rand_val = rng.gen::(); + + let i = residual_[rand_val % residual_.len()]; + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = &mut clauses[i]; + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if variables[abs_l] { &p_clauses[abs_l] } else { 
&n_clauses[abs_l] }; + + let mut sad = 0; + for &c in clauses_to_check { + if num_good_so_far[c] == 1 { + sad += 1; + } + if sad == min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + + if rounds % check_interval == 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual as f64; + + let progress_threshold = 0.2 + 0.1 * f64::min(1.0, (clauses_ratio - 410.0) / 15.0); + + if progress <= 0 { + let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual as f64).min(1.0); + current_prob = (current_prob + prob_adjustment).min(0.9); + } else if progress_ratio > progress_threshold { + current_prob = base_prob; + } else { + current_prob = current_prob * 0.8 + base_prob * 0.2; + } + + last_check_residual = residual_.len(); + } + + let v = if min_sad == 0 { + v_min_sad + } else if rng.gen_bool(current_prob) { + c[0].abs() as usize - 1 + } else { + v_min_sad + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c]; + residual_indices[c] = usize::MAX; + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = i; + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices[c] = residual_.len() - 1; + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices[c] = residual_.len() - 1; + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c]; + residual_indices[c] = usize::MAX; + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = i; + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + 
} + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_allocd/README.md b/tig-algorithms/src/satisfiability/sat_allocd/README.md new file mode 100644 index 0000000..8a36958 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_allocd/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_allocd +* **Copyright:** 2024 AllFather +* **Identity of Submitter:** AllFather +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_allocd/mod.rs b/tig-algorithms/src/satisfiability/sat_allocd/mod.rs new file mode 100644 index 0000000..c7ab352 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_allocd/mod.rs @@ -0,0 +1,254 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut 
rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + let mut num_good_so_far: Vec = vec![0; num_clauses]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] 
= Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + if variables[var] { + num_good_so_far[i] += 1 + } + } else { + n_clauses[var].push(i); + if !variables[var] { + num_good_so_far[i] += 1 + } + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = HashMap::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices.insert(i, residual_.len() - 1); + } + } + + loop { + if !residual_.is_empty() { + let i = residual_[0]; + let mut min_sad = clauses.len(); + let mut v_min_sad = Vec::with_capacity(clauses[i].len()); // Preallocate with capacity + let c = &clauses[i]; + for &l in c { + let mut sad = 0 as usize; + if variables[(l.abs() - 1) as usize] { + for &c in &p_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } else { + for &c in &n_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad.clear(); + v_min_sad.push((l.abs() - 1) as usize); + } else if sad == min_sad { + v_min_sad.push((l.abs() - 1) as usize); + } + } + let v = if min_sad == 0 { + if v_min_sad.len() == 1 { + v_min_sad[0] + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + } else { + if rng.gen_bool(0.5) { + let l = c[rng.gen_range(0..(c.len() as u32)) as usize]; + (l.abs() - 1) as usize + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + 
residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + if rounds >= num_variables * 35 { + return Ok(()); + } + } + + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_global/README.md b/tig-algorithms/src/satisfiability/sat_global/README.md new file mode 100644 index 0000000..e91769b --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_global/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_global +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_global/mod.rs 
b/tig-algorithms/src/satisfiability/sat_global/mod.rs new file mode 100644 index 0000000..8ce1147 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_global/mod.rs @@ -0,0 +1,266 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let vad = num_p as f32 / (num_p + num_n).max(1) as f32; + + if vad >= 1.8 { + variables[v] = true; + } else if vad <= 0.56 { + variables[v] = false; + } else { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + 
num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = HashMap::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices.insert(i, residual_.len() - 1); + } + } + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 900.0 + 1.8 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + loop { + if !residual_.is_empty() { + + let rand_val = rng.gen::(); + + let i = residual_[rand_val % residual_.len()]; + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = &mut clauses[i]; + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if variables[abs_l] { &p_clauses[abs_l] } else { &n_clauses[abs_l] }; + + let mut sad = 0; + for &c in clauses_to_check { + if num_good_so_far[c] == 1 { + sad += 1; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + + let v = if min_sad == 0 { + v_min_sad + } else if rng.gen_bool(0.5) { + c[0].abs() as usize - 1 + } else { + v_min_sad + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in 
&n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + if rounds >= max_num_rounds { + return Ok(()); + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_global_opt/README.md b/tig-algorithms/src/satisfiability/sat_global_opt/README.md new file mode 100644 index 0000000..575d318 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_global_opt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_global_opt +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_global_opt/mod.rs b/tig-algorithms/src/satisfiability/sat_global_opt/mod.rs new file mode 100644 index 0000000..b4699ec --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_global_opt/mod.rs @@ -0,0 +1,261 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use 
serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && 
!variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = vec![None; num_clauses]; + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices[i] = Some(residual_.len() - 1); + } + } + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + loop { + if !residual_.is_empty() { + + let rand_val = rng.gen::(); + + let i = residual_[rand_val % residual_.len()]; + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = &mut clauses[i]; + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if variables[abs_l] { &p_clauses[abs_l] } else { &n_clauses[abs_l] }; + + let mut sad = 0; + for &c in clauses_to_check { + if num_good_so_far[c] == 1 { + sad += 1; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + + let v = if min_sad == 0 { + v_min_sad + } else if rng.gen_bool(0.5) { + c[0].abs() as usize - 1 + } else { + v_min_sad + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c].take().unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = Some(i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices[c] = Some(residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + 
residual_indices[c] = Some(residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices[c].take().unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices[last] = Some(i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + if rounds >= max_num_rounds { + return Ok(()); + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_optima/README.md b/tig-algorithms/src/satisfiability/sat_optima/README.md new file mode 100644 index 0000000..83c58b8 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_optima/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_optima +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_optima/mod.rs b/tig-algorithms/src/satisfiability/sat_optima/mod.rs new file mode 100644 index 0000000..6810503 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_optima/mod.rs @@ -0,0 +1,255 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: 
&Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + let mut num_good_so_far: Vec = vec![0; num_clauses]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + if variables[var] { + num_good_so_far[i] += 1 + } + } else { + n_clauses[var].push(i); + if !variables[var] { + num_good_so_far[i] += 1 + } + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = HashMap::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices.insert(i, residual_.len() - 
1); + } + } + + loop { + if !residual_.is_empty() { + + let i = residual_[rng.gen_range(0..residual_.len())]; + let mut min_sad = clauses.len(); + let mut v_min_sad = Vec::with_capacity(clauses[i].len()); // Preallocate with capacity + let c = &clauses[i]; + for &l in c { + let mut sad = 0 as usize; + if variables[(l.abs() - 1) as usize] { + for &c in &p_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } else { + for &c in &n_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad.clear(); + v_min_sad.push((l.abs() - 1) as usize); + } else if sad == min_sad { + v_min_sad.push((l.abs() - 1) as usize); + } + } + let v = if min_sad == 0 { + if v_min_sad.len() == 1 { + v_min_sad[0] + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + } else { + if rng.gen_bool(0.5) { + let l = c[rng.gen_range(0..(c.len() as u32)) as usize]; + (l.abs() - 1) as usize + } else { + v_min_sad[rng.gen_range(0..(v_min_sad.len() as u32)) as usize] + } + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { 
+ residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + rounds += 1; + if rounds >= num_variables * 35 { + return Ok(()); + } + } + + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate/README.md b/tig-algorithms/src/satisfiability/sat_separate/README.md new file mode 100644 index 0000000..0025b5d --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_separate +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate/mod.rs b/tig-algorithms/src/satisfiability/sat_separate/mod.rs new file mode 100644 index 0000000..5cb095c --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate/mod.rs @@ -0,0 +1,308 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = 
vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / 
num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = vec![usize::MAX; num_clauses]; + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices[i] = residual_.len() - 1; + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + let i = *residual_.get_unchecked(rand_val % residual_.len()); + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = clauses.get_unchecked_mut(i); + + // if rounds % check_interval == 0 { + // 
let progress = last_check_residual as i64 - residual_.len() as i64; + // let progress_ratio = progress as f64 / last_check_residual as f64; + + // let progress_threshold = 0.2 + 0.1 * f64::min(1.0, (clauses_ratio - 410.0) / 15.0); + + // if progress <= 0 { + // let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual as f64).min(1.0); + // current_prob = (current_prob + prob_adjustment).min(0.9); + // } else if progress_ratio > progress_threshold { + // current_prob = base_prob; + // } else { + // current_prob = current_prob * 0.8 + base_prob * 0.2; + // } + + // last_check_residual = residual_.len(); + // } + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if 
was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + if *num_good == 0 { + // Remove from residual + let i = *residual_indices.get_unchecked(cid); + *residual_indices.get_unchecked_mut(cid) = usize::MAX; + let last = residual_.pop().unwrap(); + if i < residual_.len() { + *residual_.get_unchecked_mut(i) = last; + *residual_indices.get_unchecked_mut(last) = i; + } + } + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + // Add to residual + residual_.push(cid); + *residual_indices.get_unchecked_mut(cid) = residual_.len() - 1; + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 1.0) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_opt/README.md b/tig-algorithms/src/satisfiability/sat_separate_opt/README.md new file mode 100644 index 0000000..cfdcce1 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_opt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_separate_opt +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: 
+https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_opt/mod.rs b/tig-algorithms/src/satisfiability/sat_separate_opt/mod.rs new file mode 100644 index 0000000..afb4772 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_opt/mod.rs @@ -0,0 +1,290 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && 
!variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = *residual_.get_unchecked(id); + if num_good_so_far[i] > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = clauses.get_unchecked_mut(i); + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if 
*variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 1.0) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_opt_p/README.md b/tig-algorithms/src/satisfiability/sat_separate_opt_p/README.md new file mode 100644 index 0000000..ec3562b --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_opt_p/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_separate_opt_p +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License 
+* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_opt_p/mod.rs b/tig-algorithms/src/satisfiability/sat_separate_opt_p/mod.rs new file mode 100644 index 0000000..cfb4825 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_opt_p/mod.rs @@ -0,0 +1,311 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && 
!variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = *residual_.get_unchecked(id); + if num_good_so_far[i] > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = clauses.get_unchecked_mut(i); + + + if challenge.difficulty.clauses_to_variables_percent >= 410 { + if rounds % check_interval == 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual as f64; + + let progress_threshold = 0.2 + 0.1 * f64::min(1.0, (clauses_ratio - 410.0) / 15.0); + + if progress <= 0 { + let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual as f64).min(1.0); + current_prob = (current_prob + prob_adjustment).min(0.9); + } else if progress_ratio > progress_threshold { + current_prob = base_prob; + } else { + current_prob = current_prob * 0.8 + base_prob * 0.2; + } + + last_check_residual = residual_.len(); + } + } + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = 
None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 1.0) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_prob/README.md 
b/tig-algorithms/src/satisfiability/sat_separate_prob/README.md new file mode 100644 index 0000000..14a5b12 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_prob/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_separate_prob +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_separate_prob/mod.rs b/tig-algorithms/src/satisfiability/sat_separate_prob/mod.rs new file mode 100644 index 0000000..e1b6680 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_separate_prob/mod.rs @@ -0,0 +1,308 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut rounds = 0; + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let 
mut c_: Vec = Vec::with_capacity(c.len()); // Preallocate with capacity + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + // Preallocate capacity for p_clauses and n_clauses + for c in &clauses { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + if p_clauses[var].capacity() == 0 { + p_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } else { + if n_clauses[var].capacity() == 0 { + n_clauses[var] = Vec::with_capacity(clauses.len() / num_variables + 1); + } + } + } + } + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let 
num_n = n_clauses[v].len(); + + let nad = 1.28; + let mut vad = nad + 1.0; + if num_n > 0 { + vad = num_p as f32 / num_n as f32; + } + + if vad <= nad { + variables[v] = false; + } else { + let prob = num_p as f64 / (num_p + num_n).max(1) as f64; + variables[v] = rng.gen_bool(prob) + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = vec![usize::MAX; num_clauses]; + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices[i] = residual_.len() - 1; + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + let check_interval = 50; + let mut last_check_residual = residual_.len(); + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + let i = *residual_.get_unchecked(rand_val % residual_.len()); + let mut min_sad = clauses.len(); + let mut v_min_sad = usize::MAX; + let c = clauses.get_unchecked_mut(i); + + if rounds % check_interval == 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual as f64; + + let progress_threshold = 0.2 + 0.1 * f64::min(1.0, (clauses_ratio - 410.0) / 15.0); + + if progress <= 0 { + let prob_adjustment = 0.025 * (-progress as f64 / last_check_residual as f64).min(1.0); + current_prob = 
(current_prob + prob_adjustment).min(0.9); + } else if progress_ratio > progress_threshold { + current_prob = base_prob; + } else { + current_prob = current_prob * 0.8 + base_prob * 0.2; + } + + last_check_residual = residual_.len(); + } + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + if *num_good == 0 { + // Remove from residual + let i = *residual_indices.get_unchecked(cid); + *residual_indices.get_unchecked_mut(cid) = usize::MAX; + let last = residual_.pop().unwrap(); + if i < residual_.len() 
{ + *residual_.get_unchecked_mut(i) = last; + *residual_indices.get_unchecked_mut(last) = i; + } + } + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + // Add to residual + residual_.push(cid); + *residual_indices.get_unchecked_mut(cid) = residual_.len() - 1; + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 1.0) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_suma/README.md b/tig-algorithms/src/satisfiability/sat_suma/README.md new file mode 100644 index 0000000..71ee176 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_suma/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_suma +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** Rootz +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_suma/mod.rs b/tig-algorithms/src/satisfiability/sat_suma/mod.rs new file mode 100644 index 0000000..ea51853 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_suma/mod.rs @@ -0,0 +1,366 @@ +use rand::{rngs::SmallRng, SeedableRng, Rng}; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; +use crate::{seeded_hasher, 
HashSet}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + let hasher = seeded_hasher(&challenge.seed); + + let mut clauses = challenge.clauses.clone(); + let mut i = clauses.len(); + while i > 0 { + i -= 1; + let clause = &mut clauses[i]; + + if clause.len() > 1 { + let mut seen = HashSet::with_hasher(hasher.clone()); + let mut j = 0; + let mut tautology = false; + while j < clause.len() { + let lit = clause[j]; + if seen.contains(&-lit) { + tautology = true; + break; + } + if !seen.insert(lit) { + clause.swap_remove(j); + } else { + j += 1; + } + } + if tautology { + clauses.swap_remove(i); + i += 1; + continue; + } + } + } + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + let mut clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + let mut dead = false; + + while !dead { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); + let mut skip = false; + for &l in c.iter() { + let idx = (l.abs() - 1) as usize; + if (p_single[idx] && l > 0) || (n_single[idx] && l < 0) { + skip = true; + break; + } + if p_single[idx] || n_single[idx] { + done = false; + continue; + } + c_.push(l); + } + if skip { + done = false; + continue; + }; + match c_[..] 
{ + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + for (i, c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let density = num_clauses as f64 / num_variables as f64; + let avg_clause_size = clauses.iter().map(|c| c.len()).sum::() as f64 / num_clauses as f64; + + let nad = 1.0; + let random_threshold = if num_variables >= 30000 { 0.01 } else { 0.003 }; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + if num_n == 0 && num_p > 0 { + variables[v] = true; + continue; + } else if num_p == 0 && num_n > 0 { + variables[v] = false; + continue; + } + + let vad = if num_n > 0 { num_p as f64 / num_n as f64 } else { nad + 1.0 }; + + if vad <= nad { + variables[v] = rng.gen_bool(random_threshold); + } else { + let prob = (num_p as f64 + 0.25) / ((num_p + num_n) as f64 + 1.2); + variables[v] = rng.gen_bool(prob); + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if (l > 0 && variables[var]) || (l < 0 && !variables[var]) { + num_good_so_far[i] = 
num_good_so_far[i].saturating_add(1); + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + if residual_.is_empty() { + for v in 0..num_variables { + if p_single[v] { + variables[v] = true; + } else if n_single[v] { + variables[v] = false; + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + + let large_problem_scale = ((num_variables as f64 - 25000.0) / 35000.0).max(0.0).min(1.0); + let base_interval = 60.0 - 30.0 * large_problem_scale; + let min_interval = if large_problem_scale > 0.0 { 15.0 } else { 25.0 }; + let density_factor = if density > 4.0 { 1.2 } else { 1.0 }; + let check_interval = (base_interval * density_factor * (1.0 + (density / 3.0).ln().max(0.0))).max(min_interval) as usize; + let max_random_prob = 0.9; + let prob_adjustment_factor = 0.025; + let smoothing_factor = 0.8; + + let mut last_check_residual = residual_.len(); + + let max_fuel = 10_000_000_000.0; + let difficulty_factor = density * avg_clause_size.sqrt(); + let scale_factor = if num_variables > 25000 { 1.5 } else { 1.0 }; + let base_fuel = (2000.0 + 100.0 * difficulty_factor) * (num_variables as f64).sqrt() * scale_factor; + let flip_fuel = (200.0 + difficulty_factor) / scale_factor; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + let mut rounds = 0; + + unsafe { + loop { + if rounds >= max_num_rounds { + return Ok(()); + } + + if rounds % check_interval == 0 && rounds > 0 { + let progress = last_check_residual as i64 - residual_.len() as i64; + let progress_ratio = progress as f64 / last_check_residual.max(1) as f64; + + let progress_threshold = 0.15 + 0.05 * (density / 3.0).min(1.0); + + if progress <= 0 { + let prob_adjustment = prob_adjustment_factor * (-progress as f64 / last_check_residual.max(1) as f64).min(1.0); + current_prob = 
(current_prob + prob_adjustment).min(max_random_prob); + } else if progress_ratio > progress_threshold { + current_prob = base_prob; + } else { + current_prob = current_prob * smoothing_factor + base_prob * (1.0 - smoothing_factor); + } + + last_check_residual = residual_.len(); + } + + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = residual_[id]; + if num_good_so_far[i] > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let c = clauses.get_unchecked_mut(i); + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rng.gen::() < current_prob { + c[0].abs() as usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + let mut min_weight = usize::MAX; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + + for &c_idx in clauses_to_check { + if *num_good_so_far.get_unchecked(c_idx) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad == 0 { + v_min_sad = abs_l; + break; + } + + let appearances = p_clauses.get_unchecked(abs_l).len() + n_clauses.get_unchecked(abs_l).len(); + let combined_weight = sad * 1000 + appearances; + + if combined_weight < min_weight { + min_sad = sad; + 
min_weight = combined_weight; + v_min_sad = abs_l; + } + + if min_sad <= 1 { + break; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good = num_good.saturating_add(1); + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + let new_val = num_good.saturating_sub(1); + *num_good = new_val; + if new_val == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + } + } + + for v in 0..num_variables { + if p_single[v] { + variables[v] = true; + } else if n_single[v] { + variables[v] = false; + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} diff --git a/tig-algorithms/src/satisfiability/sat_unified/README.md b/tig-algorithms/src/satisfiability/sat_unified/README.md new file mode 100644 index 0000000..2bc32dc --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_unified/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_unified +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at 
end of file diff --git a/tig-algorithms/src/satisfiability/sat_unified/mod.rs b/tig-algorithms/src/satisfiability/sat_unified/mod.rs new file mode 100644 index 0000000..de08943 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_unified/mod.rs @@ -0,0 +1,226 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut clauses = challenge.clauses.clone(); + let mut i = clauses.len(); + while i > 0 { + i -= 1; + { + let clause = &mut clauses[i]; + if clause[0] == clause[2] || clause[1] == clause[2] { + clause.pop(); + } + if clause[0] == clause[1] { + clause.swap_remove(1); + } + } + + let should_remove = { + let clause = &clauses[i]; + (clause.len() >= 2 && clause[0] == -clause[1]) || + (clause.len() >= 3 && (clause[0] == -clause[2] || clause[1] == -clause[2])) + }; + if should_remove { + clauses.swap_remove(i); + } + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { + n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + let (pad, nad) = (1.8, 0.56); + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let mut vad; + if num_n == 0 { + vad = pad + 1.0; + } else { + vad = num_p as f32 / num_n as f32; + } + + if vad > pad { + variables[v] = true; + } else if vad < nad { + 
variables[v] = false; + } else { + variables[v] = rng.gen_bool(0.5); + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + let mut rounds = 0; + + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = *residual_.get_unchecked(id); + if num_good_so_far[i] > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let c = clauses.get_unchecked_mut(i); + + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let mut zero_found = None; + 'outer: for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(abs_l); + break; + } + + let v = if let Some(abs_l) = zero_found { + abs_l + } else if rand_val < (current_prob * (usize::MAX as f64)) as usize { + c[0].abs() as 
usize - 1 + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = c[0].abs() as usize - 1; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if *variables.get_unchecked(abs_l) { + p_clauses.get_unchecked(abs_l) + } else { + n_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = abs_l; + } + if sad == 1 { + break; + } + } + v_min_sad + }; + + let was_true = *variables.get_unchecked(v); + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v) + } else { + n_clauses.get_unchecked(v) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v) + } else { + p_clauses.get_unchecked(v) + }; + + for &cid in clauses_to_increment { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good += 1; + } + + for &cid in clauses_to_decrement { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 0.2) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_unified_opt/README.md b/tig-algorithms/src/satisfiability/sat_unified_opt/README.md new file mode 100644 index 0000000..65cbad7 --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_unified_opt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sat_unified_opt +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null 
+ +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sat_unified_opt/mod.rs b/tig-algorithms/src/satisfiability/sat_unified_opt/mod.rs new file mode 100644 index 0000000..9ca803a --- /dev/null +++ b/tig-algorithms/src/satisfiability/sat_unified_opt/mod.rs @@ -0,0 +1,229 @@ +use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut clauses = challenge.clauses.clone(); + let mut i = clauses.len(); + while i > 0 { + i -= 1; + { + let clause = &mut clauses[i]; + if clause[0] == clause[2] || clause[1] == clause[2] { + clause.pop(); + } + if clause[0] == clause[1] { + clause.swap_remove(1); + } + } + + let should_remove = { + let clause = &clauses[i]; + (clause.len() >= 2 && clause[0] == -clause[1]) || + (clause.len() >= 3 && (clause[0] == -clause[2] || clause[1] == -clause[2])) + }; + if should_remove { + clauses.swap_remove(i); + } + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![Vec::new(); num_variables]; + let mut n_clauses: Vec> = vec![Vec::new(); num_variables]; + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + } else { 
+ n_clauses[var].push(i); + } + } + } + + let mut variables = vec![false; num_variables]; + let (pad, nad) = (1.8, 0.56); + for v in 0..num_variables { + let num_p = p_clauses[v].len(); + let num_n = n_clauses[v].len(); + + let mut vad; + if num_n == 0 { + vad = pad + 1.0; + } else { + vad = num_p as f32 / num_n as f32; + } + + if vad > pad { + variables[v] = true; + } else if vad < nad { + variables[v] = false; + } else { + variables[v] = rng.gen_bool(0.5); + } + } + + let mut num_good_so_far: Vec = vec![0; num_clauses]; + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 && variables[var] { + num_good_so_far[i] += 1 + } else if l < 0 && !variables[var] { + num_good_so_far[i] += 1 + } + } + } + + + let mut residual_ = Vec::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + } + } + + let base_prob = 0.52; + let mut current_prob = base_prob; + + let clauses_ratio = challenge.difficulty.clauses_to_variables_percent as f64; + let num_vars = challenge.difficulty.num_variables as f64; + let max_fuel = 2000000000.0; + let base_fuel = (2000.0 + 40.0 * clauses_ratio) * num_vars; + let flip_fuel = 350.0 + 0.9 * clauses_ratio; + let max_num_rounds = ((max_fuel - base_fuel) / flip_fuel) as usize; + let mut rounds = 0; + + unsafe { + loop { + if !residual_.is_empty() { + let rand_val = rng.gen::(); + + let mut i = residual_.len() - 1; + while !residual_.is_empty() { + let id = rand_val % residual_.len(); + i = *residual_.get_unchecked(id); + if *num_good_so_far.get_unchecked(i) > 0 { + residual_.swap_remove(id); + } else { + break + } + } + if residual_.is_empty() { + break; + } + + let c = clauses.get_unchecked_mut(i); + if c.len() > 1 { + let random_index = rand_val % c.len(); + c.swap(0, random_index); + } + + let v = if rand_val < (current_prob * (usize::MAX as f64)) as usize { + let mut zero_found = None; + + 'outer: for &l in 
c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if l > 0 { + n_clauses.get_unchecked(abs_l) + } else { + p_clauses.get_unchecked(abs_l) + }; + + for &c in clauses_to_check.iter() { + if *num_good_so_far.get_unchecked(c) == 1 { + continue 'outer; + } + } + zero_found = Some(l); + break; + } + + if let Some(l) = zero_found { + l + } else { + *c.get_unchecked(0) + } + } else { + let mut min_sad = usize::MAX; + let mut v_min_sad = 0; + + for &l in c.iter() { + let abs_l = l.abs() as usize - 1; + let clauses_to_check = if l > 0 { + n_clauses.get_unchecked(abs_l) + } else { + p_clauses.get_unchecked(abs_l) + }; + + let mut sad = 0; + for &c in clauses_to_check.iter() { + if *num_good_so_far.get_unchecked(c) == 1 { + sad += 1; + } + if sad >= min_sad { + break; + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = l; + } + if sad == 0 { + break; + } + } + v_min_sad + }; + + let v_idx = v.abs() as usize - 1; + let was_true = v < 0; + let clauses_to_decrement = if was_true { + p_clauses.get_unchecked(v_idx) + } else { + n_clauses.get_unchecked(v_idx) + }; + let clauses_to_increment = if was_true { + n_clauses.get_unchecked(v_idx) + } else { + p_clauses.get_unchecked(v_idx) + }; + + for &cid in clauses_to_increment.iter() { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good += 1; + } + + for &cid in clauses_to_decrement.iter() { + let num_good = num_good_so_far.get_unchecked_mut(cid); + *num_good -= 1; + if *num_good == 0 { + residual_.push(cid); + } + } + + *variables.get_unchecked_mut(v_idx) = !was_true; + } else { + break; + } + rounds += 1; + // if rounds >= (max_num_rounds as f32 * 0.2) as usize { + // return Ok(()); + // } + } + } + let _ = save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sprint_sat/README.md b/tig-algorithms/src/satisfiability/sprint_sat/README.md new file mode 100644 index 0000000..c2a257d --- /dev/null +++ 
b/tig-algorithms/src/satisfiability/sprint_sat/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** sprint_sat +* **Copyright:** 2024 Dominic Kennedy +* **Identity of Submitter:** Dominic Kennedy +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/sprint_sat/mod.rs b/tig-algorithms/src/satisfiability/sprint_sat/mod.rs new file mode 100644 index 0000000..3a8688b --- /dev/null +++ b/tig-algorithms/src/satisfiability/sprint_sat/mod.rs @@ -0,0 +1,236 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::collections::HashMap; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + + let mut p_single = vec![false; challenge.difficulty.num_variables]; + let mut n_single = vec![false; challenge.difficulty.num_variables]; + + let mut clauses_ = challenge.clauses.clone(); + let mut clauses: Vec> = Vec::with_capacity(clauses_.len()); + + let mut dead = false; + + while !(dead) { + let mut done = true; + for c in &clauses_ { + let mut c_: Vec = Vec::with_capacity(c.len()); + let mut skip = false; + for (i, l) in c.iter().enumerate() { + if (p_single[(l.abs() - 1) as usize] && *l > 0) + || (n_single[(l.abs() - 1) as 
usize] && *l < 0) + || c[(i + 1)..].contains(&-l) + { + skip = true; + break; + } else if p_single[(l.abs() - 1) as usize] + || n_single[(l.abs() - 1) as usize] + || c[(i + 1)..].contains(&l) + { + done = false; + continue; + } else { + c_.push(*l); + } + } + if skip { + done = false; + continue; + }; + match c_[..] { + [l] => { + done = false; + if l > 0 { + if n_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + p_single[(l.abs() - 1) as usize] = true; + } + } else { + if p_single[(l.abs() - 1) as usize] { + dead = true; + break; + } else { + n_single[(l.abs() - 1) as usize] = true; + } + } + } + [] => { + dead = true; + break; + } + _ => { + clauses.push(c_); + } + } + } + if done { + break; + } else { + clauses_ = clauses; + clauses = Vec::with_capacity(clauses_.len()); + } + } + + if dead { + return Ok(()); + } + + let num_variables = challenge.difficulty.num_variables; + let num_clauses = clauses.len(); + + let mut p_clauses: Vec> = vec![vec![]; num_variables]; + let mut n_clauses: Vec> = vec![vec![]; num_variables]; + + let mut variables = vec![false; num_variables]; + for v in 0..num_variables { + if p_single[v] { + variables[v] = true + } else if n_single[v] { + variables[v] = false + } else { + variables[v] = rng.gen_bool(0.5) + } + } + let mut num_good_so_far: Vec = vec![0; num_clauses]; + + for (i, &ref c) in clauses.iter().enumerate() { + for &l in c { + let var = (l.abs() - 1) as usize; + if l > 0 { + p_clauses[var].push(i); + if variables[var] { + num_good_so_far[i] += 1 + } + } else { + n_clauses[var].push(i); + if !variables[var] { + num_good_so_far[i] += 1 + } + } + } + } + + let mut residual_ = Vec::with_capacity(num_clauses); + let mut residual_indices = HashMap::with_capacity(num_clauses); + + for (i, &num_good) in num_good_so_far.iter().enumerate() { + if num_good == 0 { + residual_.push(i); + residual_indices.insert(i, residual_.len() - 1); + } + } + + let mut attempts = 0; + loop { + if attempts >= num_variables * 25 { + 
return Ok(()); + } + if !residual_.is_empty() { + let i = residual_[0]; + let mut min_sad = clauses.len(); + let mut v_min_sad = vec![]; + let c = &clauses[i]; + for &l in c { + let mut sad = 0 as usize; + if variables[(l.abs() - 1) as usize] { + for &c in &p_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } else { + for &c in &n_clauses[(l.abs() - 1) as usize] { + if num_good_so_far[c] == 1 { + sad += 1; + if sad > min_sad { + break; + } + } + } + } + + if sad < min_sad { + min_sad = sad; + v_min_sad = vec![(l.abs() - 1) as usize]; + } else if sad == min_sad { + v_min_sad.push((l.abs() - 1) as usize); + } + } + let v = if min_sad == 0 { + if v_min_sad.len() == 1 { + v_min_sad[0] + } else { + v_min_sad[rng.gen_range(0..v_min_sad.len())] + } + } else { + if rng.gen_bool(0.5) { + let l = c[rng.gen_range(0..c.len())]; + (l.abs() - 1) as usize + } else { + v_min_sad[rng.gen_range(0..v_min_sad.len())] + } + }; + + if variables[v] { + for &c in &n_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + for &c in &p_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + } else { + for &c in &n_clauses[v] { + if num_good_so_far[c] == 1 { + residual_.push(c); + residual_indices.insert(c, residual_.len() - 1); + } + num_good_so_far[c] -= 1; + } + + for &c in &p_clauses[v] { + num_good_so_far[c] += 1; + if num_good_so_far[c] == 1 { + let i = residual_indices.remove(&c).unwrap(); + let last = residual_.pop().unwrap(); + if i < residual_.len() { + residual_[i] = last; + residual_indices.insert(last, i); + } + } + } + } + + variables[v] = !variables[v]; + } else { + break; + } + attempts += 1; + } + + let _ = 
save_solution(&Solution { variables }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/walk_sat/README.md b/tig-algorithms/src/satisfiability/walk_sat/README.md new file mode 100644 index 0000000..040db5a --- /dev/null +++ b/tig-algorithms/src/satisfiability/walk_sat/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** satisfiability +* **Algorithm Name:** walk_sat +* **Copyright:** 2024 Chad Blanchard +* **Identity of Submitter:** Chad Blanchard +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/satisfiability/walk_sat/mod.rs b/tig-algorithms/src/satisfiability/walk_sat/mod.rs new file mode 100644 index 0000000..cd3ee43 --- /dev/null +++ b/tig-algorithms/src/satisfiability/walk_sat/mod.rs @@ -0,0 +1,45 @@ +// TIG's UI uses the pattern `tig_challenges::` to automatically detect your algorithm's challenge +use rand::prelude::*; +use rand::rngs::StdRng; +use rand::SeedableRng; +use serde_json::{Map, Value}; +use tig_challenges::satisfiability::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap()) as u64); + let num_variables = challenge.difficulty.num_variables; + let max_flips = 1000; + + let mut variables: Vec = (0..num_variables).map(|_| rng.gen::()).collect(); + + for _ in 0..max_flips 
{ + let mut unsatisfied_clauses: Vec<&Vec> = challenge + .clauses + .iter() + .filter(|clause| !clause_satisfied(clause, &variables)) + .collect(); + + if unsatisfied_clauses.is_empty() { + let _ = save_solution(&Solution { variables }); + return Ok(()); + } + + let clause = unsatisfied_clauses.choose(&mut rng).unwrap(); + let literal = clause.choose(&mut rng).unwrap(); + let var_idx = literal.abs() as usize - 1; + variables[var_idx] = !variables[var_idx]; + } + + Ok(()) +} + +fn clause_satisfied(clause: &Vec, variables: &[bool]) -> bool { + clause.iter().any(|&literal| { + let var_idx = literal.abs() as usize - 1; + (literal > 0 && variables[var_idx]) || (literal < 0 && !variables[var_idx]) + }) +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/better_vector/README.md b/tig-algorithms/src/vector_search/better_vector/README.md new file mode 100644 index 0000000..157e7bf --- /dev/null +++ b/tig-algorithms/src/vector_search/better_vector/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** better_vector +* **Copyright:** 2025 frogmarch +* **Identity of Submitter:** frogmarch +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/better_vector/kernels.cu b/tig-algorithms/src/vector_search/better_vector/kernels.cu new file mode 100644 index 0000000..6908d99 --- /dev/null +++ b/tig-algorithms/src/vector_search/better_vector/kernels.cu @@ -0,0 +1,23 @@ +/*! 
+Copyright 2025 frogmarch + +Identity of Submitter frogmarch + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/better_vector/mod.rs b/tig-algorithms/src/vector_search/better_vector/mod.rs new file mode 100644 index 0000000..18682b8 --- /dev/null +++ b/tig-algorithms/src/vector_search/better_vector/mod.rs @@ -0,0 +1,522 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::{BinaryHeap, HashSet}; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + 
index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, 
median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let len = a.len(); + + if a.len() != b.len() || len < 8 { + return f32::MAX; + } + + let mut a_ptr = a.as_ptr(); + let mut b_ptr = b.as_ptr(); + let end_ptr = unsafe { a_ptr.add(len - 7) }; + + while a_ptr < end_ptr { + unsafe { + let a0 = *a_ptr.add(0); + let a1 = *a_ptr.add(1); + let a2 = *a_ptr.add(2); + let a3 = *a_ptr.add(3); + let a4 = *a_ptr.add(4); + let a5 = *a_ptr.add(5); + let a6 = *a_ptr.add(6); + let a7 = *a_ptr.add(7); + + let b0 = *b_ptr.add(0); + let b1 = *b_ptr.add(1); + let b2 = *b_ptr.add(2); + let b3 = *b_ptr.add(3); + let b4 = *b_ptr.add(4); + let b5 = *b_ptr.add(5); + let b6 = *b_ptr.add(6); + let b7 = *b_ptr.add(7); + + let block_sum = (a0 - b0).powi(2) + + (a1 - b1).powi(2) + + (a2 - b2).powi(2) + + (a3 - b3).powi(2) + + (a4 
- b4).powi(2) + + (a5 - b5).powi(2) + + (a6 - b6).powi(2) + + (a7 - b7).powi(2); + + sum += block_sum; + } + + if sum > current_min { + return f32::MAX; + } + + a_ptr = unsafe { a_ptr.add(8) }; + b_ptr = unsafe { b_ptr.add(8) }; + } + + + let remaining = len - (unsafe { a_ptr.offset_from(a.as_ptr()) } as usize); + for i in 0..remaining { + unsafe { + let diff = *a_ptr.add(i) - *b_ptr.add(i); + sum += diff * diff; + } + } + + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + + fn ejection_chain_search<'a>( + query: &[f32], + database: &'a [Vec], + initial_idx: usize, + initial_dist: f32, + candidates: &[(f32, &'a [f32], usize)], + max_chain_length: usize, + ) -> (f32, usize) { + let mut best_idx = initial_idx; + let mut best_dist = initial_dist; + let mut visited = HashSet::new(); + visited.insert(initial_idx); + + let mut current_idx = initial_idx; + let mut current_chain_length = 0; + + while current_chain_length < max_chain_length { + current_chain_length += 1; + let mut improved = false; + + let current_vector = &database[current_idx]; + + let mut neighbors = Vec::with_capacity(5); + for &(_, vec, idx) in candidates { 
+ if !visited.contains(&idx) { + let dist_to_current = squared_euclidean_distance(current_vector, vec); + if dist_to_current < f32::MAX { + neighbors.push((dist_to_current, idx)); + } + } + } + + neighbors.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + for &(_, neighbor_idx) in neighbors.iter().take(3) { + let dist = early_stopping_distance(query, &database[neighbor_idx], best_dist); + if dist < best_dist { + best_dist = dist; + best_idx = neighbor_idx; + current_idx = neighbor_idx; + visited.insert(neighbor_idx); + improved = true; + break; + } + } + + if !improved { + if neighbors.is_empty() { + break; + } + let random_pick = (std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .subsec_nanos() as usize) % neighbors.len(); + current_idx = neighbors[random_pick].1; + visited.insert(current_idx); + } + } + + let time_based_seed = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .subsec_nanos() as usize; + + for _ in 0..5 { + let random_idx = (time_based_seed % database.len() + 1) % database.len(); + if !visited.contains(&random_idx) { + let dist = early_stopping_distance(query, &database[random_idx], best_dist); + if dist < best_dist { + best_dist = dist; + best_idx = random_idx; + } + } + } + + for &(_, _, idx) in candidates.iter().take(10) { + if !visited.contains(&idx) { + let dist = early_stopping_distance(query, &database[idx], best_dist); + if dist < best_dist { + best_dist = dist; + best_idx = idx; + } + } + } + + (best_dist, best_idx) + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); 
+ + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 10000000000.0; + let base_fuel = 760000000.0; + let alpha = 1630.0 * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: 
Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let improvement_threshold = distances.iter().fold(0.0, |acc, &x| acc + x) / (distances.len() as f32) * 1.2; + let brute_force_count = (query_count as f32 * 0.15) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = &challenge.query_vectors[query_index]; + let initial_dist = distances[query_index]; + let initial_idx = best_indexes[query_index]; + + if initial_dist > improvement_threshold { + let (improved_dist, improved_idx) = ejection_chain_search( + query, + &challenge.vector_database, + initial_idx, + initial_dist, + &closest_vectors, + 7 + ); + + if improved_dist < initial_dist { + best_indexes[query_index] = improved_idx; + distances[query_index] = improved_dist; + } else { + let mut best = (initial_dist, initial_idx); + + for &(_, vec, index) in &r_vectors { + let dist = early_stopping_distance(query, vec, best.0); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + distances[query_index] = best.0; + } + } else { + let mut best = (initial_dist, initial_idx); + + for &(_, vec, index) in &r_vectors { + let dist = early_stopping_distance(query, vec, best.0); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + } + + 
Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/brute_force_bacalhau/README.md b/tig-algorithms/src/vector_search/brute_force_bacalhau/README.md new file mode 100644 index 0000000..bd9a439 --- /dev/null +++ b/tig-algorithms/src/vector_search/brute_force_bacalhau/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** brute_force_bacalhau +* **Copyright:** 2024 Louis Silva +* **Identity of Submitter:** Louis Silva +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/brute_force_bacalhau/kernels.cu b/tig-algorithms/src/vector_search/brute_force_bacalhau/kernels.cu new file mode 100644 index 0000000..6317239 --- /dev/null +++ b/tig-algorithms/src/vector_search/brute_force_bacalhau/kernels.cu @@ -0,0 +1,19 @@ +/*! +Copyright 2024 Louis Silva + +Licensed under the TIG Inbound Game License v1.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific +language governing permissions and limitations under the License. + */ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/brute_force_bacalhau/mod.rs b/tig-algorithms/src/vector_search/brute_force_bacalhau/mod.rs new file mode 100644 index 0000000..58da807 --- /dev/null +++ b/tig-algorithms/src/vector_search/brute_force_bacalhau/mod.rs @@ -0,0 +1,102 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use serde_json::{Map, Value}; +use std::sync::Arc; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Result; + + use tig_challenges::vector_search::*; + + #[inline] + fn l2_norm(x: &[f32]) -> f32 { + x.iter().map(|&val| val * val).sum::().sqrt() + } + + #[inline] + fn euclidean_distance_with_precomputed_norm( + a_norm_sq: f32, + b_norm_sq: f32, + ab_dot_product: f32, + ) -> f32 { + (a_norm_sq + b_norm_sq - 2.0 * ab_dot_product).sqrt() + } + + pub fn solve_challenge(challenge: &Challenge) -> Result> { + let vector_database: &Vec> = &challenge.vector_database; + let query_vectors: &Vec> = &challenge.query_vectors; + let max_distance: f32 = challenge.max_distance; + + let mut indexes: Vec = Vec::with_capacity(query_vectors.len()); + let mut vector_norms_sq: Vec = Vec::with_capacity(vector_database.len()); + + let mut sum_norms_sq: f32 = 0.0; + let mut sum_squares: f32 = 0.0; + + for vector in vector_database { + let norm_sq: f32 = vector.iter().map(|&val| val * val).sum(); + sum_norms_sq 
+= norm_sq.sqrt(); + sum_squares += norm_sq; + vector_norms_sq.push(norm_sq); + } + + let vector_norms_len: f32 = vector_norms_sq.len() as f32; + let std_dev: f32 = + ((sum_squares / vector_norms_len) - (sum_norms_sq / vector_norms_len).powi(2)).sqrt(); + let norm_threshold: f32 = 2.0 * std_dev; + + for query in query_vectors { + let query_norm_sq: f32 = query.iter().map(|&val| val * val).sum(); + + let mut closest_index: Option = None; + let mut closest_distance: f32 = f32::MAX; + + for (idx, vector) in vector_database.iter().enumerate() { + let vector_norm_sq = vector_norms_sq[idx]; + if ((vector_norm_sq.sqrt() - query_norm_sq.sqrt()).abs()) > norm_threshold { + continue; + } + + let ab_dot_product: f32 = query.iter().zip(vector).map(|(&x1, &x2)| x1 * x2).sum(); + let distance: f32 = euclidean_distance_with_precomputed_norm( + query_norm_sq, + vector_norm_sq, + ab_dot_product, + ); + + if distance <= max_distance { + closest_index = Some(idx); + break; // Early exit + } else if distance < closest_distance { + closest_index = Some(idx); + closest_distance = distance; + } + } + + if let Some(index) = closest_index { + indexes.push(index); + } else { + return Ok(None); + } + } + + Ok(Some(Solution { indexes })) + } +} diff --git a/tig-algorithms/src/vector_search/cluster_improved/README.md b/tig-algorithms/src/vector_search/cluster_improved/README.md new file mode 100644 index 0000000..12d749c --- /dev/null +++ b/tig-algorithms/src/vector_search/cluster_improved/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** cluster_improved +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound 
Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/cluster_improved/kernels.cu b/tig-algorithms/src/vector_search/cluster_improved/kernels.cu new file mode 100644 index 0000000..08494e4 --- /dev/null +++ b/tig-algorithms/src/vector_search/cluster_improved/kernels.cu @@ -0,0 +1,821 @@ +/*!Copyright 2025 Rootz + +Identity of Submitter Rootz + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ +#include +#include + +#define MAX_FLOAT 3.402823466e+38F + +__device__ __forceinline__ float euclidean_distance(const float* __restrict__ a, const float* __restrict__ b, int dims) { + float sum = 0.0f; + float c = 0.0f; + int i; + + for (i = 0; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + + float s0 = d0*d0 + d1*d1 + d2*d2 + d3*d3; + float s1 = d4*d4 + d5*d5 + d6*d6 + d7*d7; + float s2 = d8*d8 + d9*d9 + d10*d10 + d11*d11; + float s3 = d12*d12 + d13*d13 + d14*d14 + d15*d15; + + float partial = s0 + s1 + s2 + s3; + float y = partial - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims; i++) { + float diff = a[i] - b[i]; + float squared = diff * diff; + float y = squared - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + 
return sum; +} + +__device__ __forceinline__ float euclidean_distance_high(const float* __restrict__ a, const float* __restrict__ b, int dims) { + float sum = 0.0f; + float c = 0.0f; + int i; + + for (i = 0; i < dims - 31; i += 32) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + float d16=a[i+16]-b[i+16], d17=a[i+17]-b[i+17], d18=a[i+18]-b[i+18], d19=a[i+19]-b[i+19]; + float d20=a[i+20]-b[i+20], d21=a[i+21]-b[i+21], d22=a[i+22]-b[i+22], d23=a[i+23]-b[i+23]; + float d24=a[i+24]-b[i+24], d25=a[i+25]-b[i+25], d26=a[i+26]-b[i+26], d27=a[i+27]-b[i+27]; + float d28=a[i+28]-b[i+28], d29=a[i+29]-b[i+29], d30=a[i+30]-b[i+30], d31=a[i+31]-b[i+31]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d8*d8; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d9*d9; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d10*d10; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d11*d11; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d12*d12; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d13*d13; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d14*d14; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d15*d15; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d16*d16; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d17*d17; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d18*d18; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d19*d19; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d20*d20; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d21*d21; y=v-c; t=sum+y; 
c=(t-sum)-y; sum=t; + v=d22*d22; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d23*d23; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d24*d24; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d25*d25; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d26*d26; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d27*d27; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d28*d28; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d29*d29; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d30*d30; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d31*d31; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d8*d8; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d9*d9; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d10*d10; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d11*d11; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d12*d12; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d13*d13; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d14*d14; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d15*d15; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; 
c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + } + + for (; i < dims; i++) { + float diff = a[i] - b[i]; + float squared = diff * diff; + float y = squared - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + return sum; +} + +__device__ __forceinline__ float euclidean_distance_bounded(const float* __restrict__ a, const float* __restrict__ b, int dims, float limit) { + float sum = 0.0f; + float c = 0.0f; + float margin = fmaxf(1e-6f, 1.0e-4f * (1.0f + limit)); + int i; + for (i = 0; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + float s0=d0*d0+d1*d1+d2*d2+d3*d3; + float s1=d4*d4+d5*d5+d6*d6+d7*d7; + float s2=d8*d8+d9*d9+d10*d10+d11*d11; + float s3=d12*d12+d13*d13+d14*d14+d15*d15; + float partial=s0+s1+s2+s3; + float y=partial-c; + float t=sum+y; + c=(t-sum)-y; + sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; 
t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims; i++) { + float diff=a[i]-b[i]; + float squared=diff*diff; + float y=squared-c; + float t=sum+y; + c=(t-sum)-y; + sum=t; + if (sum > limit + margin) return sum; + } + return sum; +} + +__device__ __forceinline__ float euclidean_distance_high_bounded(const float* __restrict__ a, const float* __restrict__ b, int dims, float limit) { + float sum=0.0f; + float c=0.0f; + float margin = fmaxf(1e-6f, 1.0e-4f * (1.0f + limit)); + int i; + for (i=0;i limit + margin) return sum; + } + for (; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d8*d8; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d9*d9; y=v-c; t=sum+y; 
c=(t-sum)-y; sum=t; + v=d10*d10; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d11*d11; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d12*d12; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d13*d13; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d14*d14; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d15*d15; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d4*d4; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d5*d5; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d6*d6; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d7*d7; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float v,y,t; + v=d0*d0; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d1*d1; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d2*d2; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + v=d3*d3; y=v-c; t=sum+y; c=(t-sum)-y; sum=t; + if (sum > limit + margin) return sum; + } + for (; i < dims; i++) { + float diff=a[i]-b[i]; + float squared=diff*diff; + float y=squared-c; + float t=sum+y; + c=(t-sum)-y; + sum=t; + if (sum > limit + margin) return sum; + } + return sum; +} + +__device__ __forceinline__ float euclidean_distance_precise_bounded(const float* __restrict__ a, const float* __restrict__ b, int dims, float limit) { + double acc = 0.0; + double lim = (double)limit; + for (int i = 0; i < dims; i++) { + double d = (double)a[i] - (double)b[i]; + acc += d * d; + if (acc > lim) return (float)acc; + } + return (float)acc; +} + +extern "C" __global__ void deterministic_clustering( + const float* __restrict__ 
database_vectors, + float* __restrict__ cluster_centers, + int* __restrict__ cluster_assignments, + int* __restrict__ cluster_sizes, + int database_size, + int vector_dims, + int num_clusters, + int num_queries +) { + int cluster_idx = blockIdx.x; + int tid = threadIdx.x; + + if (cluster_idx >= num_clusters) return; + + long long seed_idx = ((long long)cluster_idx * 982451653LL + 1566083941LL) % (long long)database_size; + int stride = max(1, database_size / (num_clusters * 37)); + long long start_idx = seed_idx; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + float acc = 0.0f; + long long idx = start_idx; + #pragma unroll + for (int k = 0; k < 4; ++k) { + int pos = (int)(idx % (long long)database_size); + acc += database_vectors[pos * vector_dims + d]; + idx += stride; + } + cluster_centers[cluster_idx * vector_dims + d] = acc * 0.25f; + } + + if (tid == 0) { + cluster_sizes[cluster_idx] = 0; + } +} + +extern "C" __global__ void assign_clusters( + const float* __restrict__ database_vectors, + const float* __restrict__ cluster_centers, + int* __restrict__ cluster_assignments, + int* __restrict__ cluster_sizes, + int database_size, + int vector_dims, + int num_clusters, + int num_queries +) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + const bool use_high = (num_queries > 3000) || (vector_dims >= 700); + if (thread_id < database_size) { + int vec_idx = thread_id; + const float* vector = database_vectors + vec_idx * vector_dims; + float min_dist = MAX_FLOAT; + int best_cluster = 0; + for (int c = 0; c < num_clusters; c++) { + const float* c_center = cluster_centers + c * vector_dims; + float dist = use_high ? 
euclidean_distance_high(vector, c_center, vector_dims) + : euclidean_distance(vector, c_center, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_cluster = c; + } + } + cluster_assignments[vec_idx] = best_cluster; + } +} + +extern "C" __global__ void exclusive_scan_sizes( + const int* cluster_sizes, + int* cluster_offsets, + int* write_offsets, + int num_clusters +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + int acc = 0; + for (int i = 0; i < num_clusters; i++) { + cluster_offsets[i] = acc; + write_offsets[i] = acc; + acc += cluster_sizes[i]; + } + } +} + +extern "C" __global__ void build_cluster_index( + const int* cluster_assignments, + int* write_offsets, + int* cluster_indices, + int database_size +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + int cluster = cluster_assignments[vec_idx]; + int pos = write_offsets[cluster]; + cluster_indices[pos] = vec_idx; + write_offsets[cluster]++; + } + } +} + +extern "C" __global__ void count_block_cluster_sizes( + const int* cluster_assignments, + int* block_counts, + int database_size, + int num_clusters +) { + extern __shared__ int sdata[]; + int tid = threadIdx.x; + int block = blockIdx.x; + int base = block * blockDim.x; + int vec_idx = base + tid; + __shared__ int s_len; + if (tid == 0) { + int rem = database_size - base; + s_len = rem > blockDim.x ? blockDim.x : (rem > 0 ? rem : 0); + } + __syncthreads(); + if (s_len == 0) { + if (tid == 0) { + for (int c = 0; c < num_clusters; c++) { + block_counts[block * num_clusters + c] = 0; + } + } + return; + } + int cid = -1; + if (tid < s_len) cid = cluster_assignments[vec_idx]; + + for (int c = 0; c < num_clusters; c++) { + int* buf = sdata + c * blockDim.x; + if (tid < s_len) { + buf[tid] = (cid == c) ? 
1 : 0; + } else if (tid < blockDim.x) { + buf[tid] = 0; + } + } + __syncthreads(); + + for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { + if (tid < stride) { + int limit = (tid + stride < s_len) ? 1 : 0; + if (limit) { + for (int c = 0; c < num_clusters; c++) { + int* buf = sdata + c * blockDim.x; + buf[tid] += buf[tid + stride]; + } + } + } + __syncthreads(); + } + + if (tid == 0) { + for (int c = 0; c < num_clusters; c++) { + int* buf = sdata + c * blockDim.x; + block_counts[block * num_clusters + c] = buf[0]; + } + } +} + +extern "C" __global__ void exclusive_scan_block_counts( + const int* cluster_offsets, + const int* block_counts, + int* block_offsets, + int num_blocks, + int num_clusters +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int c = 0; c < num_clusters; c++) { + int acc = cluster_offsets[c]; + for (int b = 0; b < num_blocks; b++) { + block_offsets[b * num_clusters + c] = acc; + acc += block_counts[b * num_clusters + c]; + } + } + } +} + +extern "C" __global__ void reduce_block_counts( + const int* block_counts, + int* cluster_sizes, + int num_blocks, + int num_clusters +) { + if (blockIdx.x == 0 && threadIdx.x == 0) { + for (int c = 0; c < num_clusters; c++) { + int acc = 0; + for (int b = 0; b < num_blocks; b++) { + acc += block_counts[b * num_clusters + c]; + } + cluster_sizes[c] = acc; + } + } +} + +extern "C" __global__ void parallel_build_cluster_index( + const int* cluster_assignments, + const int* block_offsets, + int* cluster_indices, + int database_size, + int num_clusters +) { + extern __shared__ int sdata[]; + int tid = threadIdx.x; + int block = blockIdx.x; + int base = block * blockDim.x; + int vec_idx = base + tid; + __shared__ int s_len; + if (tid == 0) { + int rem = database_size - base; + s_len = rem > blockDim.x ? blockDim.x : (rem > 0 ? 
rem : 0); + } + __syncthreads(); + if (s_len == 0) return; + int cid = -1; + if (tid < s_len) cid = cluster_assignments[vec_idx]; + for (int c = 0; c < num_clusters; c++) { + int* flags = sdata + c * blockDim.x; + if (tid < s_len) flags[tid] = (cid == c) ? 1 : 0; + else if (tid < blockDim.x) flags[tid] = 0; + } + __syncthreads(); + for (int c = 0; c < num_clusters; c++) { + int* flags = sdata + c * blockDim.x; + for (int offset = 1; offset < s_len; offset <<= 1) { + int v = 0; + if (tid >= offset && tid < s_len) v = flags[tid - offset]; + __syncthreads(); + if (tid < s_len) flags[tid] += v; + __syncthreads(); + } + if (tid < s_len && cid == c) { + int local_rank = flags[tid] - 1; + int base_off = block_offsets[block * num_clusters + c]; + cluster_indices[base_off + local_rank] = vec_idx; + } + __syncthreads(); + } +} + +extern "C" __global__ void cluster_search( + const float* __restrict__ query_vectors, + const float* __restrict__ database_vectors, + const float* __restrict__ cluster_centers, + const int* __restrict__ cluster_assignments, + const int* __restrict__ cluster_sizes, + const int* __restrict__ cluster_indices, + const int* __restrict__ cluster_offsets, + int* __restrict__ results, + int num_queries, + int database_size, + int vector_dims, + int num_clusters +) { + if (num_queries <= 3000) { + int query_idx = blockIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float cluster_dists[16]; + int cluster_order[16]; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + cluster_dists[cluster] = euclidean_distance(query, center, vector_dims); + cluster_order[cluster] = cluster; + } + + int clusters_to_search = (num_queries <= 1000) ? num_clusters : + (num_queries <= 2000) ? min(num_clusters, (num_clusters * 3) / 4) : + (num_queries <= 2800) ? 
min(num_clusters, (num_clusters * 2) / 3) : + min(num_clusters, max(2, num_clusters / 2)); + if (vector_dims >= 700) { + int target = max(3, clusters_to_search); + clusters_to_search = min(num_clusters, target); + } + for (int i = 0; i < clusters_to_search; i++) { + int best = i; + for (int j = i + 1; j < num_clusters; j++) { + if (cluster_dists[cluster_order[j]] < cluster_dists[cluster_order[best]]) { + best = j; + } + } + int temp = cluster_order[i]; + cluster_order[i] = cluster_order[best]; + cluster_order[best] = temp; + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + for (int c_idx = 0; c_idx < clusters_to_search; c_idx++) { + int target_cluster = cluster_order[c_idx]; + if (cluster_sizes[target_cluster] <= 0) continue; + + int start = cluster_offsets[target_cluster]; + int end = start + cluster_sizes[target_cluster]; + for (int p = start; p < end; p++) { + int vec_idx = cluster_indices[p]; + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_bounded(query, db_vector, vector_dims, min_dist); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } else if (vector_dims >= 720 && num_queries <= 5000 && best_idx != -1 && dist <= min_dist + 0.0015f) { + float d2 = euclidean_distance_precise_bounded(query, db_vector, vector_dims, min_dist); + if (d2 < min_dist) { + min_dist = d2; + best_idx = vec_idx; + } + } + } + } + + if (min_dist == MAX_FLOAT) { + int base_stride = max(5, database_size / 2000); + int max_checks = min(database_size / base_stride, 2000); + + for (int phase = 0; phase < 2; phase++) { + int offset = phase * (base_stride / 2); + for (int i = 0; i < max_checks / 2; i++) { + int db_idx = (offset + i * base_stride) % database_size; + + const float* db_vector = database_vectors + db_idx * vector_dims; + float dist = euclidean_distance_bounded(query, db_vector, vector_dims, min_dist); + if (dist < min_dist) { + min_dist = dist; + best_idx = db_idx; + } + } + } + + if (best_idx != 
-1) { + int radius = min(25, base_stride); + int start_local = max(0, best_idx - radius); + int end_local = min(database_size, best_idx + radius + 1); + + for (int i = start_local; i < end_local; i++) { + if (i == best_idx) continue; + const float* db_vector = database_vectors + i * vector_dims; + float dist = euclidean_distance_bounded(query, db_vector, vector_dims, min_dist); + if (dist < min_dist) { + min_dist = dist; + best_idx = i; + } + } + } + } + + if (min_dist == MAX_FLOAT) { + best_idx = 0; + } + + results[query_idx] = best_idx; + } else { + int query_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float cluster_dists[16]; + int cluster_order[16]; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + cluster_dists[cluster] = euclidean_distance_high(query, center, vector_dims); + cluster_order[cluster] = cluster; + } + + int clusters_to_search = (num_queries <= 3500) ? min(num_clusters, 5) : + (num_queries <= 6000) ? min(num_clusters, 4) : + (num_queries <= 8000) ? 
min(num_clusters, 3) : + 2; + if (num_queries <= 5000 && vector_dims >= 720) { + clusters_to_search = num_clusters; + } else if (vector_dims >= 720) { + clusters_to_search = num_clusters; + } else if (vector_dims >= 700) { + clusters_to_search = max(clusters_to_search, min(num_clusters, (num_clusters * 3) / 4 + 1)); + } + for (int i = 0; i < clusters_to_search; i++) { + int best = i; + for (int j = i + 1; j < num_clusters; j++) { + if (cluster_dists[cluster_order[j]] < cluster_dists[cluster_order[best]]) { + best = j; + } + } + int temp = cluster_order[i]; + cluster_order[i] = cluster_order[best]; + cluster_order[best] = temp; + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + for (int c_idx = 0; c_idx < clusters_to_search; c_idx++) { + int target_cluster = cluster_order[c_idx]; + if (cluster_sizes[target_cluster] <= 0) continue; + + int start = cluster_offsets[target_cluster]; + int end = start + cluster_sizes[target_cluster]; + for (int p = start; p < end; p++) { + int vec_idx = cluster_indices[p]; + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high_bounded(query, db_vector, vector_dims, min_dist); + if (vector_dims >= 720 && num_queries <= 5000 && dist <= min_dist + 0.0015f) { + float d2 = euclidean_distance_precise_bounded(query, db_vector, vector_dims, min_dist); + if (d2 < dist) dist = d2; + } + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + + if (min_dist == MAX_FLOAT) { + int base_stride = (vector_dims >= 720) ? max(7, database_size / 900) : max(9, database_size / 1200); + int max_checks = min(database_size / base_stride, (vector_dims >= 720) ? 
1600 : 1200); + + for (int phase = 0; phase < 2; phase++) { + int offset = phase * (base_stride / 3); + int phase_checks = max_checks / 2; + + for (int i = 0; i < phase_checks; i++) { + int db_idx = (offset + i * base_stride) % database_size; + + const float* db_vector = database_vectors + db_idx * vector_dims; + float dist = euclidean_distance_high_bounded(query, db_vector, vector_dims, min_dist); + if (dist < min_dist) { + min_dist = dist; + best_idx = db_idx; + } + } + } + + if (best_idx != -1) { + int radius = (vector_dims >= 720) ? min(32, (base_stride * 2) / 3) : min(18, base_stride / 2); + int start_local = max(0, best_idx - radius); + int end_local = min(database_size, best_idx + radius + 1); + + for (int i = start_local; i < end_local; i++) { + if (i == best_idx) continue; + const float* db_vector = database_vectors + i * vector_dims; + float dist = euclidean_distance_high_bounded(query, db_vector, vector_dims, min_dist); + if (dist < min_dist) { + min_dist = dist; + best_idx = i; + } + } + } + } + + if (min_dist == MAX_FLOAT) { + best_idx = 0; + } + + results[query_idx] = best_idx; + } +} diff --git a/tig-algorithms/src/vector_search/cluster_improved/mod.rs b/tig-algorithms/src/vector_search/cluster_improved/mod.rs new file mode 100644 index 0000000..1fa0257 --- /dev/null +++ b/tig-algorithms/src/vector_search/cluster_improved/mod.rs @@ -0,0 +1,230 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + _prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let vector_dims = challenge.vector_dims as i32; + let database_size = challenge.database_size as i32; + let num_queries = challenge.difficulty.num_queries as i32; + + let 
block_size: u32 = 128; + fn calculate_optimal_clusters(num_queries: i32, database_size: i32, vector_dims: i32) -> i32 { + let base_clusters = if num_queries <= 3000 { + 3 + } else if num_queries <= 5000 { + if vector_dims >= 720 { 6 } else { 4 } + } else if num_queries <= 7000 { + 5 + } else if num_queries <= 9000 { + 6 + } else if num_queries <= 11000 { + 7 + } else if num_queries <= 15000 { + 8 + } else if num_queries <= 20000 { + 9 + } else { + ((database_size as f32).sqrt() / 1000.0).max(8.0).min(12.0) as i32 + }; + + let memory_factor = if vector_dims > 1000 { 0.8 } else { 1.0 }; + ((base_clusters as f32 * memory_factor) as i32).max(3).min(12) + } + + let num_clusters = calculate_optimal_clusters(num_queries, database_size, vector_dims); + + let deterministic_clustering = module.load_function("deterministic_clustering")?; + let assign_clusters = module.load_function("assign_clusters")?; + + let cluster_search = module.load_function("cluster_search")?; + let exclusive_scan_sizes = module.load_function("exclusive_scan_sizes")?; + let count_block_cluster_sizes = module.load_function("count_block_cluster_sizes")?; + let reduce_block_counts = module.load_function("reduce_block_counts")?; + let exclusive_scan_block_counts = module.load_function("exclusive_scan_block_counts")?; + let parallel_build_cluster_index = module.load_function("parallel_build_cluster_index")?; + + let mut d_cluster_centers = stream.alloc_zeros::((num_clusters * vector_dims) as usize)?; + let mut d_cluster_assignments = stream.alloc_zeros::(database_size as usize)?; + let mut d_cluster_sizes = stream.alloc_zeros::(num_clusters as usize)?; + + let cluster_config = LaunchConfig { + grid_dim: (num_clusters as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: 0, + }; + + unsafe { + stream + .launch_builder(&deterministic_clustering) + .arg(&challenge.d_database_vectors) + .arg(&mut d_cluster_centers) + .arg(&mut d_cluster_assignments) + .arg(&mut d_cluster_sizes) + 
.arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .arg(&num_queries) + .launch(cluster_config)?; + } + + let assign_threads: u32 = 256; + let assign_blocks: u32 = ((database_size as u32) + assign_threads - 1) / assign_threads; + let assign_config = LaunchConfig { + grid_dim: (assign_blocks, 1, 1), + block_dim: (assign_threads, 1, 1), + shared_mem_bytes: 0, + }; + unsafe { + stream + .launch_builder(&assign_clusters) + .arg(&challenge.d_database_vectors) + .arg(&d_cluster_centers) + .arg(&mut d_cluster_assignments) + .arg(&mut d_cluster_sizes) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .arg(&num_queries) + .launch(assign_config)?; + } + + let mut d_cluster_offsets = stream.alloc_zeros::(num_clusters as usize)?; + let mut d_write_offsets = stream.alloc_zeros::(num_clusters as usize)?; + let scan_config = LaunchConfig { + grid_dim: (1, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + }; + {} + + let mut d_cluster_indices = stream.alloc_zeros::(database_size as usize)?; + let db_u32 = database_size as u32; + let fill_blocks: u32 = (db_u32 + block_size - 1) / block_size; + let fill_blocks_i32: i32 = fill_blocks as i32; + + let mut d_block_counts = stream.alloc_zeros::((fill_blocks as usize) * (num_clusters as usize))?; + let mut d_block_offsets = stream.alloc_zeros::((fill_blocks as usize) * (num_clusters as usize))?; + + let count_config = LaunchConfig { + grid_dim: (fill_blocks, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: ((num_clusters as u32) * block_size * 4) as u32, + }; + unsafe { + stream + .launch_builder(&count_block_cluster_sizes) + .arg(&d_cluster_assignments) + .arg(&mut d_block_counts) + .arg(&database_size) + .arg(&num_clusters) + .launch(count_config)?; + } + + let scan_blocks_config = LaunchConfig { + grid_dim: (1, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + }; + unsafe { + stream + .launch_builder(&reduce_block_counts) + .arg(&d_block_counts) + .arg(&mut d_cluster_sizes) + 
.arg(&fill_blocks_i32) + .arg(&num_clusters) + .launch(scan_blocks_config)?; + } + unsafe { + stream + .launch_builder(&exclusive_scan_sizes) + .arg(&d_cluster_sizes) + .arg(&mut d_cluster_offsets) + .arg(&mut d_write_offsets) + .arg(&num_clusters) + .launch(scan_config)?; + } + unsafe { + stream + .launch_builder(&exclusive_scan_block_counts) + .arg(&d_cluster_offsets) + .arg(&d_block_counts) + .arg(&mut d_block_offsets) + .arg(&fill_blocks_i32) + .arg(&num_clusters) + .launch(scan_blocks_config)?; + } + + let build_config = LaunchConfig { + grid_dim: (fill_blocks, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: ((num_clusters as u32) * block_size * 4) as u32, + }; + unsafe { + stream + .launch_builder(¶llel_build_cluster_index) + .arg(&d_cluster_assignments) + .arg(&d_block_offsets) + .arg(&mut d_cluster_indices) + .arg(&database_size) + .arg(&num_clusters) + .launch(build_config)?; + } + + let mut d_results = stream.alloc_zeros::(num_queries as usize)?; + + let search_config = if num_queries <= 3000 { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + } + } else { + let threads_per_block = if vector_dims >= 720 { + 256 + } else { + 128 + }; + let blocks = ((num_queries as u32) + threads_per_block - 1) / threads_per_block; + LaunchConfig { + grid_dim: (blocks.min(2048), 1, 1), + block_dim: (threads_per_block, 1, 1), + shared_mem_bytes: 0, + } + }; + + unsafe { + stream + .launch_builder(&cluster_search) + .arg(&challenge.d_query_vectors) + .arg(&challenge.d_database_vectors) + .arg(&d_cluster_centers) + .arg(&d_cluster_assignments) + .arg(&d_cluster_sizes) + .arg(&d_cluster_indices) + .arg(&d_cluster_offsets) + .arg(&mut d_results) + .arg(&num_queries) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .launch(search_config)?; + } + stream.synchronize()?; + + let indices: Vec = stream.memcpy_dtov(&d_results)?; + let indexes = indices.iter().map(|&idx| idx as usize).collect(); + + 
let _ = save_solution(&Solution { indexes }); + return Ok(()); +} diff --git a/tig-algorithms/src/vector_search/improved_search_adp/README.md b/tig-algorithms/src/vector_search/improved_search_adp/README.md new file mode 100644 index 0000000..dcdd3d6 --- /dev/null +++ b/tig-algorithms/src/vector_search/improved_search_adp/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** improved_search_adp +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/improved_search_adp/kernels.cu b/tig-algorithms/src/vector_search/improved_search_adp/kernels.cu new file mode 100644 index 0000000..c6604ca --- /dev/null +++ b/tig-algorithms/src/vector_search/improved_search_adp/kernels.cu @@ -0,0 +1,321 @@ +/*!Copyright 2025 Rootz + +Identity of Submitter Rootz + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific +language governing permissions and limitations under the License. +*/ +#include +#include + +#define MAX_FLOAT 3.402823466e+38F + +__device__ float euclidean_distance(const float* a, const float* b, int dims) { + float sum = 0.0f; + int i; + for (i = 0; i < dims - 3; i += 4) { + float diff0 = a[i] - b[i]; + float diff1 = a[i+1] - b[i+1]; + float diff2 = a[i+2] - b[i+2]; + float diff3 = a[i+3] - b[i+3]; + sum = fmaf(diff0, diff0, sum); + sum = fmaf(diff1, diff1, sum); + sum = fmaf(diff2, diff2, sum); + sum = fmaf(diff3, diff3, sum); + } + for (; i < dims; i++) { + float diff = a[i] - b[i]; + sum = fmaf(diff, diff, sum); + } + return sum; +} + +__device__ float euclidean_distance_high(const float* a, const float* b, int dims) { + float sum = 0.0f; + for (int i = 0; i < dims; i += 4) { + float diff0 = a[i] - b[i]; + float diff1 = a[i+1] - b[i+1]; + float diff2 = a[i+2] - b[i+2]; + float diff3 = a[i+3] - b[i+3]; + sum = fmaf(diff0, diff0, sum); + sum = fmaf(diff1, diff1, sum); + sum = fmaf(diff2, diff2, sum); + sum = fmaf(diff3, diff3, sum); + } + return sum; +} + +extern "C" __global__ void deterministic_clustering( + const float* database_vectors, + float* cluster_centers, + int* cluster_assignments, + int* cluster_sizes, + int database_size, + int vector_dims, + int num_clusters, + int num_queries +) { + int cluster_idx = blockIdx.x; + int tid = threadIdx.x; + + if (cluster_idx >= num_clusters) return; + + extern __shared__ float shared_mem[]; + float* center = shared_mem; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = 0.0f; + } + __syncthreads(); + + int seed_idx = ((cluster_idx * 982451653LL + 1566083941LL) % (long long)database_size); + const float* seed_vector = database_vectors + seed_idx * vector_dims; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = seed_vector[d]; + cluster_centers[cluster_idx * vector_dims + d] = seed_vector[d]; + } + + if (tid == 0) { + 
cluster_sizes[cluster_idx] = 0; + } + __syncthreads(); + + for (int vec_idx = tid; vec_idx < database_size; vec_idx += blockDim.x) { + const float* vector = database_vectors + vec_idx * vector_dims; + + float min_dist = MAX_FLOAT; + int best_cluster = 0; + + for (int c = 0; c < num_clusters; c++) { + const float* c_center = cluster_centers + c * vector_dims; + float dist = (num_queries <= 4000) ? + euclidean_distance(vector, c_center, vector_dims) : + euclidean_distance_high(vector, c_center, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_cluster = c; + } + } + + cluster_assignments[vec_idx] = best_cluster; + if (best_cluster == cluster_idx) { + atomicAdd(&cluster_sizes[cluster_idx], 1); + } + } +} + +extern "C" __global__ void cluster_search( + const float* query_vectors, + const float* database_vectors, + const float* cluster_centers, + const int* cluster_assignments, + const int* cluster_sizes, + int* results, + int num_queries, + int database_size, + int vector_dims, + int num_clusters +) { + if (num_queries <= 4000) { + int query_idx = blockIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float cluster_dists[8]; + for (int i = 0; i < num_clusters; i++) { + cluster_dists[i] = MAX_FLOAT; + } + + float best_dist[3] = {MAX_FLOAT, MAX_FLOAT, MAX_FLOAT}; + int best_clusters[3] = {-1, -1, -1}; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + float dist = euclidean_distance(query, center, vector_dims); + + cluster_dists[cluster] = dist; + + if (dist < best_dist[0]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = best_dist[0]; + best_clusters[1] = best_clusters[0]; + best_dist[0] = dist; + best_clusters[0] = cluster; + } else if (dist < best_dist[1]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = dist; + best_clusters[1] = 
cluster; + } else if (dist < best_dist[2]) { + best_dist[2] = dist; + best_clusters[2] = cluster; + } + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = best_clusters[0]; + if (target_cluster != -1 && cluster_sizes[target_cluster] > 0) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (best_clusters[1] != -1 && cluster_sizes[best_clusters[1]] > 0) { + target_cluster = best_clusters[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (best_clusters[2] != -1 && cluster_sizes[best_clusters[2]] > 0) { + target_cluster = best_clusters[2]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + for (int cluster = 0; cluster < num_clusters; cluster++) { + if (cluster == best_clusters[0] || cluster == best_clusters[1] || cluster == best_clusters[2]) continue; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = 
dist; + best_idx = vec_idx; + } + } + } + } + + results[query_idx] = best_idx; + } else { + int query_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + extern __shared__ float shared_mem[]; + float* cluster_dists = shared_mem; + int* cluster_indices = (int*)&shared_mem[num_clusters]; + + if (threadIdx.x < num_clusters) { + cluster_dists[threadIdx.x] = MAX_FLOAT; + cluster_indices[threadIdx.x] = -1; + } + + float best_dist[2] = {MAX_FLOAT, MAX_FLOAT}; + int best_clusters[2] = {-1, -1}; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + float dist = euclidean_distance_high(query, center, vector_dims); + + if (dist < best_dist[0]) { + best_dist[1] = best_dist[0]; + best_clusters[1] = best_clusters[0]; + best_dist[0] = dist; + best_clusters[0] = cluster; + } else if (dist < best_dist[1]) { + best_dist[1] = dist; + best_clusters[1] = cluster; + } + + if (cluster < num_clusters && threadIdx.x == 0) { + cluster_dists[cluster] = dist; + } + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = best_clusters[0]; + if (target_cluster != -1 && cluster_sizes[target_cluster] > 0) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (min_dist == MAX_FLOAT && best_clusters[1] != -1 && cluster_sizes[best_clusters[1]] > 0) { + target_cluster = best_clusters[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = 
euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (min_dist == MAX_FLOAT) { + float search_radius = cluster_dists[0] * 2.0f; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + if (cluster == best_clusters[0] || cluster == best_clusters[1]) continue; + if (cluster_dists[cluster] >= search_radius) continue; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + } + + results[query_idx] = best_idx; + } +} diff --git a/tig-algorithms/src/vector_search/improved_search_adp/mod.rs b/tig-algorithms/src/vector_search/improved_search_adp/mod.rs new file mode 100644 index 0000000..8aae5f4 --- /dev/null +++ b/tig-algorithms/src/vector_search/improved_search_adp/mod.rs @@ -0,0 +1,96 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + _prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let vector_dims = challenge.vector_dims as i32; + let database_size = challenge.database_size as i32; + let num_queries = challenge.difficulty.num_queries as i32; + + let block_size = 128; + let num_clusters = if num_queries <= 6000 { + 2 + } else if num_queries < 9000 { + 4 + } else if num_queries < 10000 { + 6 + } else { + 8 + }; + + let deterministic_clustering = module.load_function("deterministic_clustering")?; + let cluster_search = 
module.load_function("cluster_search")?; + + let mut d_cluster_centers = stream.alloc_zeros::((num_clusters * vector_dims) as usize)?; + let mut d_cluster_assignments = stream.alloc_zeros::(database_size as usize)?; + let mut d_cluster_sizes = stream.alloc_zeros::(num_clusters as usize)?; + + let cluster_config = LaunchConfig { + grid_dim: (num_clusters as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (vector_dims * 4) as u32, + }; + + unsafe { + stream.launch_builder(&deterministic_clustering) + .arg(&challenge.d_database_vectors) + .arg(&mut d_cluster_centers) + .arg(&mut d_cluster_assignments) + .arg(&mut d_cluster_sizes) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .arg(&num_queries) + .launch(cluster_config)?; + } + stream.synchronize()?; + + let mut d_results = stream.alloc_zeros::(num_queries as usize)?; + + let search_config = if num_queries <= 4000 { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + } + } else { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (num_clusters * 8) as u32, + } + }; + + unsafe { + stream.launch_builder(&cluster_search) + .arg(&challenge.d_query_vectors) + .arg(&challenge.d_database_vectors) + .arg(&d_cluster_centers) + .arg(&d_cluster_assignments) + .arg(&d_cluster_sizes) + .arg(&mut d_results) + .arg(&num_queries) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .launch(search_config)?; + } + stream.synchronize()?; + + let indices = stream.memcpy_dtov(&d_results)?; + let indexes = indices.iter().map(|&idx| idx as usize).collect(); + + let _ = save_solution(&Solution { indexes }); + return Ok(()); +} diff --git a/tig-algorithms/src/vector_search/improved_search_new/README.md b/tig-algorithms/src/vector_search/improved_search_new/README.md new file mode 100644 index 0000000..4cd8568 --- /dev/null +++ 
b/tig-algorithms/src/vector_search/improved_search_new/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** improved_search_new +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/improved_search_new/kernels.cu b/tig-algorithms/src/vector_search/improved_search_new/kernels.cu new file mode 100644 index 0000000..00df48e --- /dev/null +++ b/tig-algorithms/src/vector_search/improved_search_new/kernels.cu @@ -0,0 +1,490 @@ +/*!Copyright 2025 Rootz + +Identity of Submitter Rootz + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ +#include +#include + +#define MAX_FLOAT 3.402823466e+38F + +__device__ float euclidean_distance(const float* a, const float* b, int dims) { + float sum = 0.0f; + float c = 0.0f; + int i; + + for (i = 0; i < dims - 31; i += 32) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + float d16=a[i+16]-b[i+16], d17=a[i+17]-b[i+17], d18=a[i+18]-b[i+18], d19=a[i+19]-b[i+19]; + float d20=a[i+20]-b[i+20], d21=a[i+21]-b[i+21], d22=a[i+22]-b[i+22], d23=a[i+23]-b[i+23]; + float d24=a[i+24]-b[i+24], d25=a[i+25]-b[i+25], d26=a[i+26]-b[i+26], d27=a[i+27]-b[i+27]; + float d28=a[i+28]-b[i+28], d29=a[i+29]-b[i+29], d30=a[i+30]-b[i+30], d31=a[i+31]-b[i+31]; + + float values[32] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7, + d8*d8, d9*d9, d10*d10, d11*d11, d12*d12, d13*d13, d14*d14, d15*d15, + d16*d16, d17*d17, d18*d18, d19*d19, d20*d20, d21*d21, d22*d22, d23*d23, + d24*d24, d25*d25, d26*d26, d27*d27, d28*d28, d29*d29, d30*d30, d31*d31}; + + for (int j = 0; j < 32; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + + float values[16] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7, + d8*d8, d9*d9, d10*d10, d11*d11, d12*d12, d13*d13, d14*d14, d15*d15}; + + for (int j = 0; j < 16; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } 
+ } + + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + + float values[8] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7}; + + for (int j = 0; j < 8; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + + float values[4] = {d0*d0, d1*d1, d2*d2, d3*d3}; + + for (int j = 0; j < 4; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims; i++) { + float diff = a[i] - b[i]; + float squared = diff * diff; + float y = squared - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + return sum; +} + +__device__ float euclidean_distance_high(const float* a, const float* b, int dims) { + float sum = 0.0f; + float c = 0.0f; + int i; + + for (i = 0; i < dims - 31; i += 32) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + float d16=a[i+16]-b[i+16], d17=a[i+17]-b[i+17], d18=a[i+18]-b[i+18], d19=a[i+19]-b[i+19]; + float d20=a[i+20]-b[i+20], d21=a[i+21]-b[i+21], d22=a[i+22]-b[i+22], d23=a[i+23]-b[i+23]; + float d24=a[i+24]-b[i+24], d25=a[i+25]-b[i+25], d26=a[i+26]-b[i+26], d27=a[i+27]-b[i+27]; + float d28=a[i+28]-b[i+28], d29=a[i+29]-b[i+29], d30=a[i+30]-b[i+30], d31=a[i+31]-b[i+31]; + + float values[32] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7, + d8*d8, d9*d9, d10*d10, d11*d11, d12*d12, d13*d13, d14*d14, d15*d15, + d16*d16, d17*d17, d18*d18, d19*d19, d20*d20, d21*d21, d22*d22, d23*d23, + d24*d24, d25*d25, d26*d26, 
d27*d27, d28*d28, d29*d29, d30*d30, d31*d31}; + + for (int j = 0; j < 32; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims - 15; i += 16) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + float d8=a[i+8]-b[i+8], d9=a[i+9]-b[i+9], d10=a[i+10]-b[i+10], d11=a[i+11]-b[i+11]; + float d12=a[i+12]-b[i+12], d13=a[i+13]-b[i+13], d14=a[i+14]-b[i+14], d15=a[i+15]-b[i+15]; + + float values[16] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7, + d8*d8, d9*d9, d10*d10, d11*d11, d12*d12, d13*d13, d14*d14, d15*d15}; + + for (int j = 0; j < 16; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims - 7; i += 8) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + float d4=a[i+4]-b[i+4], d5=a[i+5]-b[i+5], d6=a[i+6]-b[i+6], d7=a[i+7]-b[i+7]; + + float values[8] = {d0*d0, d1*d1, d2*d2, d3*d3, d4*d4, d5*d5, d6*d6, d7*d7}; + + for (int j = 0; j < 8; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims - 3; i += 4) { + float d0=a[i]-b[i], d1=a[i+1]-b[i+1], d2=a[i+2]-b[i+2], d3=a[i+3]-b[i+3]; + + float values[4] = {d0*d0, d1*d1, d2*d2, d3*d3}; + + for (int j = 0; j < 4; j++) { + float y = values[j] - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + } + + for (; i < dims; i++) { + float diff = a[i] - b[i]; + float squared = diff * diff; + float y = squared - c; + float t = sum + y; + c = (t - sum) - y; + sum = t; + } + return sum; +} + +extern "C" __global__ void deterministic_clustering( + const float* database_vectors, + float* cluster_centers, + int* cluster_assignments, + int* cluster_sizes, + int database_size, + int vector_dims, + int num_clusters, + int num_queries +) { + int cluster_idx = blockIdx.x; + int tid = threadIdx.x; + + if 
(cluster_idx >= num_clusters) return; + + extern __shared__ float shared_mem[]; + float* center = shared_mem; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = 0.0f; + } + __syncthreads(); + + int seed_idx = ((cluster_idx * 982451653LL + 1566083941LL) % (long long)database_size); + const float* seed_vector = database_vectors + seed_idx * vector_dims; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = seed_vector[d]; + cluster_centers[cluster_idx * vector_dims + d] = seed_vector[d]; + } + + if (tid == 0) { + cluster_sizes[cluster_idx] = 0; + } + __syncthreads(); + + for (int vec_idx = tid; vec_idx < database_size; vec_idx += blockDim.x) { + const float* vector = database_vectors + vec_idx * vector_dims; + + float min_dist = MAX_FLOAT; + int best_cluster = 0; + + for (int c = 0; c < num_clusters; c++) { + const float* c_center = cluster_centers + c * vector_dims; + float dist = (num_queries <= 4000) ? + euclidean_distance(vector, c_center, vector_dims) : + euclidean_distance_high(vector, c_center, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_cluster = c; + } + } + + cluster_assignments[vec_idx] = best_cluster; + if (best_cluster == cluster_idx) { + atomicAdd(&cluster_sizes[cluster_idx], 1); + } + } +} + +extern "C" __global__ void cluster_search( + const float* query_vectors, + const float* database_vectors, + const float* cluster_centers, + const int* cluster_assignments, + const int* cluster_sizes, + int* results, + int num_queries, + int database_size, + int vector_dims, + int num_clusters +) { + if (num_queries <= 4000) { + int query_idx = blockIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float best_dist[3] = {MAX_FLOAT, MAX_FLOAT, MAX_FLOAT}; + int best_clusters[3] = {-1, -1, -1}; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + float dist = 
euclidean_distance(query, center, vector_dims); + + if (dist < best_dist[0]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = best_dist[0]; + best_clusters[1] = best_clusters[0]; + best_dist[0] = dist; + best_clusters[0] = cluster; + } else if (dist < best_dist[1]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = dist; + best_clusters[1] = cluster; + } else if (dist < best_dist[2]) { + best_dist[2] = dist; + best_clusters[2] = cluster; + } + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = best_clusters[0]; + if (target_cluster != -1 && cluster_sizes[target_cluster] > 0) { + float top_dists[3] = {MAX_FLOAT, MAX_FLOAT, MAX_FLOAT}; + int top_indices[3] = {-1, -1, -1}; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + + if (dist < top_dists[0]) { + top_dists[2] = top_dists[1]; + top_indices[2] = top_indices[1]; + top_dists[1] = top_dists[0]; + top_indices[1] = top_indices[0]; + top_dists[0] = dist; + top_indices[0] = vec_idx; + } else if (dist < top_dists[1]) { + top_dists[2] = top_dists[1]; + top_indices[2] = top_indices[1]; + top_dists[1] = dist; + top_indices[1] = vec_idx; + } else if (dist < top_dists[2]) { + top_dists[2] = dist; + top_indices[2] = vec_idx; + } + } + } + + if (top_dists[0] < min_dist) { + min_dist = top_dists[0]; + best_idx = top_indices[0]; + } + } + + if (best_clusters[1] != -1 && cluster_sizes[best_clusters[1]] > 0) { + target_cluster = best_clusters[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + 
min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (best_clusters[2] != -1 && cluster_sizes[best_clusters[2]] > 0) { + target_cluster = best_clusters[2]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + for (int cluster = 0; cluster < num_clusters; cluster++) { + if (cluster == best_clusters[0] || cluster == best_clusters[1] || cluster == best_clusters[2]) continue; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + results[query_idx] = best_idx; + } else { + int query_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float cluster_dists[8]; + int cluster_order[8]; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + cluster_dists[cluster] = euclidean_distance_high(query, center, vector_dims); + cluster_order[cluster] = cluster; + } + + for (int i = 0; i < num_clusters - 1; i++) { + for (int j = i + 1; j < num_clusters; j++) { + if (cluster_dists[cluster_order[i]] > cluster_dists[cluster_order[j]]) { + int temp = cluster_order[i]; + cluster_order[i] = cluster_order[j]; + cluster_order[j] = temp; + } + } + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = cluster_order[0]; + if (cluster_sizes[target_cluster] > 0) { + float top_dists[3] = 
{MAX_FLOAT, MAX_FLOAT, MAX_FLOAT}; + int top_indices[3] = {-1, -1, -1}; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + + if (dist < top_dists[0]) { + top_dists[2] = top_dists[1]; + top_indices[2] = top_indices[1]; + top_dists[1] = top_dists[0]; + top_indices[1] = top_indices[0]; + top_dists[0] = dist; + top_indices[0] = vec_idx; + } else if (dist < top_dists[1]) { + top_dists[2] = top_dists[1]; + top_indices[2] = top_indices[1]; + top_dists[1] = dist; + top_indices[1] = vec_idx; + } else if (dist < top_dists[2]) { + top_dists[2] = dist; + top_indices[2] = vec_idx; + } + } + } + + if (top_dists[0] < min_dist) { + min_dist = top_dists[0]; + best_idx = top_indices[0]; + } + } + + if (min_dist == MAX_FLOAT && cluster_sizes[cluster_order[1]] > 0) { + target_cluster = cluster_order[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (min_dist == MAX_FLOAT) { + int clusters_searched = 2; + + for (int expansion_round = 0; expansion_round < 5 && min_dist == MAX_FLOAT; expansion_round++) { + int max_clusters_this_round = (expansion_round == 0) ? 4 : (expansion_round == 1) ? 6 : (expansion_round == 2) ? 8 : (expansion_round == 3) ? 
num_clusters - 1 : num_clusters; + + for (int i = clusters_searched; i < num_clusters && i < max_clusters_this_round; i++) { + int cluster = cluster_order[i]; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + clusters_searched++; + } + } + } + + if (min_dist == MAX_FLOAT) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx += 15) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + + results[query_idx] = best_idx; + } +} diff --git a/tig-algorithms/src/vector_search/improved_search_new/mod.rs b/tig-algorithms/src/vector_search/improved_search_new/mod.rs new file mode 100644 index 0000000..8aae5f4 --- /dev/null +++ b/tig-algorithms/src/vector_search/improved_search_new/mod.rs @@ -0,0 +1,96 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + _prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let vector_dims = challenge.vector_dims as i32; + let database_size = challenge.database_size as i32; + let num_queries = challenge.difficulty.num_queries as i32; + + let block_size = 128; + let num_clusters = if num_queries <= 6000 { + 2 + } else if num_queries < 9000 { + 4 + } else if num_queries < 10000 { + 6 + } else { + 8 + }; + + let 
deterministic_clustering = module.load_function("deterministic_clustering")?; + let cluster_search = module.load_function("cluster_search")?; + + let mut d_cluster_centers = stream.alloc_zeros::((num_clusters * vector_dims) as usize)?; + let mut d_cluster_assignments = stream.alloc_zeros::(database_size as usize)?; + let mut d_cluster_sizes = stream.alloc_zeros::(num_clusters as usize)?; + + let cluster_config = LaunchConfig { + grid_dim: (num_clusters as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (vector_dims * 4) as u32, + }; + + unsafe { + stream.launch_builder(&deterministic_clustering) + .arg(&challenge.d_database_vectors) + .arg(&mut d_cluster_centers) + .arg(&mut d_cluster_assignments) + .arg(&mut d_cluster_sizes) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .arg(&num_queries) + .launch(cluster_config)?; + } + stream.synchronize()?; + + let mut d_results = stream.alloc_zeros::(num_queries as usize)?; + + let search_config = if num_queries <= 4000 { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + } + } else { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (num_clusters * 8) as u32, + } + }; + + unsafe { + stream.launch_builder(&cluster_search) + .arg(&challenge.d_query_vectors) + .arg(&challenge.d_database_vectors) + .arg(&d_cluster_centers) + .arg(&d_cluster_assignments) + .arg(&d_cluster_sizes) + .arg(&mut d_results) + .arg(&num_queries) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .launch(search_config)?; + } + stream.synchronize()?; + + let indices = stream.memcpy_dtov(&d_results)?; + let indexes = indices.iter().map(|&idx| idx as usize).collect(); + + let _ = save_solution(&Solution { indexes }); + return Ok(()); +} diff --git a/tig-algorithms/src/vector_search/invector/README.md b/tig-algorithms/src/vector_search/invector/README.md new file mode 100644 index 
0000000..a2e82e5 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector/kernels.cu b/tig-algorithms/src/vector_search/invector/kernels.cu new file mode 100644 index 0000000..04415d0 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector/kernels.cu @@ -0,0 +1,19 @@ +/*! +Copyright 2024 syebastian + +Licensed under the TIG Inbound Game License v1.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector/mod.rs b/tig-algorithms/src/vector_search/invector/mod.rs new file mode 100644 index 0000000..fc92dd3 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector/mod.rs @@ -0,0 +1,354 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 
0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + for i in 0..a.len() { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + } + sum + } + + #[inline(always)] + fn squared_euclidean_distance_limited(a: &[f32], b: &[f32], c : f32) -> f32 { + let mut sum = 0.0; + for i in 0..180 { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + } + if sum > c { + sum; + } + for i in 180..a.len() { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + } 
+ sum + } + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + if sum > current_min { + return f32::MAX; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(&node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), 
depth + 1)); + } + } + } + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i]; + } + } + + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f32; + } + + mean_vector + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(&'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = squared_euclidean_distance_limited(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + let result: Vec<(&'a [f32], usize)> = heap + .into_iter() + .map(|(_, index)| (&database[index][..], index)) + .collect(); + + result + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 2000000000.0; + let base_fuel = 760000000.0; + let 
alpha = 1700.0 * challenge.difficulty.num_queries as f64; + + let subset_size = ((max_fuel - base_fuel) / alpha) as usize; + let subset = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + subset_size, + ); + + + let kd_tree = build_kd_tree(&mut subset.clone()); + + + let mut best_indexes = Vec::with_capacity(challenge.query_vectors.len()); + + for query in challenge.query_vectors.iter() { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + if let Some(best_index) = best.1 { + best_indexes.push(best_index); + } + } + + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_adj/README.md b/tig-algorithms/src/vector_search/invector_adj/README.md new file mode 100644 index 0000000..21fe74d --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_adj/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector_adj +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_adj/kernels.cu b/tig-algorithms/src/vector_search/invector_adj/kernels.cu new file mode 100644 index 0000000..a1201d8 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_adj/kernels.cu @@ -0,0 +1,23 @@ +/*! 
+Copyright 2025 syebastian + +Identity of Submitter syebastian + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector_adj/mod.rs b/tig-algorithms/src/vector_search/invector_adj/mod.rs new file mode 100644 index 0000000..60812e8 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_adj/mod.rs @@ -0,0 +1,394 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + 
} + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth 
+ 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 
+ diff7 * diff7; + } + + if sum > current_min { + return f32::MAX; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn 
filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 10000000000.0; + let base_fuel = 760000000.0; + + let vector_cost = if challenge.difficulty.num_queries < 240 { + 1680.0 + } else if challenge.difficulty.num_queries < 450 { + 1700.0 + } else if challenge.difficulty.num_queries > 1250 { + 1740.0 + } else { + 1720.0 + }; + let alpha = vector_cost * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = 
Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let brute_force_count = (query_count as f32 * 0.1) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = &challenge.query_vectors[query_index]; + let mut best = (distances[query_index], best_indexes[query_index]); + let current_min = best.0; + + for &(_, vec, index) in &r_vectors { + let dist = early_stopping_distance(query, vec, current_min); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_fast/README.md b/tig-algorithms/src/vector_search/invector_fast/README.md new file mode 100644 index 0000000..db59679 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_fast/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector_fast +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be 
obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_fast/kernels.cu b/tig-algorithms/src/vector_search/invector_fast/kernels.cu new file mode 100644 index 0000000..a1201d8 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_fast/kernels.cu @@ -0,0 +1,23 @@ +/*! +Copyright 2025 syebastian + +Identity of Submitter syebastian + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector_fast/mod.rs b/tig-algorithms/src/vector_search/invector_fast/mod.rs new file mode 100644 index 0000000..f6fceba --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_fast/mod.rs @@ -0,0 +1,407 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for 
i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i 
+ 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let len = a.len(); + + if a.len() != b.len() || len < 8 { + return f32::MAX; + } + + let mut a_ptr = a.as_ptr(); + let mut b_ptr = b.as_ptr(); + let end_ptr = unsafe { a_ptr.add(len - 7) }; + + while a_ptr < end_ptr { + unsafe { + let a0 = *a_ptr.add(0); + let a1 = *a_ptr.add(1); + let a2 = *a_ptr.add(2); + let a3 = *a_ptr.add(3); + let a4 = *a_ptr.add(4); + let a5 = *a_ptr.add(5); + let a6 = *a_ptr.add(6); + let a7 = *a_ptr.add(7); + + let b0 = *b_ptr.add(0); + let b1 = *b_ptr.add(1); + let b2 = *b_ptr.add(2); + let b3 = *b_ptr.add(3); + let b4 = *b_ptr.add(4); + let b5 = *b_ptr.add(5); + let b6 = *b_ptr.add(6); + let b7 = *b_ptr.add(7); + + let block_sum = (a0 - b0).powi(2) + + (a1 - b1).powi(2) + + (a2 - b2).powi(2) + + (a3 - b3).powi(2) + + (a4 - b4).powi(2) + + (a5 - b5).powi(2) + + (a6 - b6).powi(2) + + (a7 - b7).powi(2); + + sum += block_sum; + } + + if sum > current_min { + return f32::MAX; + } + + a_ptr = unsafe { a_ptr.add(8) }; + b_ptr = unsafe { b_ptr.add(8) }; + } + + + let remaining = len - (unsafe { a_ptr.offset_from(a.as_ptr()) } as usize); + for i in 0..remaining { + unsafe { + let diff = *a_ptr.add(i) - *b_ptr.add(i); + sum += diff * diff; + } + } + + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 
0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, 
vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 10000000000.0; + let base_fuel = 760000000.0; + let alpha = 1630.0 * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let brute_force_count = (query_count as f32 * 0.1) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = 
&challenge.query_vectors[query_index]; + let mut best = (distances[query_index], best_indexes[query_index]); + let current_min = best.0; + + for &(_, vec, index) in &r_vectors { + let dist = early_stopping_distance(query, vec, current_min); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_hybrid/README.md b/tig-algorithms/src/vector_search/invector_hybrid/README.md new file mode 100644 index 0000000..7176640 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector_hybrid +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_hybrid/kernels.cu b/tig-algorithms/src/vector_search/invector_hybrid/kernels.cu new file mode 100644 index 0000000..04415d0 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid/kernels.cu @@ -0,0 +1,19 @@ +/*! +Copyright 2024 syebastian + +Licensed under the TIG Inbound Game License v1.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. 
You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector_hybrid/mod.rs b/tig-algorithms/src/vector_search/invector_hybrid/mod.rs new file mode 100644 index 0000000..9662551 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid/mod.rs @@ -0,0 +1,382 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < 
pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + 
return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + if sum > current_min { + return f32::MAX; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + fn 
nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(&node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = 
calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 2000000000.0; + let base_fuel = 760000000.0; + let alpha = 1700.0 * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let brute_force_count = (query_count as f32 * 0.1) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + 
distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = &challenge.query_vectors[query_index]; + let mut best = (distances[query_index], best_indexes[query_index]); + + for &(_, vec, index) in &r_vectors { + let dist = squared_euclidean_distance(query, vec); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_hybrid_adp/README.md b/tig-algorithms/src/vector_search/invector_hybrid_adp/README.md new file mode 100644 index 0000000..0002ff6 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid_adp/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector_hybrid_adp +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_hybrid_adp/kernels.cu b/tig-algorithms/src/vector_search/invector_hybrid_adp/kernels.cu new file mode 100644 index 0000000..a1201d8 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid_adp/kernels.cu @@ -0,0 +1,23 @@ +/*! 
+Copyright 2025 syebastian + +Identity of Submitter syebastian + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector_hybrid_adp/mod.rs b/tig-algorithms/src/vector_search/invector_hybrid_adp/mod.rs new file mode 100644 index 0000000..af3a17e --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_hybrid_adp/mod.rs @@ -0,0 +1,382 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: 
None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + 
stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 
+ diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + if sum > current_min { + return f32::MAX; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(&node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } 
+ } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 10000000000.0; + let base_fuel = 760000000.0; + let alpha = 1720.0 * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + 
distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let brute_force_count = (query_count as f32 * 0.1) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = &challenge.query_vectors[query_index]; + let mut best = (distances[query_index], best_indexes[query_index]); + + for &(_, vec, index) in &r_vectors { + let dist = squared_euclidean_distance(query, vec); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_revisited_s/README.md b/tig-algorithms/src/vector_search/invector_revisited_s/README.md new file mode 100644 index 0000000..dd02412 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_revisited_s/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** invector_revisited_s +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/invector_revisited_s/kernels.cu 
b/tig-algorithms/src/vector_search/invector_revisited_s/kernels.cu new file mode 100644 index 0000000..a1201d8 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_revisited_s/kernels.cu @@ -0,0 +1,23 @@ +/*! +Copyright 2025 syebastian + +Identity of Submitter syebastian + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. +*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/invector_revisited_s/mod.rs b/tig-algorithms/src/vector_search/invector_revisited_s/mod.rs new file mode 100644 index 0000000..c775e99 --- /dev/null +++ b/tig-algorithms/src/vector_search/invector_revisited_s/mod.rs @@ -0,0 +1,384 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; 
+ + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + 
(*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + let len = a.len(); + + if a.len() != b.len() || a.len() < 8 { + return f32::MAX; + } + + while i + 7 < len { + unsafe { + let diff0 = *a.get_unchecked(i) - *b.get_unchecked(i); + let diff1 = *a.get_unchecked(i + 1) - *b.get_unchecked(i + 1); + let diff2 = *a.get_unchecked(i + 2) - *b.get_unchecked(i + 2); + let diff3 = *a.get_unchecked(i + 3) - *b.get_unchecked(i + 3); + let diff4 = *a.get_unchecked(i + 4) - *b.get_unchecked(i + 4); + let diff5 = *a.get_unchecked(i + 5) - *b.get_unchecked(i + 5); + let 
diff6 = *a.get_unchecked(i + 6) - *b.get_unchecked(i + 6); + let diff7 = *a.get_unchecked(i + 7) - *b.get_unchecked(i + 7); + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3 + + diff4 * diff4 + diff5 * diff5 + diff6 * diff6 + diff7 * diff7; + } + + if sum > current_min { + return f32::MAX; + } + + i += 8; + } + + while i < len { + unsafe { + let diff = *a.get_unchecked(i) - *b.get_unchecked(i); + sum += diff * diff; + } + i += 1; + } + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + let (nearer, farther) = if diff < 0.0 { + (&node.left, &node.right) + } else { + (&node.right, &node.left) + }; + + if let Some(nearer_node) = nearer { + stack.push((nearer_node.as_ref(), depth + 1)); + } + + if sqr_diff < best.0 { + if let Some(farther_node) = farther { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + } + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0f64; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i] as f64; + } + } + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f64; + } + mean_vector.into_iter().map(|x| x as f32).collect() + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + 
fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(f32, &'a [f32], usize)> { + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + if heap.len() < k + { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() + { + let dist = early_stopping_distance(&mean_query_vector, vector, top_dist); + let ord_dist = FloatOrd(dist); + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + heap.into_sorted_vec() + .into_iter() + .map(|(FloatOrd(dist), index)| (dist, &database[index][..], index)) + .collect() + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let max_fuel = 10000000000.0; + let base_fuel = 760000000.0; + let alpha = 1720.0 * challenge.difficulty.num_queries as f64; + + let m = ((max_fuel - base_fuel) / alpha) as usize; + let n = (m as f32 * 1.2) as usize; + let r = n - m; + + let closest_vectors = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + n, + ); + + let (m_slice, r_slice) = closest_vectors.split_at(m); + let m_vectors: Vec<_> = m_slice.to_vec(); + let r_vectors: Vec<_> = r_slice.to_vec(); + + let mut kd_tree_vectors: Vec<(&[f32], usize)> = m_vectors.iter().map(|&(_, v, i)| (v, i)).collect(); + let kd_tree = build_kd_tree(&mut kd_tree_vectors); + + let mut best_indexes = 
Vec::with_capacity(query_count); + let mut distances = Vec::with_capacity(query_count); + + for query in &challenge.query_vectors { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + distances.push(best.0); + best_indexes.push(best.1.unwrap_or(0)); + } + + let brute_force_count = (query_count as f32 * 0.1) as usize; + let mut distance_indices: Vec<_> = distances.iter().enumerate().collect(); + distance_indices.sort_unstable_by(|a, b| b.1.partial_cmp(a.1).unwrap()); + let high_distance_indices: Vec<_> = distance_indices.into_iter() + .take(brute_force_count) + .map(|(index, _)| index) + .collect(); + + for &query_index in &high_distance_indices { + let query = &challenge.query_vectors[query_index]; + let mut best = (distances[query_index], best_indexes[query_index]); + let current_min = best.0; + + for &(_, vec, index) in &r_vectors { + let dist = early_stopping_distance(query, vec, current_min); + if dist < best.0 { + best = (dist, index); + } + } + + best_indexes[query_index] = best.1; + } + + Ok(Some(Solution { + indexes: best_indexes, + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/is_adp_optimal/README.md b/tig-algorithms/src/vector_search/is_adp_optimal/README.md new file mode 100644 index 0000000..bd7cfab --- /dev/null +++ b/tig-algorithms/src/vector_search/is_adp_optimal/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** is_adp_optimal +* **Copyright:** 2025 OptimusMaximus +* **Identity of Submitter:** OptimusMaximus +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the 
licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/is_adp_optimal/kernels.cu b/tig-algorithms/src/vector_search/is_adp_optimal/kernels.cu new file mode 100644 index 0000000..9254627 --- /dev/null +++ b/tig-algorithms/src/vector_search/is_adp_optimal/kernels.cu @@ -0,0 +1,323 @@ +/*! +Copyright 2025 OptimusMaximus + +Identity of Submitter OptimusMaximus + +UAI null + +Licensed under the TIG Inbound Game License v2.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ + +#include +#include + +#define MAX_FLOAT 3.402823466e+38F + +__device__ float euclidean_distance(const float* a, const float* b, int dims) { + float sum = 0.0f; + int i; + for (i = 0; i < dims - 3; i += 4) { + float diff0 = a[i] - b[i]; + float diff1 = a[i+1] - b[i+1]; + float diff2 = a[i+2] - b[i+2]; + float diff3 = a[i+3] - b[i+3]; + sum = fmaf(diff0, diff0, sum); + sum = fmaf(diff1, diff1, sum); + sum = fmaf(diff2, diff2, sum); + sum = fmaf(diff3, diff3, sum); + } + for (; i < dims; i++) { + float diff = a[i] - b[i]; + sum = fmaf(diff, diff, sum); + } + return sum; +} + +__device__ float euclidean_distance_high(const float* a, const float* b, int dims) { + float sum = 0.0f; + for (int i = 0; i < dims; i += 4) { + float diff0 = a[i] - b[i]; + float diff1 = a[i+1] - b[i+1]; + float diff2 = a[i+2] - b[i+2]; + float diff3 = a[i+3] - b[i+3]; + sum = fmaf(diff0, diff0, sum); + sum = fmaf(diff1, diff1, sum); + sum = fmaf(diff2, diff2, sum); + sum = fmaf(diff3, diff3, sum); + } + return sum; +} + +extern "C" __global__ void deterministic_clustering( + const float* database_vectors, + float* cluster_centers, + int* cluster_assignments, + int* cluster_sizes, + int database_size, + int vector_dims, + int num_clusters, + int num_queries +) { + int cluster_idx = blockIdx.x; + int tid = threadIdx.x; + + if (cluster_idx >= num_clusters) return; + + extern __shared__ float shared_mem[]; + float* center = shared_mem; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = 0.0f; + } + __syncthreads(); + + int seed_idx = ((cluster_idx * 982451653LL + 1566083941LL) % (long long)database_size); + const float* seed_vector = database_vectors + seed_idx * vector_dims; + + for (int d = tid; d < vector_dims; d += blockDim.x) { + center[d] = seed_vector[d]; + cluster_centers[cluster_idx * vector_dims + d] = seed_vector[d]; + } + + if (tid == 0) { + cluster_sizes[cluster_idx] = 0; + } + __syncthreads(); + + for (int vec_idx = tid; vec_idx < database_size; vec_idx += 
blockDim.x) { + const float* vector = database_vectors + vec_idx * vector_dims; + + float min_dist = MAX_FLOAT; + int best_cluster = 0; + + for (int c = 0; c < num_clusters; c++) { + const float* c_center = cluster_centers + c * vector_dims; + float dist = (num_queries <= 4000) ? + euclidean_distance(vector, c_center, vector_dims) : + euclidean_distance_high(vector, c_center, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_cluster = c; + } + } + + cluster_assignments[vec_idx] = best_cluster; + if (best_cluster == cluster_idx) { + atomicAdd(&cluster_sizes[cluster_idx], 1); + } + } +} + +extern "C" __global__ void cluster_search( + const float* query_vectors, + const float* database_vectors, + const float* cluster_centers, + const int* cluster_assignments, + const int* cluster_sizes, + int* results, + int num_queries, + int database_size, + int vector_dims, + int num_clusters +) { + if (num_queries <= 4000) { + int query_idx = blockIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + float cluster_dists[8]; + for (int i = 0; i < num_clusters; i++) { + cluster_dists[i] = MAX_FLOAT; + } + + float best_dist[3] = {MAX_FLOAT, MAX_FLOAT, MAX_FLOAT}; + int best_clusters[3] = {-1, -1, -1}; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + float dist = euclidean_distance(query, center, vector_dims); + + cluster_dists[cluster] = dist; + + if (dist < best_dist[0]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = best_dist[0]; + best_clusters[1] = best_clusters[0]; + best_dist[0] = dist; + best_clusters[0] = cluster; + } else if (dist < best_dist[1]) { + best_dist[2] = best_dist[1]; + best_clusters[2] = best_clusters[1]; + best_dist[1] = dist; + best_clusters[1] = cluster; + } else if (dist < best_dist[2]) { + best_dist[2] = dist; + best_clusters[2] = cluster; + } + } + + float 
min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = best_clusters[0]; + if (target_cluster != -1 && cluster_sizes[target_cluster] > 0) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (best_clusters[1] != -1 && cluster_sizes[best_clusters[1]] > 0) { + target_cluster = best_clusters[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (best_clusters[2] != -1 && cluster_sizes[best_clusters[2]] > 0) { + target_cluster = best_clusters[2]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + for (int cluster = 0; cluster < num_clusters; cluster++) { + if (cluster == best_clusters[0] || cluster == best_clusters[1] || cluster == best_clusters[2]) continue; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + results[query_idx] = best_idx; + } else { + int query_idx = blockIdx.x * 
blockDim.x + threadIdx.x; + if (query_idx >= num_queries) return; + + const float* query = query_vectors + query_idx * vector_dims; + + extern __shared__ float shared_mem[]; + float* cluster_dists = shared_mem; + int* cluster_indices = (int*)&shared_mem[num_clusters]; + + if (threadIdx.x < num_clusters) { + cluster_dists[threadIdx.x] = MAX_FLOAT; + cluster_indices[threadIdx.x] = -1; + } + + float best_dist[2] = {MAX_FLOAT, MAX_FLOAT}; + int best_clusters[2] = {-1, -1}; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + const float* center = cluster_centers + cluster * vector_dims; + float dist = euclidean_distance_high(query, center, vector_dims); + + if (dist < best_dist[0]) { + best_dist[1] = best_dist[0]; + best_clusters[1] = best_clusters[0]; + best_dist[0] = dist; + best_clusters[0] = cluster; + } else if (dist < best_dist[1]) { + best_dist[1] = dist; + best_clusters[1] = cluster; + } + + if (cluster < num_clusters && threadIdx.x == 0) { + cluster_dists[cluster] = dist; + } + } + + float min_dist = MAX_FLOAT; + int best_idx = -1; + + int target_cluster = best_clusters[0]; + if (target_cluster != -1 && cluster_sizes[target_cluster] > 0) { + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if (min_dist == MAX_FLOAT && best_clusters[1] != -1 && cluster_sizes[best_clusters[1]] > 0) { + target_cluster = best_clusters[1]; + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == target_cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + + if 
(min_dist == MAX_FLOAT) { + float search_radius = cluster_dists[0] * 2.0f; + + for (int cluster = 0; cluster < num_clusters; cluster++) { + if (cluster == best_clusters[0] || cluster == best_clusters[1]) continue; + if (cluster_dists[cluster] >= search_radius) continue; + if (cluster_sizes[cluster] == 0) continue; + + for (int vec_idx = 0; vec_idx < database_size; vec_idx++) { + if (cluster_assignments[vec_idx] == cluster) { + const float* db_vector = database_vectors + vec_idx * vector_dims; + float dist = euclidean_distance_high(query, db_vector, vector_dims); + if (dist < min_dist) { + min_dist = dist; + best_idx = vec_idx; + } + } + } + } + } + + results[query_idx] = best_idx; + } +} diff --git a/tig-algorithms/src/vector_search/is_adp_optimal/mod.rs b/tig-algorithms/src/vector_search/is_adp_optimal/mod.rs new file mode 100644 index 0000000..9c4bcef --- /dev/null +++ b/tig-algorithms/src/vector_search/is_adp_optimal/mod.rs @@ -0,0 +1,102 @@ +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + _prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + let vector_dims = challenge.vector_dims as i32; + let database_size = challenge.database_size as i32; + let num_queries = challenge.difficulty.num_queries as i32; + + let block_size = 128; + let num_clusters = if num_queries <= 6000 { + 2 + } else if num_queries < 9000 { + 4 + } else if num_queries < 10000 { + 6 + } else if num_queries < 11000 { + 10 + } else if num_queries < 12000 { + 12 + } else if num_queries < 14000 { + 14 + } else { + 14 + }; + + let deterministic_clustering = module.load_function("deterministic_clustering")?; + let cluster_search = module.load_function("cluster_search")?; 
+ + let mut d_cluster_centers = stream.alloc_zeros::((num_clusters * vector_dims) as usize)?; + let mut d_cluster_assignments = stream.alloc_zeros::(database_size as usize)?; + let mut d_cluster_sizes = stream.alloc_zeros::(num_clusters as usize)?; + + let cluster_config = LaunchConfig { + grid_dim: (num_clusters as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (vector_dims * 4) as u32, + }; + + unsafe { + stream.launch_builder(&deterministic_clustering) + .arg(&challenge.d_database_vectors) + .arg(&mut d_cluster_centers) + .arg(&mut d_cluster_assignments) + .arg(&mut d_cluster_sizes) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .arg(&num_queries) + .launch(cluster_config)?; + } + stream.synchronize()?; + + let mut d_results = stream.alloc_zeros::(num_queries as usize)?; + + let search_config = if num_queries <= 4000 { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (1, 1, 1), + shared_mem_bytes: 0, + } + } else { + LaunchConfig { + grid_dim: (num_queries as u32, 1, 1), + block_dim: (block_size, 1, 1), + shared_mem_bytes: (num_clusters * 8) as u32, + } + }; + + unsafe { + stream.launch_builder(&cluster_search) + .arg(&challenge.d_query_vectors) + .arg(&challenge.d_database_vectors) + .arg(&d_cluster_centers) + .arg(&d_cluster_assignments) + .arg(&d_cluster_sizes) + .arg(&mut d_results) + .arg(&num_queries) + .arg(&database_size) + .arg(&vector_dims) + .arg(&num_clusters) + .launch(search_config)?; + } + stream.synchronize()?; + + let indices = stream.memcpy_dtov(&d_results)?; + let indexes = indices.iter().map(|&idx| idx as usize).collect(); + + let _ = save_solution(&Solution { indexes }); + return Ok(()); +} diff --git a/tig-algorithms/src/vector_search/mod.rs b/tig-algorithms/src/vector_search/mod.rs index ae3472a..1921faf 100644 --- a/tig-algorithms/src/vector_search/mod.rs +++ b/tig-algorithms/src/vector_search/mod.rs @@ -24,7 +24,8 @@ // c004_a013 -// c004_a014 +pub mod brute_force_bacalhau; +pub 
use brute_force_bacalhau as c004_a014; // c004_a015 @@ -48,7 +49,8 @@ // c004_a025 -// c004_a026 +pub mod optimax_gpu; +pub use optimax_gpu as c004_a026; // c004_a027 @@ -64,7 +66,8 @@ // c004_a033 -// c004_a034 +pub mod invector; +pub use invector as c004_a034; // c004_a035 @@ -80,7 +83,8 @@ // c004_a041 -// c004_a042 +pub mod invector_hybrid; +pub use invector_hybrid as c004_a042; // c004_a043 @@ -88,9 +92,11 @@ // c004_a045 -// c004_a046 +pub mod invector_hybrid_adp; +pub use invector_hybrid_adp as c004_a046; -// c004_a047 +pub mod invector_revisited_s; +pub use invector_revisited_s as c004_a047; // c004_a048 @@ -102,15 +108,18 @@ // c004_a052 -// c004_a053 +pub mod invector_adj; +pub use invector_adj as c004_a053; // c004_a054 -// c004_a055 +pub mod invector_fast; +pub use invector_fast as c004_a055; // c004_a056 -// c004_a057 +pub mod better_vector; +pub use better_vector as c004_a057; // c004_a058 @@ -124,7 +133,8 @@ // c004_a063 -// c004_a064 +pub mod improved_search_adp; +pub use improved_search_adp as c004_a064; // c004_a065 @@ -132,17 +142,20 @@ // c004_a067 -// c004_a068 +pub mod is_adp_optimal; +pub use is_adp_optimal as c004_a068; // c004_a069 -// c004_a070 +pub mod improved_search_new; +pub use improved_search_new as c004_a070; // c004_a071 // c004_a072 -// c004_a073 +pub mod cluster_improved; +pub use cluster_improved as c004_a073; // c004_a074 diff --git a/tig-algorithms/src/vector_search/optimax_gpu/README.md b/tig-algorithms/src/vector_search/optimax_gpu/README.md new file mode 100644 index 0000000..0cc7d3c --- /dev/null +++ b/tig-algorithms/src/vector_search/optimax_gpu/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vector_search +* **Algorithm Name:** optimax_gpu +* **Copyright:** 2024 bw-dev36 +* **Identity of Submitter:** bw-dev36 +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the 
following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vector_search/optimax_gpu/kernels.cu b/tig-algorithms/src/vector_search/optimax_gpu/kernels.cu new file mode 100644 index 0000000..7f0e30f --- /dev/null +++ b/tig-algorithms/src/vector_search/optimax_gpu/kernels.cu @@ -0,0 +1,19 @@ +/*! +Copyright 2024 bw-dev36 + +Licensed under the TIG Inbound Game License v1.0 or (at your option) any later +version (the "License"); you may not use this file except in compliance with the +License. You may obtain a copy of the License at + +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the specific +language governing permissions and limitations under the License. 
+*/ + +extern "C" __global__ void do_nothing() +{ + // This kernel does nothing +} diff --git a/tig-algorithms/src/vector_search/optimax_gpu/mod.rs b/tig-algorithms/src/vector_search/optimax_gpu/mod.rs new file mode 100644 index 0000000..7b0b116 --- /dev/null +++ b/tig-algorithms/src/vector_search/optimax_gpu/mod.rs @@ -0,0 +1,477 @@ +use anyhow::{anyhow, Result}; +use cudarc::{ + driver::{safe::LaunchConfig, CudaModule, CudaStream, PushKernelArg}, + runtime::sys::cudaDeviceProp, +}; +use std::sync::Arc; +use serde_json::{Map, Value}; +use tig_challenges::vector_search::{Challenge, Solution}; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + module: Arc, + stream: Arc, + prop: &cudaDeviceProp, +) -> anyhow::Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use anyhow::Ok; + use tig_challenges::vector_search::*; + use std::cmp::Ordering; + use std::collections::BinaryHeap; + + struct KDNode<'a> { + point: &'a [f32], + left: Option>>, + right: Option>>, + index: usize, + } + + impl<'a> KDNode<'a> { + fn new(point: &'a [f32], index: usize) -> Self { + KDNode { + point, + left: None, + right: None, + index, + } + } + } + fn quickselect_by(arr: &mut [(&[f32], usize)], k: usize, compare: &F) + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + if arr.len() <= 1 { + return; + } + + let pivot_index = partition(arr, compare); + if k < pivot_index { + quickselect_by(&mut arr[..pivot_index], k, compare); + } else if k > pivot_index { + quickselect_by(&mut arr[pivot_index + 1..], k - pivot_index - 1, compare); + } + } + + fn partition(arr: &mut [(&[f32], usize)], compare: &F) -> usize + where + F: Fn(&(&[f32], usize), &(&[f32], usize)) -> Ordering, + { + let pivot_index = arr.len() >> 1; + arr.swap(pivot_index, arr.len() - 1); + + let mut store_index = 0; + for i in 
0..arr.len() - 1 { + if compare(&arr[i], &arr[arr.len() - 1]) == Ordering::Less { + arr.swap(i, store_index); + store_index += 1; + } + } + arr.swap(store_index, arr.len() - 1); + store_index + } + + fn build_kd_tree<'a>(points: &mut [(&'a [f32], usize)]) -> Option>> { + if points.is_empty() { + return None; + } + + const NUM_DIMENSIONS: usize = 250; + let mut stack: Vec<(usize, usize, usize, Option<*mut KDNode<'a>>, bool)> = Vec::new(); + let mut root: Option>> = None; + + stack.push((0, points.len(), 0, None, false)); + + while let Some((start, end, depth, parent_ptr, is_left)) = stack.pop() { + if start >= end { + continue; + } + + let axis = depth % NUM_DIMENSIONS; + let median = (start + end) / 2; + quickselect_by(&mut points[start..end], median - start, &|a, b| { + a.0[axis].partial_cmp(&b.0[axis]).unwrap() + }); + + let (median_point, median_index) = points[median]; + let mut new_node = Box::new(KDNode::new(median_point, median_index)); + let new_node_ptr: *mut KDNode = &mut *new_node; + + if let Some(parent_ptr) = parent_ptr { + unsafe { + if is_left { + (*parent_ptr).left = Some(new_node); + } else { + (*parent_ptr).right = Some(new_node); + } + } + } else { + root = Some(new_node); + } + + stack.push((median + 1, end, depth + 1, Some(new_node_ptr), false)); + stack.push((start, median, depth + 1, Some(new_node_ptr), true)); + } + + root + } + + #[inline(always)] + fn squared_euclidean_distance(a: &[f32], b: &[f32]) -> f32 { + let mut sum = 0.0; + for i in 0..a.len() { + let diff = a[i] - b[i]; + sum += diff * diff; + } + sum + } + + #[inline(always)] + fn early_stopping_distance(a: &[f32], b: &[f32], current_min: f32) -> f32 { + let mut sum = 0.0; + let mut i = 0; + while i + 3 < a.len() { + let diff0 = a[i] - b[i]; + let diff1 = a[i + 1] - b[i + 1]; + let diff2 = a[i + 2] - b[i + 2]; + let diff3 = a[i + 3] - b[i + 3]; + + sum += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + + if sum > current_min { + return f32::MAX; + } + + i += 4; + 
} + + while i < a.len() { + let diff = a[i] - b[i]; + sum += diff * diff; + + if sum > current_min { + return f32::MAX; + } + + i += 1; + } + + sum + } + + fn nearest_neighbor_search<'a>( + root: &Option>>, + target: &[f32], + best: &mut (f32, Option), + ) { + let num_dimensions = target.len(); + let mut stack = Vec::with_capacity(64); + + if let Some(node) = root { + stack.push((node.as_ref(), 0)); + } + + while let Some((node, depth)) = stack.pop() { + let axis = depth % num_dimensions; + let dist = early_stopping_distance(&node.point, target, best.0); + + if dist < best.0 { + best.0 = dist; + best.1 = Some(node.index); + } + + let diff = target[axis] - node.point[axis]; + let sqr_diff = diff * diff; + + if sqr_diff < best.0 { + if let Some(farther_node) = if diff < 0.0 { &node.right } else { &node.left } { + stack.push((farther_node.as_ref(), depth + 1)); + } + } + + if let Some(nearer_node) = if diff < 0.0 { &node.left } else { &node.right } { + stack.push((nearer_node.as_ref(), depth + 1)); + } + } + } + + fn calculate_mean_vector(vectors: &[&[f32]]) -> Vec { + let num_vectors = vectors.len(); + let num_dimensions = 250; + + let mut mean_vector = vec![0.0; num_dimensions]; + + for vector in vectors { + for i in 0..num_dimensions { + mean_vector[i] += vector[i]; + } + } + + for i in 0..num_dimensions { + mean_vector[i] /= num_vectors as f32; + } + + mean_vector + } + + #[derive(Debug)] + struct FloatOrd(f32); + + impl PartialEq for FloatOrd { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for FloatOrd {} + + impl PartialOrd for FloatOrd { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl Ord for FloatOrd { + fn cmp(&self, other: &Self) -> Ordering { + + self.partial_cmp(other).unwrap_or(Ordering::Equal) + } + } + + fn filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + ) -> Vec<(&'a [f32], usize)> { + let query_refs: Vec<&[f32]> = 
query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, vector) in database.iter().enumerate() { + let dist = squared_euclidean_distance(&mean_query_vector, vector); + let ord_dist = FloatOrd(dist); + if heap.len() < k { + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() { + if dist < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + let result: Vec<(&'a [f32], usize)> = heap + .into_iter() + .map(|(_, index)| (&database[index][..], index)) + .collect(); + + result + } + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let subset_size = match query_count { + 10..=19 if challenge.difficulty.better_than_baseline <= 470 => 4200, + 10..=19 if challenge.difficulty.better_than_baseline > 470 => 4200, + 20..=28 if challenge.difficulty.better_than_baseline <= 465 => 3000, + 20..=28 if challenge.difficulty.better_than_baseline > 465 => 6000, // need more fuel + 29..=50 if challenge.difficulty.better_than_baseline <= 480 => 2000, + 29..=45 if challenge.difficulty.better_than_baseline > 480 => 6000, + 46..=50 if challenge.difficulty.better_than_baseline > 480 => 5000, // need more fuel + 51..=70 if challenge.difficulty.better_than_baseline <= 480 => 3000, + 51..=70 if challenge.difficulty.better_than_baseline > 480 => 3000, // need more fuel + 71..=100 if challenge.difficulty.better_than_baseline <= 480 => 1500, + 71..=100 if challenge.difficulty.better_than_baseline > 480 => 2500, // need more fuel + _ => 1000, // need more fuel + }; + let subset = filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + subset_size, + ); + + + let kd_tree = build_kd_tree(&mut subset.clone()); + + + let mut best_indexes = Vec::with_capacity(challenge.query_vectors.len()); + + for 
query in challenge.query_vectors.iter() { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + if let Some(best_index) = best.1 { + best_indexes.push(best_index); + } + } + + + Ok(Some(Solution { + indexes: best_indexes, + })) + } + + #[cfg(feature = "cuda")] + mod gpu_optimisation { + use super::*; + use cudarc::driver::*; + use std::{collections::HashMap, sync::Arc}; + use tig_challenges::CudaKernel; + pub const KERNEL: Option = Some(CudaKernel { + src: r#" + + extern "C" __global__ void filter_vectors(float* query_mean, float* vectors, float* distances, int num_vectors, int num_dimensions) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < num_vectors) { + float dist = 0.0; + for (int d = 0; d < num_dimensions; ++d) { + float diff = query_mean[d] - vectors[idx * num_dimensions + d]; + dist += diff * diff; + } + distances[idx] = dist; + } + } + + "#, + + funcs: &["filter_vectors"], + }); + + pub fn cuda_solve_challenge( + challenge: &Challenge, + dev: &Arc, + mut funcs: HashMap<&'static str, CudaFunction>, + ) -> anyhow::Result> { + let query_count = challenge.query_vectors.len(); + + let subset_size = match query_count { + 10..=19 if challenge.difficulty.better_than_baseline <= 470 => 4200, + 10..=19 if challenge.difficulty.better_than_baseline > 470 => 4200, + 20..=28 if challenge.difficulty.better_than_baseline <= 465 => 3000, + 20..=28 if challenge.difficulty.better_than_baseline > 465 => 6000, // need more fuel + 29..=50 if challenge.difficulty.better_than_baseline <= 480 => 2000, + 29..=45 if challenge.difficulty.better_than_baseline > 480 => 6000, + 46..=50 if challenge.difficulty.better_than_baseline > 480 => 5000, // need more fuel + 51..=70 if challenge.difficulty.better_than_baseline <= 480 => 3000, + 51..=70 if challenge.difficulty.better_than_baseline > 480 => 3000, // need more fuel + 71..=100 if challenge.difficulty.better_than_baseline <= 480 => 1500, + 71..=100 if 
challenge.difficulty.better_than_baseline > 480 => 2500, // need more fuel + _ => 1000, // need more fuel + }; + let subset = cuda_filter_relevant_vectors( + &challenge.vector_database, + &challenge.query_vectors, + subset_size, + dev, + funcs, + )?; + let kd_tree = build_kd_tree(&mut subset.clone()); + + + let mut best_indexes = Vec::with_capacity(challenge.query_vectors.len()); + + for query in challenge.query_vectors.iter() { + let mut best = (std::f32::MAX, None); + nearest_neighbor_search(&kd_tree, query, &mut best); + + if let Some(best_index) = best.1 { + best_indexes.push(best_index); + } + } + + + + + + Ok(Some(Solution { + indexes: best_indexes, + })) + } + + #[cfg(feature = "cuda")] + fn cuda_filter_relevant_vectors<'a>( + database: &'a [Vec], + query_vectors: &[Vec], + k: usize, + dev: &Arc, + mut funcs: HashMap<&'static str, CudaFunction>, + ) -> anyhow::Result> { + + let query_refs: Vec<&[f32]> = query_vectors.iter().map(|v| &v[..]).collect(); + let mean_query_vector = calculate_mean_vector(&query_refs); + + let num_vectors = database.len(); + let num_dimensions = 250; + let flattened_database: Vec = database.iter().flatten().cloned().collect(); + let database_dev = dev.htod_sync_copy(&flattened_database)?; + let mean_query_dev = dev.htod_sync_copy(&mean_query_vector)?; + let mut distances_dev = dev.alloc_zeros::(num_vectors)?; + let cfg = LaunchConfig { + block_dim: (256, 1, 1), + grid_dim: ((num_vectors as u32 + 255) / 256, 1, 1), + shared_mem_bytes: 0, + }; + unsafe { + funcs.remove("filter_vectors").unwrap().launch( + cfg, + ( + &mean_query_dev, + &database_dev, + &mut distances_dev, + num_vectors as i32, + num_dimensions as i32, + ), + ) + }?; + let mut distances_host = vec![0.0f32; num_vectors]; + dev.dtoh_sync_copy_into(&distances_dev, &mut distances_host)?; + let mut heap: BinaryHeap<(FloatOrd, usize)> = BinaryHeap::with_capacity(k); + + for (index, &distance) in distances_host.iter().enumerate() { + let ord_dist = FloatOrd(distance); + if 
heap.len() < k { + heap.push((ord_dist, index)); + } else if let Some(&(FloatOrd(top_dist), _)) = heap.peek() { + if distance < top_dist { + heap.pop(); + heap.push((ord_dist, index)); + } + } + } + let result: Vec<(&[f32], usize)> = heap + .into_iter() + .map(|(_, index)| (&database[index][..], index)) + .collect(); + + Ok(result) + } + + #[cfg(feature = "cuda")] + fn cuda_build_kd_tree<'a>(subset: &mut [(&'a [f32], usize)], + dev: &Arc, + funcs: &mut HashMap<&'static str, CudaFunction>, + ) -> Option>> { + None + } + + #[cfg(feature = "cuda")] + fn cuda_nearest_neighbor_search( + kd_tree: &Option>>, + query: &[f32], + best: &mut (f32, Option), + dev: &Arc, + funcs: &mut HashMap<&'static str, CudaFunction>, + ) -> anyhow::Result<()> { + Ok(()) + } + } + #[cfg(feature = "cuda")] + pub use gpu_optimisation::{cuda_solve_challenge, KERNEL}; +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_cw_adp/README.md b/tig-algorithms/src/vehicle_routing/advanced_cw_adp/README.md new file mode 100644 index 0000000..fdc5a84 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_cw_adp/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** advanced_cw_adp +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_cw_adp/mod.rs b/tig-algorithms/src/vehicle_routing/advanced_cw_adp/mod.rs new 
file mode 100644 index 0000000..c95ea46 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_cw_adp/mod.rs @@ -0,0 +1,461 @@ +use rand::{ + rngs::{SmallRng, StdRng}, + Rng, SeedableRng, +}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + ) -> anyhow::Result<()> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const NUM_ITERATIONS: usize = 1000; + + let num_nodes = challenge.difficulty.num_nodes; + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.545 { + return Ok(()); + } + + let mut promising = false; + + // Try different parameter initializations + for init_value in [1.0, 2.0] { + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + let mut rng = + StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let mut current_params = vec![init_value; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = + calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + let _ = save_solution(¤t_solution); + return Ok(()); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 && !promising + { + return 
Ok(()); + } else { + promising = true; + } + + let mut iterations_since_improvement = 0; + let mut stagnation_factor = 1.0; + + for _ in 0..NUM_ITERATIONS { + let neighbor_params = + generate_neighbor(¤t_params, &mut rng, stagnation_factor); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + + let mut neighbor_solution = + create_solution(challenge, &neighbor_params, &savings_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = + calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - current_cost as f32; + let scaling_factor = current_cost as f32 * 0.005; // Scale based on current solution cost + if delta <= 0.0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + iterations_since_improvement = 0; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(Solution { + routes: current_solution.routes.clone(), + }); + } + } else if rng.gen::() < (-delta / scaling_factor).exp() { + current_params = neighbor_params; + iterations_since_improvement = 0; + } else { + iterations_since_improvement += 1; + } + + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + Ok(global_best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &Challenge) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 2; + + for i in 1..num_nodes { + for j in 
(i + 1)..num_nodes { + if challenge.distance_matrix[i][j] <= threshold { + savings.push((0.0, i as u8, j as u8)); + } + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings( + savings_list: &mut [(f32, u8, u8)], + params: &[f32], + challenge: &Challenge, + ) { + let distance_matrix = &challenge.distance_matrix; + + // Update the score for each pair. + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = (params[i] + params[j]) + * (distance_matrix[0][i] as f32 + distance_matrix[j][0] as f32 + - distance_matrix[i][j] as f32); + } + + // Sort by descending order of the score. + savings_list + .sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal)); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R, k: f32) -> Vec { + current + .iter() + .map(|¶m| { + let delta = rng.gen_range(-0.05 * k..=0.05 * k); + (param + delta).clamp(1.0, 2.0) + }) + .collect() + } + + #[inline] + fn calculate_solution_cost(solution: &Solution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route + .windows(2) + .map(|w| distance_matrix[w[0]][w[1]]) + .sum::() + }) + .sum() + } + + #[inline] + fn create_solution( + challenge: &Challenge, + params: &[f32], + savings_list: &[(f32, u8, u8)], + ) -> Solution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut routes: Vec>> = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) + { + let (left_start, left_end) = + (*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = + (*right_route.first().unwrap(), *right_route.last().unwrap()); + + // 
Check feasibility (same check as original). + if left_start == right_start + || route_demands[left_start] + route_demands[right_start] > max_capacity + { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + // Reverse if needed (same as original). + if left_start == i { + new_route.reverse(); + } + if right_end == j { + right_route.reverse(); + } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + } + + // Wrap each route with depot (0) at start and end. + Solution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| { + route.filter(|r| r[0] == i) // only keep the "canonical" copy + }) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } + + pub fn postprocess_solution( + solution: &mut Solution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) { + loop { + let intra_improved = two_opt_all_routes(solution, distance_matrix); + let inter_route_improved = unsafe { + try_inter_route_swap_unsafe(solution, distance_matrix, demands, max_capacity) + }; + if !intra_improved && !inter_route_improved { + break; + } + } + } + + #[inline] + fn two_opt_all_routes(solution: &mut Solution, distance_matrix: &Vec>) -> bool { + let mut improved = false; + for route in &mut solution.routes { + if unsafe { two_opt_unsafe(route, distance_matrix) } { + improved = true; + } + } + improved + } + + #[inline] + unsafe fn two_opt_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 
1..(n - 2) { + let mut best_gain = 0; + let mut best_j = 0; + + for j in (i + 1)..(n - 1) { + let [r_im1, r_i, r_j, r_jp1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + + let gain = distance_matrix[r_im1][r_i] + distance_matrix[r_j][r_jp1] + - distance_matrix[r_im1][r_j] + - distance_matrix[r_i][r_jp1]; + + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + + if best_gain > 0 { + let mut start = i; + let mut end = best_j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + } + } + improved + } + #[inline] + unsafe fn try_inter_route_swap_unsafe( + solution: &mut Solution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let mut improved = false; + let num_routes = solution.routes.len(); + let routes_ptr = solution.routes.as_mut_ptr(); + + // Store all possible improvements: (improvement, route_i_idx, route_j_idx, new_route_i, new_route_j) + let mut all_improvements = Vec::new(); + + for i in 0..num_routes { + for j in (i + 1)..num_routes { + let route_i = &mut *routes_ptr.add(i); + let route_j = &mut *routes_ptr.add(j); + + if let Some((improvement, new_route_i, new_route_j)) = + unsafe_find_best_swap_with_value( + route_i, + route_j, + distance_matrix, + demands, + max_capacity, + ) + { + all_improvements.push((improvement, i, j, new_route_i, new_route_j)); + } + } + } + + // Sort improvements by descending order of improvement value + all_improvements.sort_unstable_by(|a, b| b.0.cmp(&a.0)); + + // Keep track of which routes have been modified + let mut modified_routes = vec![false; num_routes]; + + // Apply non-conflicting improvements + for (_, route_i_idx, route_j_idx, new_route_i, new_route_j) in all_improvements { + // Skip if either route has already been modified + if modified_routes[route_i_idx] || 
modified_routes[route_j_idx] { + continue; + } + + // Apply the swap + let route_i = &mut *routes_ptr.add(route_i_idx); + let route_j = &mut *routes_ptr.add(route_j_idx); + *route_i = new_route_i; + *route_j = new_route_j; + + // Mark both routes as modified + modified_routes[route_i_idx] = true; + modified_routes[route_j_idx] = true; + improved = true; + } + + improved + } + + #[inline] + unsafe fn unsafe_find_best_swap_with_value( + route1: &Vec, + route2: &Vec, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) -> Option<(i32, Vec, Vec)> { + let mut best_improvement = 0; + let mut best_swap = None; + + let r1_ptr = route1.as_ptr(); + let r2_ptr = route2.as_ptr(); + let r1_len = route1.len(); + let r2_len = route2.len(); + + let route1_demand: i32 = route1.iter().map(|&n| demands[n]).sum(); + let route2_demand: i32 = route2.iter().map(|&n| demands[n]).sum(); + + for i in 1..(r1_len - 1) { + for j in 1..(r2_len - 1) { + let [r1_im1, r1_i, r1_ip1] = + [*r1_ptr.add(i - 1), *r1_ptr.add(i), *r1_ptr.add(i + 1)]; + + let [r2_jm1, r2_j, r2_jp1] = + [*r2_ptr.add(j - 1), *r2_ptr.add(j), *r2_ptr.add(j + 1)]; + + let demand_delta = demands[r2_j] - demands[r1_i]; + + if route1_demand + demand_delta > max_capacity + || route2_demand - demand_delta > max_capacity + { + continue; + } + + let improvement = distance_matrix[r1_im1][r1_i] + + distance_matrix[r1_i][r1_ip1] + + distance_matrix[r2_jm1][r2_j] + + distance_matrix[r2_j][r2_jp1] + - distance_matrix[r1_im1][r2_j] + - distance_matrix[r2_j][r1_ip1] + - distance_matrix[r2_jm1][r1_i] + - distance_matrix[r1_i][r2_jp1]; + + if improvement > best_improvement { + best_improvement = improvement; + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = r2_j; + new_route2[j] = r1_i; + best_swap = Some((improvement, new_route1, new_route2)); + } + } + } + best_swap + } +} diff --git a/tig-algorithms/src/vehicle_routing/advanced_cw_opt/README.md 
b/tig-algorithms/src/vehicle_routing/advanced_cw_opt/README.md new file mode 100644 index 0000000..92f147e --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_cw_opt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** advanced_cw_opt +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_cw_opt/mod.rs b/tig-algorithms/src/vehicle_routing/advanced_cw_opt/mod.rs new file mode 100644 index 0000000..8ef3ce3 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_cw_opt/mod.rs @@ -0,0 +1,461 @@ +use rand::{ + rngs::{SmallRng, StdRng}, + Rng, SeedableRng, +}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + ) -> anyhow::Result<()> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const NUM_ITERATIONS: usize = 200; + + let num_nodes = challenge.difficulty.num_nodes; + + let max_dist: f32 
= challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.545 { + return Ok(()); + } + + let mut promising = false; + + // Try different parameter initializations + for init_value in [1.0, 2.0] { + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + let mut rng = + StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let mut current_params = vec![init_value; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = + calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + let _ = save_solution(¤t_solution); + return Ok(()); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 && !promising + { + return Ok(()); + } else { + promising = true; + } + + let mut iterations_since_improvement = 0; + let mut stagnation_factor = 1.0; + + for _ in 0..NUM_ITERATIONS { + let neighbor_params = + generate_neighbor(¤t_params, &mut rng, stagnation_factor); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + + let mut neighbor_solution = + create_solution(challenge, &neighbor_params, &savings_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = + calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - current_cost as f32; + let scaling_factor = current_cost as f32 * 0.005; // Scale based on current solution cost + if delta <= 0.0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + 
iterations_since_improvement = 0; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(Solution { + routes: current_solution.routes.clone(), + }); + } + } else if rng.gen::() < (-delta / scaling_factor).exp() { + current_params = neighbor_params; + iterations_since_improvement = 0; + } else { + iterations_since_improvement += 1; + } + + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + Ok(global_best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &Challenge) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 2; + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + if challenge.distance_matrix[i][j] <= threshold { + savings.push((0.0, i as u8, j as u8)); + } + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings( + savings_list: &mut [(f32, u8, u8)], + params: &[f32], + challenge: &Challenge, + ) { + let distance_matrix = &challenge.distance_matrix; + + // Update the score for each pair. + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = (params[i] + params[j]) + * (distance_matrix[0][i] as f32 + distance_matrix[j][0] as f32 + - distance_matrix[i][j] as f32); + } + + // Sort by descending order of the score. 
+ savings_list + .sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal)); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R, k: f32) -> Vec { + current + .iter() + .map(|¶m| { + let delta = rng.gen_range(-0.05 * k..=0.05 * k); + (param + delta).clamp(1.0, 2.0) + }) + .collect() + } + + #[inline] + fn calculate_solution_cost(solution: &Solution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route + .windows(2) + .map(|w| distance_matrix[w[0]][w[1]]) + .sum::() + }) + .sum() + } + + #[inline] + fn create_solution( + challenge: &Challenge, + params: &[f32], + savings_list: &[(f32, u8, u8)], + ) -> Solution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut routes: Vec>> = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) + { + let (left_start, left_end) = + (*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = + (*right_route.first().unwrap(), *right_route.last().unwrap()); + + // Check feasibility (same check as original). + if left_start == right_start + || route_demands[left_start] + route_demands[right_start] > max_capacity + { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + // Reverse if needed (same as original). 
+ if left_start == i { + new_route.reverse(); + } + if right_end == j { + right_route.reverse(); + } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + } + + // Wrap each route with depot (0) at start and end. + Solution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| { + route.filter(|r| r[0] == i) // only keep the "canonical" copy + }) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } + + pub fn postprocess_solution( + solution: &mut Solution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) { + loop { + let intra_improved = two_opt_all_routes(solution, distance_matrix); + let inter_route_improved = unsafe { + try_inter_route_swap_unsafe(solution, distance_matrix, demands, max_capacity) + }; + if !intra_improved && !inter_route_improved { + break; + } + } + } + + #[inline] + fn two_opt_all_routes(solution: &mut Solution, distance_matrix: &Vec>) -> bool { + let mut improved = false; + for route in &mut solution.routes { + if unsafe { two_opt_unsafe(route, distance_matrix) } { + improved = true; + } + } + improved + } + + #[inline] + unsafe fn two_opt_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 1..(n - 2) { + let mut best_gain = 0; + let mut best_j = 0; + + for j in (i + 1)..(n - 1) { + let [r_im1, r_i, r_j, r_jp1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + + let gain = distance_matrix[r_im1][r_i] + distance_matrix[r_j][r_jp1] + - 
distance_matrix[r_im1][r_j] + - distance_matrix[r_i][r_jp1]; + + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + + if best_gain > 0 { + let mut start = i; + let mut end = best_j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + } + } + improved + } + #[inline] + unsafe fn try_inter_route_swap_unsafe( + solution: &mut Solution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let mut improved = false; + let num_routes = solution.routes.len(); + let routes_ptr = solution.routes.as_mut_ptr(); + + // Store all possible improvements: (improvement, route_i_idx, route_j_idx, new_route_i, new_route_j) + let mut all_improvements = Vec::new(); + + for i in 0..num_routes { + for j in (i + 1)..num_routes { + let route_i = &mut *routes_ptr.add(i); + let route_j = &mut *routes_ptr.add(j); + + if let Some((improvement, new_route_i, new_route_j)) = + unsafe_find_best_swap_with_value( + route_i, + route_j, + distance_matrix, + demands, + max_capacity, + ) + { + all_improvements.push((improvement, i, j, new_route_i, new_route_j)); + } + } + } + + // Sort improvements by descending order of improvement value + all_improvements.sort_unstable_by(|a, b| b.0.cmp(&a.0)); + + // Keep track of which routes have been modified + let mut modified_routes = vec![false; num_routes]; + + // Apply non-conflicting improvements + for (_, route_i_idx, route_j_idx, new_route_i, new_route_j) in all_improvements { + // Skip if either route has already been modified + if modified_routes[route_i_idx] || modified_routes[route_j_idx] { + continue; + } + + // Apply the swap + let route_i = &mut *routes_ptr.add(route_i_idx); + let route_j = &mut *routes_ptr.add(route_j_idx); + *route_i = new_route_i; + *route_j = new_route_j; + + // Mark both routes as modified + modified_routes[route_i_idx] = true; + modified_routes[route_j_idx] 
= true; + improved = true; + } + + improved + } + + #[inline] + unsafe fn unsafe_find_best_swap_with_value( + route1: &Vec, + route2: &Vec, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) -> Option<(i32, Vec, Vec)> { + let mut best_improvement = 0; + let mut best_swap = None; + + let r1_ptr = route1.as_ptr(); + let r2_ptr = route2.as_ptr(); + let r1_len = route1.len(); + let r2_len = route2.len(); + + let route1_demand: i32 = route1.iter().map(|&n| demands[n]).sum(); + let route2_demand: i32 = route2.iter().map(|&n| demands[n]).sum(); + + for i in 1..(r1_len - 1) { + for j in 1..(r2_len - 1) { + let [r1_im1, r1_i, r1_ip1] = + [*r1_ptr.add(i - 1), *r1_ptr.add(i), *r1_ptr.add(i + 1)]; + + let [r2_jm1, r2_j, r2_jp1] = + [*r2_ptr.add(j - 1), *r2_ptr.add(j), *r2_ptr.add(j + 1)]; + + let demand_delta = demands[r2_j] - demands[r1_i]; + + if route1_demand + demand_delta > max_capacity + || route2_demand - demand_delta > max_capacity + { + continue; + } + + let improvement = distance_matrix[r1_im1][r1_i] + + distance_matrix[r1_i][r1_ip1] + + distance_matrix[r2_jm1][r2_j] + + distance_matrix[r2_j][r2_jp1] + - distance_matrix[r1_im1][r2_j] + - distance_matrix[r2_j][r1_ip1] + - distance_matrix[r2_jm1][r1_i] + - distance_matrix[r1_i][r2_jp1]; + + if improvement > best_improvement { + best_improvement = improvement; + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = r2_j; + new_route2[j] = r1_i; + best_swap = Some((improvement, new_route1, new_route2)); + } + } + } + best_swap + } +} diff --git a/tig-algorithms/src/vehicle_routing/advanced_heuristics/README.md b/tig-algorithms/src/vehicle_routing/advanced_heuristics/README.md new file mode 100644 index 0000000..de36de6 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_heuristics/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** advanced_heuristics +* 
**Copyright:** 2024 CodeAlchemist +* **Identity of Submitter:** CodeAlchemist +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_heuristics/mod.rs b/tig-algorithms/src/vehicle_routing/advanced_heuristics/mod.rs new file mode 100644 index 0000000..38bf970 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_heuristics/mod.rs @@ -0,0 +1,234 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.57 { + return Ok(None) + } + + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + const INITIAL_TEMPERATURE: f32 = 2.0; + const COOLING_RATE: f32 = 0.995; + const ITERATIONS_PER_TEMPERATURE: usize = 2; + + let num_nodes = challenge.difficulty.num_nodes; + + let mut current_params = vec![1.0; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 { + return Ok(None); + } + + let mut temperature = INITIAL_TEMPERATURE; + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + while temperature > 1.0 { + for _ in 0..ITERATIONS_PER_TEMPERATURE { + let neighbor_params = generate_neighbor(¤t_params, &mut rng); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + let mut neighbor_solution = create_solution(challenge, &neighbor_params, &savings_list); + apply_local_search_until_no_improvement(&mut neighbor_solution, &challenge.distance_matrix); + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - current_cost as f32; + if delta < 0.0 || rng.gen::() < (-delta / temperature).exp() { + current_params = 
neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + temperature *= COOLING_RATE; + } + + Ok(best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + savings.push((0.0, i as u8, j as u8)); + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings(savings_list: &mut [(f32, u8, u8)], params: &[f32], challenge: &SubInstance) { + let distance_matrix = &challenge.distance_matrix; + + let mut zero_len = 0; + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = params[i] * distance_matrix[0][i] as f32 + + params[j] * distance_matrix[j][0] as f32 - + params[i] * params[j] * distance_matrix[i][j] as f32; + } + + savings_list.sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R) -> Vec { + current.iter().map(|¶m| { + let delta = rng.gen_range(-0.1..=0.1); + (param + delta).clamp(0.0, 2.0) + }).collect() + } + + #[inline] + fn apply_local_search_until_no_improvement(solution: &mut SubSolution, distance_matrix: &Vec>) { + let mut improved = true; + while improved { + improved = false; + for route in &mut solution.routes { + if two_opt(route, distance_matrix) { + improved = true; + } + } + } + } + #[inline] + fn two_opt(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + let mut improved = false; + + for i in 1..n - 2 { + for j in i + 1..n - 1 { + let current_distance = 
distance_matrix[route[i - 1]][route[i]] + + distance_matrix[route[j]][route[j + 1]]; + let new_distance = distance_matrix[route[i - 1]][route[j]] + + distance_matrix[route[i]][route[j + 1]]; + + if new_distance < current_distance { + route[i..=j].reverse(); + improved = true; + } + } + } + + improved + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution.routes.iter().map(|route| { + route.windows(2).map(|w| distance_matrix[w[0]][w[1]]).sum::() + }).sum() + } + + #[inline] + fn create_solution(challenge: &SubInstance, params: &[f32], savings_list: &[(f32, u8, u8)]) -> SubSolution { + let distance_matrix = &challenge.distance_matrix; + let max_capacity = challenge.max_capacity; + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + + let mut routes = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) { + let (left_start, left_end) = (*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = (*right_route.first().unwrap(), *right_route.last().unwrap()); + + if left_start == right_start || route_demands[left_start] + route_demands[right_start] > max_capacity { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + if left_start == i { new_route.reverse(); } + if right_end == j { right_route.reverse(); } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + 
routes[new_end] = Some(new_route); + } + } + + SubSolution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| route.filter(|r| r[0] == i)) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_routing/README.md b/tig-algorithms/src/vehicle_routing/advanced_routing/README.md new file mode 100644 index 0000000..5177b36 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_routing/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** advanced_routing +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/advanced_routing/mod.rs b/tig-algorithms/src/vehicle_routing/advanced_routing/mod.rs new file mode 100644 index 0000000..3c9e511 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/advanced_routing/mod.rs @@ -0,0 +1,228 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use rand::{rngs::{SmallRng, StdRng}, Rng, 
SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + const INITIAL_TEMPERATURE: f32 = 2.0; + const COOLING_RATE: f32 = 0.995; + const ITERATIONS_PER_TEMPERATURE: usize = 2; + + let num_nodes = challenge.difficulty.num_nodes; + + let mut current_params = vec![1.0; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 { + return Ok(None); + } + + let mut temperature = INITIAL_TEMPERATURE; + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + while temperature > 1.0 { + for _ in 0..ITERATIONS_PER_TEMPERATURE { + let neighbor_params = generate_neighbor(¤t_params, &mut rng); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + let mut neighbor_solution = create_solution(challenge, &neighbor_params, &savings_list); + apply_local_search_until_no_improvement(&mut neighbor_solution, &challenge.distance_matrix); + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - 
current_cost as f32; + if delta < 0.0 || rng.gen::() < (-delta / temperature).exp() { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + temperature *= COOLING_RATE; + } + + Ok(best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + savings.push((0.0, i as u8, j as u8)); + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings(savings_list: &mut [(f32, u8, u8)], params: &[f32], challenge: &SubInstance) { + let distance_matrix = &challenge.distance_matrix; + + let mut zero_len = 0; + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = params[i] * distance_matrix[0][i] as f32 + + params[j] * distance_matrix[j][0] as f32 - + params[i] * params[j] * distance_matrix[i][j] as f32; + } + + savings_list.sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R) -> Vec { + current.iter().map(|¶m| { + let delta = rng.gen_range(-0.1..=0.1); + (param + delta).clamp(0.0, 2.0) + }).collect() + } + + #[inline] + fn apply_local_search_until_no_improvement(solution: &mut SubSolution, distance_matrix: &Vec>) { + let mut improved = true; + while improved { + improved = false; + for route in &mut solution.routes { + if two_opt(route, distance_matrix) { + improved = true; + } + } + } + } + #[inline] + fn two_opt(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = 
route.len(); + let mut improved = false; + + for i in 1..n - 2 { + for j in i + 1..n - 1 { + let current_distance = distance_matrix[route[i - 1]][route[i]] + + distance_matrix[route[j]][route[j + 1]]; + let new_distance = distance_matrix[route[i - 1]][route[j]] + + distance_matrix[route[i]][route[j + 1]]; + + if new_distance < current_distance { + route[i..=j].reverse(); + improved = true; + } + } + } + + improved + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution.routes.iter().map(|route| { + route.windows(2).map(|w| distance_matrix[w[0]][w[1]]).sum::() + }).sum() + } + + #[inline] + fn create_solution(challenge: &SubInstance, params: &[f32], savings_list: &[(f32, u8, u8)]) -> SubSolution { + let distance_matrix = &challenge.distance_matrix; + let max_capacity = challenge.max_capacity; + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + + let mut routes = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) { + let (left_start, left_end) = (*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = (*right_route.first().unwrap(), *right_route.last().unwrap()); + + if left_start == right_start || route_demands[left_start] + route_demands[right_start] > max_capacity { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + if left_start == i { new_route.reverse(); } + if right_end == j { right_route.reverse(); } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = 
combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + } + + SubSolution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| route.filter(|r| r[0] == i)) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/better_routing/README.md b/tig-algorithms/src/vehicle_routing/better_routing/README.md new file mode 100644 index 0000000..238e2b6 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/better_routing/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** better_routing +* **Copyright:** 2025 frogmarch +* **Identity of Submitter:** frogmarch +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/better_routing/mod.rs b/tig-algorithms/src/vehicle_routing/better_routing/mod.rs new file mode 100644 index 0000000..cfe032c --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/better_routing/mod.rs @@ -0,0 +1,1118 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use std::collections::BTreeSet; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer 
compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + let better_than_baseline = sub_instance.difficulty.better_than_baseline; + + let sub_solution = if better_than_baseline < 50 { + simple_solver::solve_sub_instance_simple(sub_instance)? + } else { + complex_solver::solve_sub_instance_complex(sub_instance)? + }; + + match sub_solution { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + mod utils { + pub fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + + let mut curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = ready_times[insert_node] + .max(curr_time + distance_matrix[curr_node][insert_node]); + if new_arrival_time > due_times[insert_node] { + continue; + } + let old_arrival_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]); + + let c11 = distance_matrix[curr_node][insert_node] + + distance_matrix[insert_node][next_node] + - distance_matrix[curr_node][next_node]; + + let c12 = new_arrival_time - old_arrival_time; + + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * distance_matrix[0][insert_node] + c1; + + let c1_is_better = match best_c1 { + None => true, + Some(x) => c1 > x, + }; + + let c2_is_better = match best_c2 { + None => true, + Some(x) => c2 > x, + }; + + if c1_is_better + && c2_is_better + && is_feasible( + route, + 
distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]) + + service_time; + curr_node = next_node; + } + } + best + } + + pub fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, + ) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += distance_matrix[curr_node][next_node]; + if curr_time > due_times[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(ready_times[next_node]) + service_time; + curr_node = next_node; + } + valid + } + + pub fn compute_proximity( + i: usize, + j: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> f64 { + let time_ij = distance_matrix[i][j]; + let expr1 = (ready_times[j] - time_ij - service_time - due_times[i]).max(0) as f64 + + (ready_times[i] + service_time + time_ij - due_times[j]).max(0) as f64; + let expr2 = (ready_times[i] - time_ij - service_time - due_times[j]).max(0) as f64 + + (ready_times[j] + service_time + time_ij - due_times[i]).max(0) as f64; + time_ij as f64 + expr1.min(expr2) + } + + pub fn find_best_insertion_in_route( + route: &Vec, + node: usize, + demands: &Vec, + max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + let current_demand: i32 = route[1..route.len() - 1].iter().map(|&n| demands[n]).sum(); + if current_demand + demands[node] > max_capacity { + return None; + } + + let mut best_pos = None; + let mut best_delta = i32::MAX; + + for pos in 1..route.len() { + let prev_node = route[pos - 1]; + let next_node = route[pos]; 
+ let delta = distance_matrix[prev_node][node] + distance_matrix[node][next_node] + - distance_matrix[prev_node][next_node]; + + if check_feasible_insertion( + route, + node, + pos, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + if delta < best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + best_pos.map(|pos| (pos, best_delta)) + } + + pub fn check_feasible_insertion( + route: &Vec, + insert_node: usize, + insert_pos: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if insert_pos == route.len() - 1 { + let last_node = route[insert_pos - 1]; + let arrival_time = if last_node == 0 { + 0 + } else { + let mut time = 0; + let mut node = 0; + for &n in route.iter().take(insert_pos) { + if n == 0 { + continue; + } + time += distance_matrix[node][n]; + time = time.max(ready_times[n]); + if time > due_times[n] { + return false; + } + time += service_time; + node = n; + } + time + }; + + let new_arrival = arrival_time + distance_matrix[last_node][insert_node]; + if new_arrival > due_times[insert_node] { + return false; + } + + let departure = new_arrival.max(ready_times[insert_node]) + service_time; + let final_arrival = departure + distance_matrix[insert_node][0]; + + return final_arrival <= due_times[0]; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &node in route[..insert_pos].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + let travel_time = distance_matrix[curr_node][insert_node]; + curr_time += travel_time; + if curr_time > due_times[insert_node] { + return false; + } + + curr_time = curr_time.max(ready_times[insert_node]) + service_time; + curr_node = insert_node; + + for &node in route[insert_pos..].iter() { + if node == 0 
{ + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + true + } + + pub fn calculate_route_distance( + route: &Vec, + distance_matrix: &Vec>, + ) -> i32 { + let mut distance = 0; + for i in 0..route.len() - 1 { + distance += distance_matrix[route[i]][route[i + 1]]; + } + distance + } + } + + mod simple_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_simple( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by(|&a, &b| distance_matrix[0][a].cmp(&distance_matrix[0][b])); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: 
&Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + while improved { + improved = false; + + let route_demands: Vec = best_routes + .iter() + .map(|route| route[1..route.len() - 1].iter().map(|&n| demands[n]).sum()) + .collect(); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| { + ( + j, + compute_proximity( + i, + j, + distance_matrix, + ready_times, + due_times, + service_time, + ), + ) + }) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _pos2) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _delta_cost)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 + && 
new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + if !improved { + let current_routes = best_routes.clone(); + + for route_idx in 0..current_routes.len() { + let route = ¤t_routes[route_idx]; + + if route.len() < 4 { + continue; + } + + for i in 1..route.len() - 2 { + for j in i + 1..route.len() - 1 { + let mut new_route = Vec::with_capacity(route.len()); + + for k in 0..i { + new_route.push(route[k]); + } + + for k in (i..=j).rev() { + new_route.push(route[k]); + } + + for k in j + 1..route.len() { + new_route.push(route[k]); + } + + if !is_route_time_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let old_distance = calculate_route_distance(route, distance_matrix); + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + + if new_distance < old_distance { + let mut new_routes = current_routes.clone(); + new_routes[route_idx] = new_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if improved { + break; + } + } + if improved { + break; + } + } + } + } + + best_routes + } + + fn is_route_time_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + let mut curr_time = 
0; + let mut curr_node = route[0]; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if next_node != 0 && curr_time > due_times[next_node] { + return false; + } + + if next_node != 0 { + curr_time = curr_time.max(ready_times[next_node]); + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + } + + mod complex_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_complex( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by_key(|&a| distance_matrix[0][a]); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + if !remaining.is_empty() && remaining.len() > num_nodes / 8 { + return Ok(None); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, 
+ service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + let max_outer_iterations = 15; + let max_swap_iterations = 10; + let max_merge_iterations = 10; + let mut outer_iterations = 0; + + while improved && outer_iterations < max_outer_iterations { + improved = false; + outer_iterations += 1; + + let route_demands: Vec = best_routes + .iter() + .map(|route| route[1..route.len() - 1].iter().map(|&n| demands[n]).sum()) + .collect(); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| { + ( + j, + compute_proximity( + i, + j, + distance_matrix, + ready_times, + due_times, + service_time, + ), + ) + }) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, 
+ due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 + && new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + let mut swap_improved = true; + let mut swap_iterations = 0; + + while swap_improved && swap_iterations < max_swap_iterations { + swap_improved = false; + swap_iterations += 1; + + for route_idx in 0..best_routes.len() { + let route = best_routes[route_idx].clone(); + if route.len() <= 4 { + continue; + } + + for i in 1..route.len() - 1 { + for j in i + 1..route.len() - 1 { + if j == i + 1 { + continue; + } + + let mut new_route = route.clone(); + new_route.swap(i, j); + + if !is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let new_route_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_route_distance = + calculate_route_distance(&route, distance_matrix); + + if new_route_distance < old_route_distance { + let mut new_routes = best_routes.clone(); + new_routes[route_idx] = new_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + + if !swap_improved { + for route1_idx in 
0..best_routes.len() { + let route1 = best_routes[route1_idx].clone(); + + for route2_idx in route1_idx + 1..best_routes.len() { + let route2 = best_routes[route2_idx].clone(); + + for i in 1..route1.len() - 1 { + let node1 = route1[i]; + + for j in 1..route2.len() - 1 { + let node2 = route2[j]; + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + let new_route1_demand = + route1_demand - demands[node1] + demands[node2]; + let new_route2_demand = + route2_demand - demands[node2] + demands[node1]; + + if new_route1_demand > max_capacity + || new_route2_demand > max_capacity + { + continue; + } + + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = node2; + new_route2[j] = node1; + + if !is_route_feasible( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_feasible( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let old_distance = + calculate_route_distance(&route1, distance_matrix) + + calculate_route_distance( + &route2, + distance_matrix, + ); + let new_distance = + calculate_route_distance(&new_route1, distance_matrix) + + calculate_route_distance( + &new_route2, + distance_matrix, + ); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[route1_idx] = new_route1; + new_routes[route2_idx] = new_route2; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved 
{ + break; + } + } + if swap_improved { + break; + } + } + } + } + + let mut merge_improved = true; + let mut merge_iterations = 0; + + while merge_improved && merge_iterations < max_merge_iterations { + merge_improved = false; + merge_iterations += 1; + + for i in 0..best_routes.len() { + if merge_improved { + break; + } + + for j in 0..best_routes.len() { + if i == j { + continue; + } + + let route1 = &best_routes[i]; + let route2 = &best_routes[j]; + + if route1.len() <= 2 || route2.len() <= 2 { + continue; + } + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + if route1_demand + route2_demand <= max_capacity { + let mut best_insertion_pos = None; + let mut best_insertion_delta = i32::MAX; + + for &node in &route2[1..route2.len() - 1] { + for pos in 1..route1.len() { + let prev = route1[pos - 1]; + let next = route1[pos]; + + let insertion_delta = distance_matrix[prev][node] + + distance_matrix[node][next] + - distance_matrix[prev][next]; + + if insertion_delta < best_insertion_delta { + let mut test_route = route1.clone(); + test_route.insert(pos, node); + + if is_route_feasible( + &test_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + best_insertion_pos = Some(pos); + best_insertion_delta = insertion_delta; + } + } + } + } + + if let Some(pos) = best_insertion_pos { + let mut new_route = route1.clone(); + + for (idx, &node) in + route2[1..route2.len() - 1].iter().enumerate() + { + new_route.insert(pos + idx, node); + } + + if is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_distance = + calculate_route_distance(route1, distance_matrix) + + calculate_route_distance(route2, distance_matrix); + + if new_distance < old_distance { + let mut 
new_routes = best_routes.clone(); + new_routes[i] = new_route; + new_routes.remove(j); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + merge_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + } + } + } + } + + best_routes + } + + fn is_route_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if route.len() == 2 && route[0] == 0 && route[1] == 0 { + return true; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if curr_time > due_times[next_node] { + return false; + } + + curr_time = curr_time.max(ready_times[next_node]); + + if next_node != 0 { + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + } +} diff --git a/tig-algorithms/src/vehicle_routing/clarke_wright/README.md b/tig-algorithms/src/vehicle_routing/clarke_wright/README.md new file mode 100644 index 0000000..5e280c5 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/clarke_wright/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** clarke_wright +* **Copyright:** 2024 Uncharted Trading Limited +* **Identity of Submitter:** Uncharted Trading Limited +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can 
be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/clarke_wright/mod.rs b/tig-algorithms/src/vehicle_routing/clarke_wright/mod.rs new file mode 100644 index 0000000..75bdd2e --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/clarke_wright/mod.rs @@ -0,0 +1,95 @@ +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let d = &challenge.distance_matrix; + let c = challenge.max_capacity; + let n = challenge.difficulty.num_nodes; + + // Clarke-Wright heuristic for node pairs based on their distances to depot + // vs distance between each other + let mut scores: Vec<(i32, usize, usize)> = Vec::new(); + for i in 1..n { + for j in (i + 1)..n { + scores.push((d[i][0] + d[0][j] - d[i][j], i, j)); + } + } + scores.sort_by(|a, b| b.0.cmp(&a.0)); // Sort in descending order by score + + // Create a route for every node + let mut routes: Vec>> = (0..n).map(|i| Some(vec![i])).collect(); + routes[0] = None; + let mut route_demands: Vec = challenge.demands.clone(); + + // Iterate through node pairs, starting from greatest score + for (s, i, j) in scores { + // Stop if score is negative + if s < 0 { + break; + } + + // Skip if joining the nodes is not possible + if routes[i].is_none() || routes[j].is_none() { + continue; + } + + let left_route = routes[i].as_ref().unwrap(); + let right_route = routes[j].as_ref().unwrap(); + let mut left_startnode = left_route[0]; + let right_startnode = right_route[0]; + let left_endnode = left_route[left_route.len() - 1]; + let mut right_endnode = right_route[right_route.len() - 1]; + let merged_demand = route_demands[left_startnode] + route_demands[right_startnode]; + + if left_startnode == right_startnode || merged_demand > c { + continue; + } 
+ + let mut left_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + routes[left_startnode] = None; + routes[right_startnode] = None; + routes[left_endnode] = None; + routes[right_endnode] = None; + + // reverse it + if left_startnode == i { + left_route.reverse(); + left_startnode = left_endnode; + } + if right_endnode == j { + right_route.reverse(); + right_endnode = right_startnode; + } + + let mut new_route = left_route; + new_route.extend(right_route); + + // Only the start and end nodes of routes are kept + routes[left_startnode] = Some(new_route.clone()); + routes[right_endnode] = Some(new_route); + route_demands[left_startnode] = merged_demand; + route_demands[right_endnode] = merged_demand; + } + + let _ = save_solution(&Solution { + routes: routes + .into_iter() + .enumerate() + .filter(|(i, x)| x.as_ref().is_some_and(|x| x[0] == *i)) + .map(|(_, mut x)| { + let mut route = vec![0]; + route.append(x.as_mut().unwrap()); + route.push(0); + route + }) + .collect(), + }); + return Ok(()); +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/clarke_wright_super/README.md b/tig-algorithms/src/vehicle_routing/clarke_wright_super/README.md new file mode 100644 index 0000000..3990fc9 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/clarke_wright_super/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** clarke_wright_super +* **Copyright:** 2024 OvErLoDe +* **Identity of Submitter:** OvErLoDe +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: 
+https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/clarke_wright_super/mod.rs b/tig-algorithms/src/vehicle_routing/clarke_wright_super/mod.rs new file mode 100644 index 0000000..77e5791 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/clarke_wright_super/mod.rs @@ -0,0 +1,101 @@ +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let d = &challenge.distance_matrix; + let c = challenge.max_capacity; + let n = challenge.difficulty.num_nodes; + + // Clarke-Wright heuristic for node pairs based on their distances to depot + // vs distance between each other + let mut scores: Vec<(i32, usize, usize)> = Vec::with_capacity((n * (n - 1)) / 2); + for i in 1..n { + let d_i0 = d[i][0]; // Cache this value to avoid repeated lookups + for j in (i + 1)..n { + let score = d_i0 + d[0][j] - d[i][j]; + scores.push((score, i, j)); + } + } + scores.sort_unstable_by(|a, b| b.0.cmp(&a.0)); // Sort in descending order by score + + // Create a route for every node + let mut routes: Vec>> = (0..n).map(|i| Some(vec![i])).collect(); + routes[0] = None; + let mut route_demands: Vec = challenge.demands.clone(); + + // Iterate through node pairs, starting from greatest score + for (s, i, j) in scores { + // Stop if score is negative + if s < 0 { + break; + } + + // Skip if joining the nodes is not possible + if routes[i].is_none() || routes[j].is_none() { + continue; + } + + // Directly get the routes + let (left_route, right_route) = (routes[i].as_ref().unwrap(), routes[j].as_ref().unwrap()); + + // Cache indices and demands + let (left_startnode, left_endnode) = (left_route[0], *left_route.last().unwrap()); + let (right_startnode, right_endnode) = (right_route[0], *right_route.last().unwrap()); + let 
merged_demand = route_demands[left_startnode] + route_demands[right_startnode]; + + // Check constraints + if left_startnode == right_startnode || merged_demand > c { + continue; + } + + // Merge routes + let mut left_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + routes[left_startnode] = None; + routes[right_startnode] = None; + routes[left_endnode] = None; + routes[right_endnode] = None; + + // Reverse if needed + if left_startnode == i { + left_route.reverse(); + } + if right_endnode == j { + right_route.reverse(); + } + + // Create new route + let mut new_route = left_route; + new_route.extend(right_route); + + // Update routes and demands + let (start, end) = (*new_route.first().unwrap(), *new_route.last().unwrap()); + routes[start] = Some(new_route.clone()); + routes[end] = Some(new_route); + route_demands[start] = merged_demand; + route_demands[end] = merged_demand; + } + + let mut final_routes = Vec::new(); + + for (i, opt_route) in routes.into_iter().enumerate() { + if let Some(mut route) = opt_route { + if route[0] == i { + let mut full_route = Vec::with_capacity(route.len() + 2); + full_route.push(0); + full_route.append(&mut route); + full_route.push(0); + final_routes.push(full_route); + } + } + } + + let _ = save_solution(&Solution { + routes: final_routes, + }); + Ok(()) +} diff --git a/tig-algorithms/src/vehicle_routing/cw_heuristic/README.md b/tig-algorithms/src/vehicle_routing/cw_heuristic/README.md new file mode 100644 index 0000000..b79a5a2 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/cw_heuristic/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** cw_heuristic +* **Copyright:** 2024 Just +* **Identity of Submitter:** Just +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG 
Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/cw_heuristic/mod.rs b/tig-algorithms/src/vehicle_routing/cw_heuristic/mod.rs new file mode 100644 index 0000000..2b8e76b --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/cw_heuristic/mod.rs @@ -0,0 +1,127 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let d = &challenge.distance_matrix; + let c = challenge.max_capacity; + let n = challenge.difficulty.num_nodes; + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.57 { + return Ok(None) + } + + // Clarke-Wright heuristic for node pairs based on their distances to depot + // vs distance between each other + let mut scores: Vec<(i32, usize, usize)> = Vec::with_capacity((n-1)*(n-2)/2); + for i in 1..n { + for j in (i + 1)..n { + scores.push((d[i][0] + d[0][j] - d[i][j], i, j)); + } + } + + scores.sort_unstable_by(|a, b| b.0.cmp(&a.0)); + + // Create a route for every node + let mut routes: Vec>> = (0..n).map(|i| Some(vec![i])).collect(); + routes[0] = None; + let mut route_demands: Vec = challenge.demands.clone(); + + // Iterate through node pairs, starting from greatest score + for (s, i, j) in scores { + // Stop if score is negative + if s < 0 { + break; + } + + // Skip if joining the nodes is not possible + if routes[i].is_none() || routes[j].is_none() { + continue; + } + + let left_route = routes[i].as_ref().unwrap(); + let right_route = routes[j].as_ref().unwrap(); + let mut left_startnode = left_route[0]; + let right_startnode = right_route[0]; + let left_endnode = left_route[left_route.len() - 1]; + let mut right_endnode = right_route[right_route.len() - 1]; + let merged_demand = route_demands[left_startnode] + route_demands[right_startnode]; + + if left_startnode == right_startnode || merged_demand > c { + continue; + } + + let mut left_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + routes[left_startnode] = None; + routes[right_startnode] = None; + routes[left_endnode] = None; + routes[right_endnode] = None; + + // 
reverse it + if left_startnode == i { + left_route.reverse(); + left_startnode = left_endnode; + } + if right_endnode == j { + right_route.reverse(); + right_endnode = right_startnode; + } + + let mut new_route = left_route; + new_route.extend(right_route); + + // Only the start and end nodes of routes are kept + routes[left_startnode] = Some(new_route.clone()); + routes[right_endnode] = Some(new_route); + route_demands[left_startnode] = merged_demand; + route_demands[right_endnode] = merged_demand; + } + + let routes = routes + .into_iter() + .enumerate() + .filter(|(i, x)| x.as_ref().is_some_and(|x| x[0] == *i)) + .map(|(_, mut x)| { + let mut route = vec![0]; + route.append(x.as_mut().unwrap()); + route.push(0); + route + }) + .collect(); + + Ok(Some(SubSolution { + routes + })) + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_cw/README.md b/tig-algorithms/src/vehicle_routing/enhanced_cw/README.md new file mode 100644 index 0000000..e55b4e7 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_cw/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** enhanced_cw +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_cw/mod.rs b/tig-algorithms/src/vehicle_routing/enhanced_cw/mod.rs new file mode 100644 index 0000000..35a86af --- /dev/null +++ 
b/tig-algorithms/src/vehicle_routing/enhanced_cw/mod.rs @@ -0,0 +1,506 @@ +use rand::{ + rngs::{SmallRng, StdRng}, + Rng, SeedableRng, +}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + ) -> anyhow::Result<()> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const NUM_ITERATIONS: usize = 10000; + let num_nodes = challenge.difficulty.num_nodes; + + let mut rng = + SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.55 { + return Ok(()); + } + + let mut promising = false; + for _ in 0..2 { + let mut current_params = vec![1.0; num_nodes]; + let mut raw_savings = vec![vec![0.0; num_nodes]; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge, &mut raw_savings); + recompute_and_sort_savings(&mut savings_list, ¤t_params, &raw_savings); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = + calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + let _ = save_solution(¤t_solution); + return Ok(()); + } + + if (current_cost as f32 * 0.95) > challenge.baseline_total_distance as f32 && !promising + { + return Ok(()); + } else { + promising = true; + } + + let mut best_solution = Some(Solution { + routes: 
current_solution.routes.clone(), + }); + let mut best_cost = current_cost; + let mut best_initial_cost = current_cost; + + let mut iterations_since_improvement = 0; + let mut stagnation_factor = 1.0; + + for i in 0..NUM_ITERATIONS { + let neighbor_params = + generate_neighbor(¤t_params, &mut rng, i, NUM_ITERATIONS); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, &raw_savings); + + let mut neighbor_solution = + create_solution(challenge, &neighbor_params, &savings_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = + calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost - current_cost; + if delta <= 0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(Solution { + routes: current_solution.routes.clone(), + }); + } + } else { + iterations_since_improvement += 1; + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + Ok(global_best_solution) + } + + #[inline] + fn create_initial_savings_list( + challenge: &Challenge, + raw_savings: &mut [Vec], + ) -> Vec<(u32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 3; + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + if challenge.distance_matrix[i][j] <= threshold { + let saving = challenge.distance_matrix[0][i] as f32 + + challenge.distance_matrix[j][0] as 
f32 + - challenge.distance_matrix[i][j] as f32; + + if saving > 0.0 { + savings.push((0u32, i as u8, j as u8)); + + raw_savings[i][j] = saving; + raw_savings[j][i] = saving; + } + } + } + } + savings + } + + fn recompute_and_sort_savings( + savings_list: &mut [(u32, u8, u8)], + params: &[f32], + raw_savings: &[Vec], + ) { + unsafe { + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = !((*params.get_unchecked(i) + *params.get_unchecked(j)) + * raw_savings.get_unchecked(i).get_unchecked(j)) + .to_bits(); + } + + let mut counts = [0u32; 256]; + let mut buf = Vec::with_capacity(savings_list.len()); + buf.set_len(savings_list.len()); + + let savings_ptr: *mut (u32, u8, u8) = savings_list.as_mut_ptr(); + let buf_ptr: *mut (u32, u8, u8) = buf.as_mut_ptr(); + + for shift in [0, 8, 16, 24] { + counts.fill(0); + + for i in 0..savings_list.len() { + let bits = (*savings_ptr.add(i)).0; + let byte = ((bits >> shift) & 0xFF) as usize; + counts[byte] += 1; + } + + let mut total = 0u32; + for count in counts.iter_mut() { + let c = *count; + *count = total; + total += c; + } + + for i in 0..savings_list.len() { + let item = *savings_ptr.add(i); + let bits = item.0; + let byte = ((bits >> shift) & 0xFF) as usize; + let pos = counts[byte]; + *buf_ptr.add(pos as usize) = item; + counts[byte] += 1; + } + + std::ptr::copy_nonoverlapping(buf_ptr, savings_ptr, savings_list.len()); + } + } + } + + fn generate_neighbor( + current: &[f32], + rng: &mut R, + iteration: usize, + max_iterations: usize, + ) -> Vec { + let progress = iteration as f32 / max_iterations as f32; + let max_delta = 0.05 * (1.0 - progress) + 0.01 * progress; + + let mut result = Vec::with_capacity(current.len()); + + for ¶m in current { + // Randomly decide whether to update this parameter + if rng.gen_bool(0.1) { + let delta = rng.gen_range(-max_delta..max_delta); + result.push((param + delta).clamp(1.0, 2.0)); + } else { + // Keep the original parameter value + 
result.push(param); + } + } + + result + } + + #[inline] + fn calculate_solution_cost(solution: &Solution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route + .windows(2) + .map(|w| distance_matrix[w[0]][w[1]]) + .sum::() + }) + .sum() + } + + #[inline] + fn create_solution( + challenge: &Challenge, + params: &[f32], + savings_list: &[(u32, u8, u8)], + ) -> Solution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut routes = Vec::with_capacity(num_nodes); + routes.resize_with(num_nodes, || None); + + // Initialize routes + for i in 1..num_nodes { + let mut route = Vec::with_capacity(4); + route.push(i); + routes[i] = Some(route); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + + let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) + else { + continue; + }; + + let left_start = *left_route.first().unwrap(); + let left_end = *left_route.last().unwrap(); + let right_start = *right_route.first().unwrap(); + let right_end = *right_route.last().unwrap(); + + // Early skip for invalid combinations + if left_start == right_start + || route_demands[left_start] + route_demands[right_start] > max_capacity + { + continue; + } + + // Take ownership of routes + let mut new_route = routes[i].take().unwrap(); + let right_route = routes[j].take().unwrap(); + + // Pre-allocate space for the combined route + new_route.reserve(right_route.len()); + + // Handle route orientation + if left_start == i { + new_route.reverse(); + } + + // Extend route efficiently + if right_end == j { + new_route.extend(right_route.into_iter().rev()); + } else { + new_route.extend(right_route); + } + + // Update route information + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = 
*new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + // Store the new route + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + + // Point 3: Optimize final route construction + Solution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| { + route.and_then(|mut r| { + if r[0] == i { + let mut final_route = Vec::with_capacity(r.len() + 2); + final_route.push(0); + final_route.extend(r); + final_route.push(0); + Some(final_route) + } else { + None + } + }) + }) + .collect(), + } + } + + pub fn postprocess_solution( + solution: &mut Solution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) { + // Create copies of the routes + let original_routes = solution.routes.clone(); + let mut best_routes = original_routes.clone(); + + // Try first-improvement strategy on the original solution + let mut routes_to_check: Vec = vec![true; solution.routes.len()]; + + loop { + let mut improved = false; + for (idx, route) in solution.routes.iter_mut().enumerate() { + if !routes_to_check[idx] { + continue; + } + + if unsafe { two_opt_first_unsafe(route, distance_matrix) } { + improved = true; + routes_to_check[idx] = true; + } else { + routes_to_check[idx] = false; + } + } + if !improved { + break; + } + } + + // Do the same for best-improvement strategy + let mut routes_to_check = vec![true; best_routes.len()]; + loop { + let mut improved = false; + for (idx, route) in best_routes.iter_mut().enumerate() { + if !routes_to_check[idx] { + continue; + } + + if unsafe { two_opt_best_unsafe(route, distance_matrix) } { + improved = true; + routes_to_check[idx] = true; + } else { + routes_to_check[idx] = false; + } + } + if !improved { + break; + } + } + + // Calculate costs and use the better solution + let first_cost = calculate_solution_cost( + &Solution { + routes: solution.routes.clone(), + }, + distance_matrix, + ); + let best_cost = 
calculate_solution_cost( + &Solution { + routes: best_routes.clone(), + }, + distance_matrix, + ); + + if best_cost < first_cost { + solution.routes = best_routes; + } + } + + #[inline] + unsafe fn two_opt_best_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 1..(n - 2) { + let mut best_gain = 0; + let mut best_j = 0; + + for j in (i + 1)..(n - 1) { + let [r_im1, r_i, r_j, r_jp1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + + let gain = distance_matrix[r_im1][r_i] + distance_matrix[r_j][r_jp1] + - distance_matrix[r_im1][r_j] + - distance_matrix[r_i][r_jp1]; + + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + + if best_gain > 0 { + let mut start = i; + let mut end = best_j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + } + } + improved + } + + #[inline] + unsafe fn two_opt_first_unsafe( + route: &mut Vec, + distance_matrix: &Vec>, + ) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 1..(n - 2) { + for j in (i + 1)..(n - 1) { + let [r_im1, r_i, r_j, r_jp1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + + let gain = distance_matrix[r_im1][r_i] + distance_matrix[r_j][r_jp1] + - distance_matrix[r_im1][r_j] + - distance_matrix[r_i][r_jp1]; + + if gain > 0 { + let mut start = i; + let mut end = j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + break; // Exit the inner loop after first improvement + } + } + if improved { + break; // 
Exit the outer loop after first improvement + } + } + improved + } +} diff --git a/tig-algorithms/src/vehicle_routing/enhanced_heuristics/README.md b/tig-algorithms/src/vehicle_routing/enhanced_heuristics/README.md new file mode 100644 index 0000000..0cf1bba --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_heuristics/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** enhanced_heuristics +* **Copyright:** 2024 CodeAlchemist +* **Identity of Submitter:** CodeAlchemist +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_heuristics/mod.rs b/tig-algorithms/src/vehicle_routing/enhanced_heuristics/mod.rs new file mode 100644 index 0000000..47fa22d --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_heuristics/mod.rs @@ -0,0 +1,329 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + // TIG's UI uses the pattern `tig_challenges::` to automatically detect your algorithm's challenge + use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: 
&Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.57 { + return Ok(None) + } + + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + const INITIAL_TEMPERATURE: f32 = 2.0; + const COOLING_RATE: f32 = 0.995; + const ITERATIONS_PER_TEMPERATURE: usize = 2; + + let num_nodes = challenge.difficulty.num_nodes; + + let mut current_params = vec![1.0; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 { + return Ok(None); + } + + let mut temperature = INITIAL_TEMPERATURE; + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + while temperature > 1.0 { + for _ in 0..ITERATIONS_PER_TEMPERATURE { + let neighbor_params = generate_neighbor(¤t_params, &mut rng); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + let mut neighbor_solution = create_solution(challenge, &neighbor_params, &savings_list); + apply_local_search_until_no_improvement(&mut neighbor_solution, &challenge.distance_matrix); + let neighbor_cost = 
calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - current_cost as f32; + if delta < 0.0 || rng.gen::() < (-delta / temperature).exp() { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + temperature *= COOLING_RATE; + } + + if let Some(best_sol) = &best_solution { + let mut solution = SubSolution { + routes: best_sol.routes.clone() + }; + if try_inter_route_swap(&mut solution, &challenge.distance_matrix, &challenge.demands, challenge.max_capacity) { + let new_cost = calculate_solution_cost(&solution, &challenge.distance_matrix); + if new_cost < best_cost { + best_solution = Some(solution); + } + } + } + Ok(best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + let max_distance = challenge.distance_matrix.iter().flat_map(|row| row.iter()).cloned().max().unwrap_or(0); + let threshold = max_distance / 2; + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + if challenge.distance_matrix[i][j] <= threshold { + savings.push((0.0, i as u8, j as u8)); + } + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings(savings_list: &mut [(f32, u8, u8)], params: &[f32], challenge: &SubInstance) { + let distance_matrix = &challenge.distance_matrix; + + let mut zero_len = 0; + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = params[i] * distance_matrix[0][i] as f32 + + params[j] * distance_matrix[j][0] as f32 - + params[i] * 
params[j] * distance_matrix[i][j] as f32; + } + + savings_list.sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R) -> Vec { + current.iter().map(|¶m| { + let delta = rng.gen_range(-0.1..=0.1); + (param + delta).clamp(0.0, 2.0) + }).collect() + } + + #[inline] + fn apply_local_search_until_no_improvement(solution: &mut SubSolution, distance_matrix: &Vec>) { + let mut improved = true; + while improved { + improved = false; + for route in &mut solution.routes { + if two_opt(route, distance_matrix) { + improved = true; + } + } + } + } + #[inline] + fn two_opt(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + let mut improved = false; + + for i in 1..n - 2 { + for j in i + 1..n - 1 { + let current_distance = distance_matrix[route[i - 1]][route[i]] + + distance_matrix[route[j]][route[j + 1]]; + let new_distance = distance_matrix[route[i - 1]][route[j]] + + distance_matrix[route[i]][route[j + 1]]; + + if new_distance < current_distance { + route[i..=j].reverse(); + improved = true; + } + } + } + + improved + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution.routes.iter().map(|route| { + route.windows(2).map(|w| distance_matrix[w[0]][w[1]]).sum::() + }).sum() + } + + #[inline] + fn create_solution(challenge: &SubInstance, params: &[f32], savings_list: &[(f32, u8, u8)]) -> SubSolution { + let distance_matrix = &challenge.distance_matrix; + let max_capacity = challenge.max_capacity; + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + + let mut routes = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) { + let (left_start, left_end) = 
(*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = (*right_route.first().unwrap(), *right_route.last().unwrap()); + + if left_start == right_start || route_demands[left_start] + route_demands[right_start] > max_capacity { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + if left_start == i { new_route.reverse(); } + if right_end == j { right_route.reverse(); } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + } + + SubSolution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| route.filter(|r| r[0] == i)) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } + + + #[inline] + fn try_inter_route_swap( + solution: &mut SubSolution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32 + ) -> bool { + let mut improved = false; + let num_routes = solution.routes.len(); + + for i in 0..num_routes { + for j in i + 1..num_routes { + if let Some(better_routes) = find_best_swap( + &solution.routes[i], + &solution.routes[j], + distance_matrix, + demands, + max_capacity + ) { + solution.routes[i] = better_routes.0; + solution.routes[j] = better_routes.1; + improved = true; + } + } + } + + improved + } + + #[inline] + fn find_best_swap( + route1: &Vec, + route2: &Vec, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32 + ) -> Option<(Vec, Vec)> { + let mut best_improvement = 0; + let mut best_swap = None; + + for i in 1..route1.len() - 1 { + for j in 1..route2.len() - 1 { + let route1_demand: i32 = route1.iter().map(|&n| demands[n]).sum(); + let 
route2_demand: i32 = route2.iter().map(|&n| demands[n]).sum(); + let demand_delta = demands[route2[j]] - demands[route1[i]]; + + if route1_demand + demand_delta > max_capacity || + route2_demand - demand_delta > max_capacity { + continue; + } + + let old_cost = distance_matrix[route1[i-1]][route1[i]] + + distance_matrix[route1[i]][route1[i+1]] + + distance_matrix[route2[j-1]][route2[j]] + + distance_matrix[route2[j]][route2[j+1]]; + + let new_cost = distance_matrix[route1[i-1]][route2[j]] + + distance_matrix[route2[j]][route1[i+1]] + + distance_matrix[route2[j-1]][route1[i]] + + distance_matrix[route1[i]][route2[j+1]]; + + let improvement = old_cost - new_cost; + if improvement > best_improvement { + best_improvement = improvement; + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = route2[j]; + new_route2[j] = route1[i]; + best_swap = Some((new_route1, new_route2)); + } + } + } + + best_swap + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_routing/README.md b/tig-algorithms/src/vehicle_routing/enhanced_routing/README.md new file mode 100644 index 0000000..db17af7 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_routing/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** enhanced_routing +* **Copyright:** 2024 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git 
a/tig-algorithms/src/vehicle_routing/enhanced_routing/mod.rs b/tig-algorithms/src/vehicle_routing/enhanced_routing/mod.rs new file mode 100644 index 0000000..09b9d4f --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_routing/mod.rs @@ -0,0 +1,322 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let mut best_solution: Option = None; + let mut best_cost = std::i32::MAX; + + const INITIAL_TEMPERATURE: f32 = 2.0; + const COOLING_RATE: f32 = 0.995; + const ITERATIONS_PER_TEMPERATURE: usize = 2; + + let num_nodes = challenge.difficulty.num_nodes; + + let mut current_params = vec![1.0; num_nodes]; + let mut savings_list = create_initial_savings_list(challenge); + recompute_and_sort_savings(&mut savings_list, ¤t_params, challenge); + + let mut current_solution = create_solution(challenge, ¤t_params, &savings_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.96) > challenge.baseline_total_distance as f32 { + return 
Ok(None); + } + + let mut temperature = INITIAL_TEMPERATURE; + let mut rng = StdRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + while temperature > 1.0 { + for _ in 0..ITERATIONS_PER_TEMPERATURE { + let neighbor_params = generate_neighbor(¤t_params, &mut rng); + recompute_and_sort_savings(&mut savings_list, &neighbor_params, challenge); + let mut neighbor_solution = create_solution(challenge, &neighbor_params, &savings_list); + apply_local_search_until_no_improvement(&mut neighbor_solution, &challenge.distance_matrix); + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost as f32 - current_cost as f32; + if delta < 0.0 || rng.gen::() < (-delta / temperature).exp() { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + temperature *= COOLING_RATE; + } + + if let Some(best_sol) = &best_solution { + let mut solution = SubSolution { + routes: best_sol.routes.clone() + }; + if try_inter_route_swap(&mut solution, &challenge.distance_matrix, &challenge.demands, challenge.max_capacity) { + let new_cost = calculate_solution_cost(&solution, &challenge.distance_matrix); + if new_cost < best_cost { + best_solution = Some(solution); + } + } + } + Ok(best_solution) + } + + #[inline] + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(f32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + let max_distance = challenge.distance_matrix.iter().flat_map(|row| row.iter()).cloned().max().unwrap_or(0); + let threshold = max_distance / 2; + + for i 
in 1..num_nodes { + for j in (i + 1)..num_nodes { + if challenge.distance_matrix[i][j] <= threshold { + savings.push((0.0, i as u8, j as u8)); + } + } + } + savings + } + + #[inline] + fn recompute_and_sort_savings(savings_list: &mut [(f32, u8, u8)], params: &[f32], challenge: &SubInstance) { + let distance_matrix = &challenge.distance_matrix; + + let mut zero_len = 0; + for (score, i, j) in savings_list.iter_mut() { + let i = *i as usize; + let j = *j as usize; + *score = params[i] * distance_matrix[0][i] as f32 + + params[j] * distance_matrix[j][0] as f32 - + params[i] * params[j] * distance_matrix[i][j] as f32; + } + + savings_list.sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); + } + + #[inline] + fn generate_neighbor(current: &[f32], rng: &mut R) -> Vec { + current.iter().map(|¶m| { + let delta = rng.gen_range(-0.1..=0.1); + (param + delta).clamp(0.0, 2.0) + }).collect() + } + + #[inline] + fn apply_local_search_until_no_improvement(solution: &mut SubSolution, distance_matrix: &Vec>) { + let mut improved = true; + while improved { + improved = false; + for route in &mut solution.routes { + if two_opt(route, distance_matrix) { + improved = true; + } + } + } + } + #[inline] + fn two_opt(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + let mut improved = false; + + for i in 1..n - 2 { + for j in i + 1..n - 1 { + let current_distance = distance_matrix[route[i - 1]][route[i]] + + distance_matrix[route[j]][route[j + 1]]; + let new_distance = distance_matrix[route[i - 1]][route[j]] + + distance_matrix[route[i]][route[j + 1]]; + + if new_distance < current_distance { + route[i..=j].reverse(); + improved = true; + } + } + } + + improved + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution.routes.iter().map(|route| { + route.windows(2).map(|w| distance_matrix[w[0]][w[1]]).sum::() + }).sum() + } + + #[inline] + fn create_solution(challenge: &SubInstance, params: &[f32], 
savings_list: &[(f32, u8, u8)]) -> SubSolution { + let distance_matrix = &challenge.distance_matrix; + let max_capacity = challenge.max_capacity; + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + + let mut routes = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i, j) in savings_list { + let (i, j) = (i as usize, j as usize); + if let (Some(left_route), Some(right_route)) = (routes[i].as_ref(), routes[j].as_ref()) { + let (left_start, left_end) = (*left_route.first().unwrap(), *left_route.last().unwrap()); + let (right_start, right_end) = (*right_route.first().unwrap(), *right_route.last().unwrap()); + + if left_start == right_start || route_demands[left_start] + route_demands[right_start] > max_capacity { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut right_route = routes[j].take().unwrap(); + + if left_start == i { new_route.reverse(); } + if right_end == j { right_route.reverse(); } + + new_route.extend(right_route); + + let combined_demand = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined_demand; + route_demands[new_end] = combined_demand; + + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + } + + SubSolution { + routes: routes + .into_iter() + .enumerate() + .filter_map(|(i, route)| route.filter(|r| r[0] == i)) + .map(|mut route| { + route.insert(0, 0); + route.push(0); + route + }) + .collect(), + } + } + + + #[inline] + fn try_inter_route_swap( + solution: &mut SubSolution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32 + ) -> bool { + let mut improved = false; + let num_routes = solution.routes.len(); + + for i in 0..num_routes { + for j in i + 1..num_routes { + if let Some(better_routes) = find_best_swap( + 
&solution.routes[i], + &solution.routes[j], + distance_matrix, + demands, + max_capacity + ) { + solution.routes[i] = better_routes.0; + solution.routes[j] = better_routes.1; + improved = true; + } + } + } + + improved + } + + #[inline] + fn find_best_swap( + route1: &Vec, + route2: &Vec, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32 + ) -> Option<(Vec, Vec)> { + let mut best_improvement = 0; + let mut best_swap = None; + + for i in 1..route1.len() - 1 { + for j in 1..route2.len() - 1 { + let route1_demand: i32 = route1.iter().map(|&n| demands[n]).sum(); + let route2_demand: i32 = route2.iter().map(|&n| demands[n]).sum(); + let demand_delta = demands[route2[j]] - demands[route1[i]]; + + if route1_demand + demand_delta > max_capacity || + route2_demand - demand_delta > max_capacity { + continue; + } + + let old_cost = distance_matrix[route1[i-1]][route1[i]] + + distance_matrix[route1[i]][route1[i+1]] + + distance_matrix[route2[j-1]][route2[j]] + + distance_matrix[route2[j]][route2[j+1]]; + + let new_cost = distance_matrix[route1[i-1]][route2[j]] + + distance_matrix[route2[j]][route1[i+1]] + + distance_matrix[route2[j-1]][route1[i]] + + distance_matrix[route1[i]][route2[j+1]]; + + let improvement = old_cost - new_cost; + if improvement > best_improvement { + best_improvement = improvement; + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = route2[j]; + new_route2[j] = route1[i]; + best_swap = Some((new_route1, new_route2)); + } + } + } + + best_swap + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_solomon/README.md b/tig-algorithms/src/vehicle_routing/enhanced_solomon/README.md new file mode 100644 index 0000000..7cbe0a1 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_solomon/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** enhanced_solomon +* **Copyright:** 
2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/enhanced_solomon/mod.rs b/tig-algorithms/src/vehicle_routing/enhanced_solomon/mod.rs new file mode 100644 index 0000000..7817e55 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/enhanced_solomon/mod.rs @@ -0,0 +1,358 @@ +use std::collections::BTreeSet; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by(|&a, &b| distance_matrix[0][a].cmp(&distance_matrix[0][b])); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + // popping furthest node from depot + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] 
<= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + let _ = save_solution(&Solution { routes }); + return Ok(()); +} + +fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, +) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ).unwrap_or(i32::MAX); + let mut improved = true; + + while improved { + improved = false; + + let route_demands: Vec = best_routes.iter() + .map(|route| route[1..route.len()-1].iter().map(|&n| demands[n]).sum()) + .collect(); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, compute_proximity(i, j, distance_matrix, ready_times, due_times, service_time))) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, pos2) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + 
let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, delta_cost)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 && new_routes[route1_idx][pos1] == node { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + + best_routes +} + +fn compute_proximity( + i: usize, + j: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, +) -> f64 { + let time_ij = distance_matrix[i][j]; + let expr1 = (ready_times[j] - time_ij - service_time - due_times[i]).max(0) as f64 + + (ready_times[i] + service_time + time_ij - due_times[j]).max(0) as f64; + let expr2 = (ready_times[i] - time_ij - service_time - due_times[j]).max(0) as f64 + + (ready_times[j] + service_time + time_ij - due_times[i]).max(0) as f64; + time_ij as f64 + expr1.min(expr2) +} + +fn find_best_insertion_in_route( + route: &Vec, + node: usize, + demands: &Vec, + max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, +) -> Option<(usize, i32)> { + let current_demand: i32 = route[1..route.len()-1].iter().map(|&n| demands[n]).sum(); + if current_demand + demands[node] > max_capacity { + return None; + } + + let mut best_pos = None; + let mut best_delta = i32::MAX; + + for pos in 1..route.len() { + let prev_node 
= route[pos-1]; + let next_node = route[pos]; + let delta = distance_matrix[prev_node][node] + distance_matrix[node][next_node] - distance_matrix[prev_node][next_node]; + + if check_feasible_insertion(route, node, pos, distance_matrix, service_time, ready_times, due_times) { + if delta < best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + best_pos.map(|pos| (pos, best_delta)) +} + +fn check_feasible_insertion( + route: &Vec, + insert_node: usize, + insert_pos: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, +) -> bool { + let mut curr_time = 0; + let mut curr_node = 0; + for &node in route[..insert_pos].iter() { + if node == 0 { continue; } + curr_time += distance_matrix[curr_node][node]; + curr_time = curr_time.max(ready_times[node]); + if curr_time > due_times[node] { + return false; + } + curr_time += service_time; + curr_node = node; + } + + curr_time += distance_matrix[curr_node][insert_node]; + curr_time = curr_time.max(ready_times[insert_node]); + if curr_time > due_times[insert_node] { + return false; + } + curr_time += service_time; + curr_node = insert_node; + + for &node in route[insert_pos..].iter() { + if node == 0 { continue; } + curr_time += distance_matrix[curr_node][node]; + curr_time = curr_time.max(ready_times[node]); + if curr_time > due_times[node] { + return false; + } + curr_time += service_time; + curr_node = node; + } + + true +} + +pub fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, +) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + + let mut curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = + ready_times[insert_node].max(curr_time + 
distance_matrix[curr_node][insert_node]); + if new_arrival_time > due_times[insert_node] { + continue; + } + let old_arrival_time = + ready_times[next_node].max(curr_time + distance_matrix[curr_node][next_node]); + + // Distance criterion: c11 = d(i,u) + d(u,j) - mu * d(i,j) + let c11 = distance_matrix[curr_node][insert_node] + + distance_matrix[insert_node][next_node] + - distance_matrix[curr_node][next_node]; + + // Time criterion: c12 = b_ju - b_j (the shift in arrival time at position 'pos'). + let c12 = new_arrival_time - old_arrival_time; + + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * distance_matrix[0][insert_node] + c1; + + if best_c1.is_none_or(|x| c1 > x) + && best_c2.is_none_or(|x| c2 > x) + && is_feasible( + route, + distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]) + + service_time; + curr_node = next_node; + } + } + best +} + +fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, +) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += distance_matrix[curr_node][next_node]; + if curr_time > due_times[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(ready_times[next_node]) + service_time; + curr_node = next_node; + } + valid +} diff --git a/tig-algorithms/src/vehicle_routing/mod.rs b/tig-algorithms/src/vehicle_routing/mod.rs index 930b4fb..d29c337 100644 --- a/tig-algorithms/src/vehicle_routing/mod.rs +++ b/tig-algorithms/src/vehicle_routing/mod.rs @@ -1,4 +1,5 @@ -// c002_a001 +pub mod clarke_wright; +pub use clarke_wright as c002_a001; // c002_a002 @@ -66,9 +67,11 @@ // 
c002_a034 -// c002_a035 +pub mod cw_heuristic; +pub use cw_heuristic as c002_a035; -// c002_a036 +pub mod clarke_wright_super; +pub use clarke_wright_super as c002_a036; // c002_a037 @@ -94,7 +97,8 @@ // c002_a048 -// c002_a049 +pub mod advanced_routing; +pub use advanced_routing as c002_a049; // c002_a050 @@ -102,31 +106,40 @@ // c002_a052 -// c002_a053 +pub mod enhanced_routing; +pub use enhanced_routing as c002_a053; -// c002_a054 +pub mod advanced_heuristics; +pub use advanced_heuristics as c002_a054; // c002_a055 // c002_a056 -// c002_a057 +pub mod enhanced_heuristics; +pub use enhanced_heuristics as c002_a057; // c002_a058 -// c002_a059 +pub mod advanced_cw_opt; +pub use advanced_cw_opt as c002_a059; -// c002_a060 +pub mod advanced_cw_adp; +pub use advanced_cw_adp as c002_a060; -// c002_a061 +pub mod enhanced_cw; +pub use enhanced_cw as c002_a061; -// c002_a062 +pub mod new_enhanced_cw; +pub use new_enhanced_cw as c002_a062; // c002_a063 -// c002_a064 +pub mod new_enhanced_cw_opt; +pub use new_enhanced_cw_opt as c002_a064; -// c002_a065 +pub mod new_enhanced_cw_low; +pub use new_enhanced_cw_low as c002_a065; // c002_a066 @@ -134,25 +147,31 @@ // c002_a068 -// c002_a069 +pub mod enhanced_solomon; +pub use enhanced_solomon as c002_a069; // c002_a070 // c002_a071 -// c002_a072 +pub mod better_routing; +pub use better_routing as c002_a072; -// c002_a073 +pub mod routing_redone; +pub use routing_redone as c002_a073; -// c002_a074 +pub mod sausage; +pub use sausage as c002_a074; -// c002_a075 +pub mod native_routing; +pub use native_routing as c002_a075; // c002_a076 // c002_a077 -// c002_a078 +pub mod simple_ls_zero; +pub use simple_ls_zero as c002_a078; // c002_a079 @@ -168,7 +187,8 @@ // c002_a085 -// c002_a086 +pub mod vrptw_ultimate; +pub use vrptw_ultimate as c002_a086; // c002_a087 diff --git a/tig-algorithms/src/vehicle_routing/native_routing/README.md b/tig-algorithms/src/vehicle_routing/native_routing/README.md new file mode 100644 index 0000000..786bda2 
--- /dev/null +++ b/tig-algorithms/src/vehicle_routing/native_routing/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** native_routing +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/native_routing/mod.rs b/tig-algorithms/src/vehicle_routing/native_routing/mod.rs new file mode 100644 index 0000000..b322399 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/native_routing/mod.rs @@ -0,0 +1,1426 @@ +use serde_json::{Map, Value}; +use std::collections::BTreeSet; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + let better_than_baseline = sub_instance.difficulty.better_than_baseline; + + let sub_solution = if better_than_baseline < 50 { + simple_solver::solve_sub_instance_simple(sub_instance)? + } else { + complex_solver::solve_sub_instance_complex(sub_instance)? 
+ }; + + match sub_solution { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + fn calc_routes_total_distance( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> anyhow::Result { + let mut total_distance = 0; + for route in routes { + total_distance += utils::calculate_route_distance(route, distance_matrix); + } + Ok(total_distance) + } + + mod utils { + + pub fn precompute_proximity_matrix( + num_nodes: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> Vec> { + let mut proximity_matrix = vec![vec![0.0; num_nodes]; num_nodes]; + for i in 1..num_nodes { + for j in 1..num_nodes { + if i != j { + proximity_matrix[i][j] = compute_proximity( + i, + j, + distance_matrix, + ready_times, + due_times, + service_time, + ); + } + } + } + proximity_matrix + } + + pub fn calculate_route_demands(routes: &Vec>, demands: &Vec) -> Vec { + routes + .iter() + .map(|route| route[1..route.len() - 1].iter().map(|&n| demands[n]).sum()) + .collect() + } + + pub fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + + let mut curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = ready_times[insert_node] + .max(curr_time + distance_matrix[curr_node][insert_node]); + if new_arrival_time > due_times[insert_node] { + continue; + } + let old_arrival_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]); + + let c11 = 
distance_matrix[curr_node][insert_node] + + distance_matrix[insert_node][next_node] + - distance_matrix[curr_node][next_node]; + + let c12 = new_arrival_time - old_arrival_time; + + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * distance_matrix[0][insert_node] + c1; + + let c1_is_better = match best_c1 { + None => true, + Some(x) => c1 > x, + }; + + let c2_is_better = match best_c2 { + None => true, + Some(x) => c2 > x, + }; + + if c1_is_better + && c2_is_better + && is_feasible( + route, + distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]) + + service_time; + curr_node = next_node; + } + } + best + } + + pub fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, + ) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += distance_matrix[curr_node][next_node]; + if curr_time > due_times[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(ready_times[next_node]) + service_time; + curr_node = next_node; + } + valid + } + + pub fn compute_proximity( + i: usize, + j: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> f64 { + let time_ij = distance_matrix[i][j]; + let expr1 = (ready_times[j] - time_ij - service_time - due_times[i]).max(0) as f64 + + (ready_times[i] + service_time + time_ij - due_times[j]).max(0) as f64; + let expr2 = (ready_times[i] - time_ij - service_time - due_times[j]).max(0) as f64 + + (ready_times[j] + service_time + time_ij - due_times[i]).max(0) as f64; + time_ij as f64 + expr1.min(expr2) + } + + pub fn 
find_best_insertion_in_route( + route: &Vec, + node: usize, + demands: &Vec, + max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + let current_demand: i32 = route[1..route.len() - 1].iter().map(|&n| demands[n]).sum(); + if current_demand + demands[node] > max_capacity { + return None; + } + + let mut best_pos = None; + let mut best_delta = i32::MAX; + + for pos in 1..route.len() { + let prev_node = route[pos - 1]; + let next_node = route[pos]; + let delta = distance_matrix[prev_node][node] + distance_matrix[node][next_node] + - distance_matrix[prev_node][next_node]; + + if check_feasible_insertion( + route, + node, + pos, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + if delta < best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + best_pos.map(|pos| (pos, best_delta)) + } + + pub fn check_feasible_insertion( + route: &Vec, + insert_node: usize, + insert_pos: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if insert_pos == route.len() - 1 { + let last_node = route[insert_pos - 1]; + let arrival_time = if last_node == 0 { + 0 + } else { + let mut time = 0; + let mut node = 0; + for &n in route.iter().take(insert_pos) { + if n == 0 { + continue; + } + time += distance_matrix[node][n]; + time = time.max(ready_times[n]); + if time > due_times[n] { + return false; + } + time += service_time; + node = n; + } + time + }; + + let new_arrival = arrival_time + distance_matrix[last_node][insert_node]; + if new_arrival > due_times[insert_node] { + return false; + } + + let departure = new_arrival.max(ready_times[insert_node]) + service_time; + let final_arrival = departure + distance_matrix[insert_node][0]; + + return final_arrival <= due_times[0]; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &node in route[..insert_pos].iter() { + if node == 0 { + continue; + } + let 
travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + let travel_time = distance_matrix[curr_node][insert_node]; + curr_time += travel_time; + if curr_time > due_times[insert_node] { + return false; + } + + curr_time = curr_time.max(ready_times[insert_node]) + service_time; + curr_node = insert_node; + + for &node in route[insert_pos..].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + true + } + + pub fn calculate_route_distance( + route: &Vec, + distance_matrix: &Vec>, + ) -> i32 { + let mut distance = 0; + for i in 0..route.len() - 1 { + distance += distance_matrix[route[i]][route[i + 1]]; + } + distance + } + + pub fn apply_efficient_2opt( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + let mut best_route = route.clone(); + let mut best_distance = calculate_route_distance(&best_route, distance_matrix); + let mut improved = true; + let mut iteration = 0; + let max_iterations = (route.len() / 2).min(20); + + while improved && iteration < max_iterations { + improved = false; + iteration += 1; + + for i in 1..best_route.len() - 2 { + for j in i + 2..best_route.len() - 1 { + let mut new_route = Vec::with_capacity(best_route.len()); + + for k in 0..i { + new_route.push(best_route[k]); + } + + for k in (i..=j).rev() { + new_route.push(best_route[k]); + } + + for k in j + 1..best_route.len() { + new_route.push(best_route[k]); + } + + if is_route_time_feasible_fast( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let new_distance = + calculate_route_distance(&new_route, 
distance_matrix); + + if new_distance < best_distance { + best_distance = new_distance; + best_route = new_route; + improved = true; + break; + } + } + } + if improved { + break; + } + } + } + + best_route + } + + fn is_route_time_feasible_fast( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + let mut curr_time = 0; + let mut curr_node = route[0]; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if next_node != 0 && curr_time > due_times[next_node] { + return false; + } + + if next_node != 0 { + curr_time = curr_time.max(ready_times[next_node]); + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + + pub fn apply_size_filtered_local_search( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() <= 4 { + return route.clone(); + } + + apply_smart_local_search(route, distance_matrix, service_time, ready_times, due_times) + } + + pub fn apply_smart_local_search( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() <= 3 { + return route.clone(); + } + + let mut current_route = + apply_efficient_2opt(route, distance_matrix, service_time, ready_times, due_times); + + if route.len() > 6 { + current_route = apply_limited_or_opt( + ¤t_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + } + + current_route + } + + fn apply_limited_or_opt( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + let mut best_route = route.clone(); + let mut best_distance = calculate_route_distance(&best_route, distance_matrix); + let mut improved = true; + let mut iteration = 0; + let max_iterations = 3; + + while improved && iteration < max_iterations { + improved = false; + iteration += 1; + + for segment_size 
in 1..=2 { + for i in 1..best_route.len() - segment_size { + if i + segment_size >= best_route.len() - 1 { + continue; + } + + let segment: Vec = best_route[i..i + segment_size].to_vec(); + + for insert_pos in 1..best_route.len() { + if insert_pos >= i && insert_pos <= i + segment_size { + continue; + } + + let mut new_route = best_route.clone(); + + for _ in 0..segment_size { + new_route.remove(i); + } + + let actual_insert_pos = if insert_pos > i + segment_size { + insert_pos - segment_size + } else { + insert_pos + }; + + for (idx, &node) in segment.iter().enumerate() { + new_route.insert(actual_insert_pos + idx, node); + } + + if is_route_time_feasible_fast( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + if new_distance < best_distance { + best_distance = new_distance; + best_route = new_route; + improved = true; + break; + } + } + } + if improved { + break; + } + } + if improved { + break; + } + } + } + + best_route + } + + pub fn update_node_positions_for_routes( + node_positions: &mut Vec<(usize, usize)>, + routes: &Vec>, + route_indices: &[usize], + ) { + for &route_idx in route_indices { + let route = &routes[route_idx]; + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (route_idx, j + 1); + } + } + } + } + + mod simple_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_simple( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by(|&a, &b| 
distance_matrix[0][a].cmp(&distance_matrix[0][b])); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + route = apply_size_filtered_local_search( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + routes.push(route); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + let mut iteration_count = 0; + let max_total_iterations = 30; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + while improved && iteration_count < max_total_iterations { + improved = false; + iteration_count += 1; + + let mut 
route_demands = calculate_route_demands(&best_routes, demands); + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _pos2) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _delta_cost)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 + && new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + new_routes[route1_idx] = apply_size_filtered_local_search( + &new_routes[route1_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_routes[route2_idx] = apply_size_filtered_local_search( + &new_routes[route2_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + 
update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + if !improved { + let current_routes = best_routes.clone(); + + for route_idx in 0..current_routes.len() { + let route = ¤t_routes[route_idx]; + + let improved_route = apply_size_filtered_local_search( + route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + if improved_route != *route { + let mut new_routes = current_routes.clone(); + new_routes[route_idx] = improved_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route_idx], + ); + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + + best_routes + } + } + + mod complex_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_complex( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by_key(|&a| distance_matrix[0][a]); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() 
+ .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + route = apply_size_filtered_local_search( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + routes.push(route); + } + + if !remaining.is_empty() && remaining.len() > num_nodes / 8 { + return Ok(None); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + let max_outer_iterations = 15; + let max_swap_iterations = 5; + let max_merge_iterations = 5; + let mut outer_iterations = 0; + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + while improved && outer_iterations < max_outer_iterations { + improved = false; + outer_iterations += 1; + + let mut route_demands = calculate_route_demands(&best_routes, demands); + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, 
proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 + && new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + new_routes[route1_idx] = apply_size_filtered_local_search( + &new_routes[route1_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_routes[route2_idx] = apply_size_filtered_local_search( + &new_routes[route2_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + let mut swap_improved = true; + let mut swap_iterations = 0; + + while 
swap_improved && swap_iterations < max_swap_iterations { + swap_improved = false; + swap_iterations += 1; + + for route_idx in 0..best_routes.len() { + let route = best_routes[route_idx].clone(); + if route.len() <= 4 { + continue; + } + + for i in 1..route.len() - 1 { + for j in i + 1..route.len() - 1 { + if j == i + 1 { + continue; + } + + let mut new_route = route.clone(); + new_route.swap(i, j); + + if !is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let new_route_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_route_distance = + calculate_route_distance(&route, distance_matrix); + + if new_route_distance < old_route_distance { + let optimized_route = apply_size_filtered_local_search( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + let mut new_routes = best_routes.clone(); + new_routes[route_idx] = optimized_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route_idx], + ); + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + + if !swap_improved { + for route1_idx in 0..best_routes.len() { + let route1 = best_routes[route1_idx].clone(); + + for route2_idx in route1_idx + 1..best_routes.len() { + let route2 = best_routes[route2_idx].clone(); + + for i in 1..route1.len() - 1 { + let node1 = route1[i]; + + for j in 1..route2.len() - 1 { + let node2 = route2[j]; + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = 
route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + let new_route1_demand = + route1_demand - demands[node1] + demands[node2]; + let new_route2_demand = + route2_demand - demands[node2] + demands[node1]; + + if new_route1_demand > max_capacity + || new_route2_demand > max_capacity + { + continue; + } + + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = node2; + new_route2[j] = node1; + + if !is_route_feasible( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_feasible( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + new_route1 = apply_size_filtered_local_search( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_route2 = apply_size_filtered_local_search( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + let old_distance = + calculate_route_distance(&route1, distance_matrix) + + calculate_route_distance( + &route2, + distance_matrix, + ); + let new_distance = + calculate_route_distance(&new_route1, distance_matrix) + + calculate_route_distance( + &new_route2, + distance_matrix, + ); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[route1_idx] = new_route1; + new_routes[route2_idx] = new_route2; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + 
} + } + } + } + + let mut merge_improved = true; + let mut merge_iterations = 0; + + while merge_improved && merge_iterations < max_merge_iterations { + merge_improved = false; + merge_iterations += 1; + + for i in 0..best_routes.len() { + if merge_improved { + break; + } + + for j in 0..best_routes.len() { + if i == j { + continue; + } + + let route1 = &best_routes[i]; + let route2 = &best_routes[j]; + + if route1.len() <= 2 || route2.len() <= 2 { + continue; + } + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + if route1_demand + route2_demand <= max_capacity { + let mut best_insertion_pos = None; + let mut best_insertion_delta = i32::MAX; + + for &node in &route2[1..route2.len() - 1] { + for pos in 1..route1.len() { + let prev = route1[pos - 1]; + let next = route1[pos]; + + let insertion_delta = distance_matrix[prev][node] + + distance_matrix[node][next] + - distance_matrix[prev][next]; + + if insertion_delta < best_insertion_delta { + let mut test_route = route1.clone(); + test_route.insert(pos, node); + + if is_route_feasible( + &test_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + best_insertion_pos = Some(pos); + best_insertion_delta = insertion_delta; + } + } + } + } + + if let Some(pos) = best_insertion_pos { + let mut new_route = route1.clone(); + + for (idx, &node) in + route2[1..route2.len() - 1].iter().enumerate() + { + new_route.insert(pos + idx, node); + } + + if is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + new_route = apply_size_filtered_local_search( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_distance = + calculate_route_distance(route1, distance_matrix) + + 
calculate_route_distance(route2, distance_matrix); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[i] = new_route; + new_routes.remove(j); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + for (route_idx, route) in + best_routes.iter().enumerate() + { + for (j, &node) in route + [1..route.len() - 1] + .iter() + .enumerate() + { + node_positions[node] = + (route_idx, j + 1); + } + } + merge_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + } + } + } + } + + best_routes + } + + fn is_route_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if route.len() == 2 && route[0] == 0 && route[1] == 0 { + return true; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if curr_time > due_times[next_node] { + return false; + } + + curr_time = curr_time.max(ready_times[next_node]); + + if next_node != 0 { + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + } +} diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw/README.md b/tig-algorithms/src/vehicle_routing/new_enhanced_cw/README.md new file mode 100644 index 0000000..9585e88 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** new_enhanced_cw +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## 
License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw/mod.rs b/tig-algorithms/src/vehicle_routing/new_enhanced_cw/mod.rs new file mode 100644 index 0000000..61e99b1 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw/mod.rs @@ -0,0 +1,569 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const NUM_ITERATIONS: usize = 5000; + let num_nodes = challenge.difficulty.num_nodes; + + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.55 { + return Ok(None) + } + + let mut promising = false; + for _ in 0..4 { + let mut savings = Savings::new(challenge); + savings.sort_stable(); + + let mut current_params = vec![25; num_nodes]; + let mut current_solution = create_solution(challenge, &savings.stable_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.95) > challenge.baseline_total_distance as f32 && !promising { + return Ok(None); + } + else { + promising = true; + } + + savings.build_supplementary_structs(challenge); + + let mut best_solution = Some(SubSolution { routes: current_solution.routes.clone() }); + let mut best_cost = current_cost; + + for i in 0..NUM_ITERATIONS { + let (neighbor_params, modified_indices) = generate_neighbor( + ¤t_params, + &mut rng, + i, + NUM_ITERATIONS + ); + savings.recompute_savings(&neighbor_params, &modified_indices); + + let mut neighbor_solution = create_solution(challenge, &savings.unstable_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost - 
current_cost; + if delta <= 0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + savings.apply_unstable_list(); + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + Ok(global_best_solution) + } + + pub struct Savings { + pub stable_list: Vec<(u32, u8, u8)>, + + raw_savings: Vec>, + pub pair_map: Vec>, + pub unstable_list: Vec<(u32, u8, u8)>, + } + + impl Savings { + pub fn new(challenge: &SubInstance) -> Self { + let stable_list = Self::create_initial_savings_list(challenge); + + Self { + stable_list, + raw_savings: Vec::new(), + pair_map: Vec::new(), + unstable_list: Vec::new(), + } + } + + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(u32, u8, u8)> { + let num_nodes = challenge.difficulty.num_nodes; + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 3; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + let dist_ij = challenge.distance_matrix[i][j]; + if dist_ij <= threshold { + let saving = challenge.distance_matrix[0][i] + challenge.distance_matrix[j][0] - dist_ij; + if saving > 0 { + savings.push((!(50u32 * (saving as u32)), i as u8, j as u8)); + } + } + } + } + savings + } + + pub fn build_supplementary_structs(&mut self, challenge: &SubInstance) { + let num_nodes = challenge.difficulty.num_nodes; + + self.pair_map = vec![Vec::new(); num_nodes]; + self.raw_savings = vec![vec![0; num_nodes]; num_nodes]; + for &(_, i8, j8) in &self.stable_list { + 
let (i, j) = (i8 as usize, j8 as usize); + let saving = challenge.distance_matrix[0][i] + + challenge.distance_matrix[j][0] + - challenge.distance_matrix[i][j]; + + self.raw_savings[i][j] = saving as u32; + self.raw_savings[j][i] = saving as u32; + + self.pair_map[i].push(j); + self.pair_map[j].push(i); + } + + self.unstable_list = Vec::with_capacity(self.stable_list.len()); + self.unstable_list.resize(self.stable_list.len(), (0, 0, 0)); + } + + fn radix_sort(savings_list: &mut [(u32, u8, u8)]) { + unsafe { + let mut counts = [0u32; 256]; + let mut buf = Vec::with_capacity(savings_list.len()); + buf.set_len(savings_list.len()); + + let savings_ptr: *mut (u32, u8, u8) = savings_list.as_mut_ptr(); + let buf_ptr: *mut (u32, u8, u8) = buf.as_mut_ptr(); + + for shift in [0, 8, 16] { + counts.fill(0); + + for i in 0..savings_list.len() { + let bits = (*savings_ptr.add(i)).0; + let byte = ((bits >> shift) & 0xFF) as usize; + counts[byte] += 1; + } + + let mut total = 0u32; + for count in counts.iter_mut() { + let c = *count; + *count = total; + total += c; + } + + for i in 0..savings_list.len() { + let item = *savings_ptr.add(i); + let bits = item.0; + let byte = ((bits >> shift) & 0xFF) as usize; + let pos = counts[byte]; + *buf_ptr.add(pos as usize) = item; + counts[byte] += 1; + } + + std::ptr::copy_nonoverlapping(buf_ptr, savings_ptr, savings_list.len()); + } + } + } + + pub fn recompute_savings(&mut self, params: &[u32], modified_indices: &[usize]) { + let num_nodes = params.len(); + let mut reduced_savings = Vec::with_capacity(modified_indices.len() * modified_indices.len()); + let mut modified = vec![false; num_nodes]; + + unsafe { + for &i in modified_indices { + for &j in &self.pair_map[i] { + if *modified.get_unchecked(j) { + continue; + } + let base_saving = *self.raw_savings.get_unchecked(i).get_unchecked(j); + let new_score = (*params.get_unchecked(i) + *params.get_unchecked(j)) * base_saving; + reduced_savings.push((!new_score, i as u8, j as u8)); + } + 
*modified.get_unchecked_mut(i) = true; + } + } + + Self::radix_sort(&mut reduced_savings); + + let mut stable_idx = 0; + let mut reduced_idx = 0; + let mut k = 0; + + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + + while stable_idx < self.stable_list.len() && reduced_idx < reduced_savings.len() { + let stable_entry = self.stable_list[stable_idx]; + let reduced_entry = reduced_savings[reduced_idx]; + + if stable_entry.0 < reduced_entry.0 { + self.unstable_list[k] = stable_entry; + + stable_idx += 1; + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + } else { + self.unstable_list[k] = reduced_entry; + reduced_idx += 1; + } + k += 1; + } + + while stable_idx < self.stable_list.len() { + let entry = self.stable_list[stable_idx]; + + if !modified[entry.1 as usize] && !modified[entry.2 as usize] { + self.unstable_list[k] = entry; + k += 1; + } + stable_idx += 1; + } + + while reduced_idx < reduced_savings.len() { + self.unstable_list[k] = reduced_savings[reduced_idx]; + k += 1; + reduced_idx += 1; + } + } + + pub fn apply_unstable_list(&mut self) { + std::mem::swap(&mut self.stable_list, &mut self.unstable_list); + } + + pub fn sort_stable(&mut self) { + Self::radix_sort(&mut self.stable_list); + } + } + + fn generate_neighbor( + current: &[u32], + rng: &mut R, + iteration: usize, + max_iterations: usize, + ) -> (Vec, Vec) { + let progress = iteration as f32 / max_iterations as f32; + let base_prob = 0.5 * (-7.0 * progress).exp() + 0.04; + let max_steps = 2; + + let mut result = current.to_vec(); + let mut modified_indices = Vec::new(); + + while modified_indices.is_empty() { + for (i, ¶m) in current.iter().enumerate() { + if rng.gen_bool(base_prob as f64) { + let steps = rng.gen_range(1..=max_steps); + + let 
sign = if rng.gen_bool(0.5) { 1 } else { -1 }; + result[i] = (param as i32 + sign * steps).clamp(25, 50) as u32; + modified_indices.push(i); + } + } + } + + (result, modified_indices) + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route.windows(2).map(|pair| distance_matrix[pair[0]][pair[1]]).sum::() + }) + .sum() + } + + #[inline] + fn create_solution( + challenge: &SubInstance, + savings_list: &[(u32, u8, u8)], + ) -> SubSolution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut routes = vec![None; num_nodes]; + for i in 1..num_nodes { + routes[i] = Some(vec![i]); + } + let mut route_demands = demands.clone(); + + for &(_, i8, j8) in savings_list { + let (i, j) = (i8 as usize, j8 as usize); + if routes[i].is_none() || routes[j].is_none() { + continue; + } + + let left_route = routes[i].as_ref().unwrap(); + let right_route = routes[j].as_ref().unwrap(); + let left_start = *left_route.first().unwrap(); + let right_start = *right_route.first().unwrap(); + if left_start == right_start + || route_demands[left_start] + route_demands[right_start] > max_capacity + { + continue; + } + + let mut new_route = routes[i].take().unwrap(); + let mut other_route = routes[j].take().unwrap(); + let left_end = *new_route.last().unwrap(); + let right_end = *other_route.last().unwrap(); + + if left_start == i { + new_route.reverse(); + } + if right_end == j { + other_route.reverse(); + } + new_route.extend(other_route); + + let combined = route_demands[left_start] + route_demands[right_start]; + let new_start = new_route[0]; + let new_end = *new_route.last().unwrap(); + + route_demands[new_start] = combined; + route_demands[new_end] = combined; + routes[new_start] = Some(new_route.clone()); + routes[new_end] = Some(new_route); + } + + let final_routes = routes + .into_iter() + 
.enumerate() + .filter_map(|(i, r)| { + r.and_then(|v| { + if v[0] == i { + let mut route = Vec::with_capacity(v.len() + 2); + route.push(0); + route.extend(v); + route.push(0); + Some(route) + } else { + None + } + }) + }) + .collect(); + + SubSolution { routes: final_routes } + } + + + pub fn postprocess_solution( + solution: &mut SubSolution, + distance_matrix: &Vec>, + demands: &Vec, + max_capacity: i32, + ) { + let original_routes = solution.routes.clone(); + let mut best_routes = original_routes.clone(); + + let mut routes_to_check: Vec = vec![true; solution.routes.len()]; + + // // First-improvement 2-opt + // loop { + // let mut improved = false; + // for (idx, route) in solution.routes.iter_mut().enumerate() { + // if !routes_to_check[idx] { + // continue; + // } + // if unsafe { two_opt_first_unsafe(route, distance_matrix) } { + // improved = true; + // routes_to_check[idx] = true; + // } else { + // routes_to_check[idx] = false; + // } + // } + // if !improved { + // break; + // } + // } + + // Best-improvement 2-opt + let mut routes_to_check = vec![true; best_routes.len()]; + loop { + let mut improved = false; + for (idx, route) in best_routes.iter_mut().enumerate() { + if !routes_to_check[idx] { + continue; + } + if unsafe { two_opt_best_unsafe(route, distance_matrix) } { + improved = true; + routes_to_check[idx] = true; + } else { + routes_to_check[idx] = false; + } + } + if !improved { + break; + } + } + + //let first_cost = calculate_solution_cost(&SubSolution { routes: solution.routes.clone() }, distance_matrix); + let best_cost = calculate_solution_cost(&SubSolution { routes: best_routes.clone() }, distance_matrix); + + //if best_cost < first_cost { + solution.routes = best_routes; + //} + } + + #[inline] + unsafe fn two_opt_best_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 1..(n - 2) { + let mut 
best_gain = 0; + let mut best_j = 0; + for j in (i + 1)..(n - 1) { + let [ri_m1, ri, rj, rj_p1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + let gain = distance_matrix[ri_m1][ri] + + distance_matrix[rj][rj_p1] + - distance_matrix[ri_m1][rj] + - distance_matrix[ri][rj_p1]; + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + if best_gain > 0 { + let mut start = i; + let mut end = best_j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + } + } + improved + } + + #[inline] + unsafe fn two_opt_first_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut improved = false; + let route_ptr = route.as_mut_ptr(); + + for i in 1..(n - 2) { + for j in (i + 1)..(n - 1) { + let [ri_m1, ri, rj, rj_p1] = [ + *route_ptr.add(i - 1), + *route_ptr.add(i), + *route_ptr.add(j), + *route_ptr.add(j + 1), + ]; + let gain = distance_matrix[ri_m1][ri] + + distance_matrix[rj][rj_p1] + - distance_matrix[ri_m1][rj] + - distance_matrix[ri][rj_p1]; + // First-improvement + if gain > 0 { + let mut start = i; + let mut end = j; + while start < end { + let tmp = *route_ptr.add(start); + *route_ptr.add(start) = *route_ptr.add(end); + *route_ptr.add(end) = tmp; + start += 1; + end -= 1; + } + improved = true; + break; + } + } + if improved { + break; + } + } + improved + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/README.md b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/README.md new file mode 100644 index 0000000..b79b951 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** new_enhanced_cw_low +* 
**Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/mod.rs b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/mod.rs new file mode 100644 index 0000000..01627c6 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_low/mod.rs @@ -0,0 +1,644 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? 
{ + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const OUTER_ITERATIONS: usize = 1; + let iterations_per_outer = [5000, 10000, 10000]; + let inner_iters_per_outer = [4, 1, 1]; + let num_nodes = challenge.difficulty.num_nodes; + + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.55 { + return Ok(None) + } + + let mut promising = false; + let mut iteration_results: Vec<(Vec, i32)> = Vec::new(); + + let mut best_outer_params : Vec = Vec::new(); + + for outer_iter in 0..OUTER_ITERATIONS { + let num_iterations = iterations_per_outer[outer_iter]; + let inner_iterations = inner_iters_per_outer[outer_iter]; + + for inner_iter in 0..inner_iterations { + let mut savings = Savings::new(challenge); + savings.sort_stable(); + + let mut current_params = vec![25; num_nodes]; + + let mut current_solution = create_solution(challenge, &savings.stable_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.95) > challenge.baseline_total_distance as f32 && !promising { + return Ok(None); + } + else { + promising = true; + } + + savings.build_supplementary_structs(challenge); + + let mut best_solution = Some(SubSolution { routes: current_solution.routes.clone() }); + let mut best_cost = current_cost; + + for i in 0..num_iterations { + let (mut neighbor_params, mut modified_indices) = generate_neighbor( + ¤t_params, + best_solution.as_ref().unwrap(), + &challenge, + &mut 
rng, + i, + num_iterations + ); + + if outer_iter > 0 && i == 0 { + neighbor_params = best_outer_params.clone(); + modified_indices = (0..num_nodes).collect(); + }; + + if !savings.recompute_savings(&neighbor_params, &modified_indices) { + continue; + } + + let mut neighbor_solution = create_solution(challenge, &savings.unstable_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost - current_cost; + if delta <= 0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + savings.apply_unstable_list(); + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + } + + iteration_results.push(( + current_params, + best_cost + )); + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + if outer_iter < OUTER_ITERATIONS - 1 { + let (best_params, best_cost) = iteration_results.iter() + .min_by_key(|&(_, cost)| cost) + .map(|(params, cost)| (params.clone(), *cost)) + .unwrap(); + + iteration_results.clear(); + best_outer_params = best_params; + } + } + + Ok(global_best_solution) + } + + pub struct Savings { + pub stable_list: Vec<(u32, u16, u16)>, + + raw_savings: Vec>, + pub pair_map: Vec>, + pub unstable_list: Vec<(u32, u16, u16)>, + } + + impl Savings { + pub fn new(challenge: &SubInstance) -> Self { + let stable_list = Self::create_initial_savings_list(challenge); + + Self { + stable_list, + raw_savings: Vec::new(), + pair_map: Vec::new(), + unstable_list: Vec::new(), + } + } + + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(u32, u16, u16)> { + 
let num_nodes = challenge.difficulty.num_nodes; + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 3; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + let dist_ij = challenge.distance_matrix[i][j]; + if dist_ij <= threshold { + let saving = challenge.distance_matrix[0][i] + challenge.distance_matrix[j][0] - dist_ij; + if saving > 0 { + savings.push((!(saving as u32), i as u16, j as u16)); + } + } + } + } + savings + } + + pub fn build_supplementary_structs(&mut self, challenge: &SubInstance) { + let num_nodes = challenge.difficulty.num_nodes; + let mask_size = (num_nodes + 63) / 64; // Calculate number of u64 chunks needed + + // Initialize pair_map as a bitmask with 64-bit chunks + self.pair_map = vec![vec![0u64; mask_size]; num_nodes]; + self.raw_savings = vec![vec![0; num_nodes]; num_nodes]; + + for &(_, i16, j16) in &self.stable_list { + let (i, j) = (i16 as usize, j16 as usize); + let saving = challenge.distance_matrix[0][i] + + challenge.distance_matrix[j][0] + - challenge.distance_matrix[i][j]; + + self.raw_savings[i][j] = saving as u32; + self.raw_savings[j][i] = saving as u32; + + let (idx, bit) = (j / 64, j % 64); + self.pair_map[i][idx] |= 1u64 << bit; + + let (idx, bit) = (i / 64, i % 64); + self.pair_map[j][idx] |= 1u64 << bit; + } + + self.unstable_list = Vec::with_capacity(self.stable_list.len()); + self.unstable_list.resize(self.stable_list.len(), (0, 0, 0)); + } + + fn radix_sort(savings_list: &mut [(u32, u16, u16)]) { + unsafe { + // 1. 
Use usize for counts to prevent overflow with large arrays + let mut counts_low = [0u32; 512]; + let mut counts_high = [0u32; 512]; + let mut buf = Vec::with_capacity(savings_list.len()); + buf.set_len(savings_list.len()); + + let savings_ptr : *mut (u32, u16, u16) = savings_list.as_mut_ptr(); + let buf_ptr : *mut (u32, u16, u16) = buf.as_mut_ptr(); + + let mut ptr = savings_ptr; + for _ in 0..savings_list.len() { + let bits = (*ptr).0; + counts_low[(bits & 511) as usize] += 1; + counts_high[((bits >> 9) & 511) as usize] += 1; + ptr = ptr.add(1); + } + + let mut total_low = 0; + let mut total_high = 0; + for i in 0..512 { + let cl = counts_low[i]; + let ch = counts_high[i]; + counts_low[i] = total_low; + counts_high[i] = total_high; + total_low += cl; + total_high += ch; + } + + let mut src = savings_ptr; + let mut dst = buf_ptr; + for _ in 0..savings_list.len() { + let item = *src; + let byte = (item.0 & 511) as usize; + let pos = counts_low[byte] as usize; + *dst.add(pos) = item; + counts_low[byte] += 1; + src = src.add(1); + } + + let mut src = buf_ptr; + let mut dst = savings_ptr; + for _ in 0..savings_list.len() { + let item = *src; + let byte = ((item.0 >> 9) & 511) as usize; + let pos = counts_high[byte] as usize; + *dst.add(pos) = item; + counts_high[byte] += 1; + src = src.add(1); + } + } + } + + pub fn recompute_savings(&mut self, params: &[u32], modified_indices: &[usize]) -> bool { + let num_nodes = params.len(); + let mut reduced_savings = Vec::with_capacity(modified_indices.len() * num_nodes / 10); + let mut modified = vec![false; num_nodes]; + + let mut mask_len = (num_nodes + 63) / 64; + let mut visited = vec![0u64; mask_len]; + + unsafe { + for &i in modified_indices { + for k in 0..mask_len { + let chunk = *self.pair_map.get_unchecked(i).get_unchecked(k); + let mut unvisited_pairs_mask = chunk & !visited[k]; + + while unvisited_pairs_mask != 0 { + let bit_pos = unvisited_pairs_mask.trailing_zeros() as usize; + let j = bit_pos + 64 * k; + + let 
base_saving = *self.raw_savings.get_unchecked(i).get_unchecked(j); + let new_score = (*params.get_unchecked(i) + *params.get_unchecked(j)) * base_saving; + reduced_savings.push((!new_score, i as u16, j as u16)); + + unvisited_pairs_mask &= unvisited_pairs_mask - 1; + } + } + let (idx, bit) = (i / 64, i % 64); + *visited.get_unchecked_mut(idx) |= 1 << bit; + modified[i] = true; + } + } + + if reduced_savings.len() == 0 { + return false; + } + Self::radix_sort(&mut reduced_savings); + + let mut stable_idx = 0; + let mut reduced_idx = 0; + let mut k = 0; + + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + + while stable_idx < self.stable_list.len() && reduced_idx < reduced_savings.len() { + let stable_entry = self.stable_list[stable_idx]; + let reduced_entry = reduced_savings[reduced_idx]; + + if stable_entry.0 < reduced_entry.0 { + self.unstable_list[k] = stable_entry; + + stable_idx += 1; + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + } else { + self.unstable_list[k] = reduced_entry; + reduced_idx += 1; + } + k += 1; + } + + while stable_idx < self.stable_list.len() { + let entry = self.stable_list[stable_idx]; + + if !modified[entry.1 as usize] && !modified[entry.2 as usize] { + self.unstable_list[k] = entry; + k += 1; + } + stable_idx += 1; + } + + while reduced_idx < reduced_savings.len() { + self.unstable_list[k] = reduced_savings[reduced_idx]; + k += 1; + reduced_idx += 1; + } + return true; + } + + pub fn apply_unstable_list(&mut self) { + std::mem::swap(&mut self.stable_list, &mut self.unstable_list); + } + + pub fn sort_stable(&mut self) { + Self::radix_sort(&mut self.stable_list); + } + } + + fn generate_neighbor( + current: &[u32], + best_solution: &SubSolution, + challenge : &SubInstance, + 
rng: &mut R, + iteration: usize, + max_iterations: usize, + ) -> (Vec, Vec) { + let progress = iteration as f32 / max_iterations as f32; + let base_prob = 0.5 * (-5.0 * progress).exp() + 0.04; + let max_steps = 2; + + let mut result = current.to_vec(); + let mut modified_indices = Vec::with_capacity(challenge.difficulty.num_nodes / 2); + + while modified_indices.is_empty() { + for (i, ¶m) in current.iter().enumerate() { + if rng.gen_bool(base_prob as f64) { + let steps = rng.gen_range(1..=max_steps); + + let sign = if rng.gen_bool(0.5) { 1 } else { -1 }; + result[i] = (param as i32 + sign * steps).clamp(25, 50) as u32; + modified_indices.push(i); + } + } + } + + let mut pairs = Vec::with_capacity(3 * challenge.difficulty.num_nodes); + for route in &best_solution.routes { + for i in 1..route.len()-1 { + for j in i+1..route.len()-1 { + pairs.push((route[i], route[j])); + } + } + } + + if pairs.len() > 0{ + let pair_idx = rng.gen_range(0..pairs.len()); + let idx1 = pairs[pair_idx].0; + let idx2 = pairs[pair_idx].1; + + result.swap(idx1, idx2); + + if !modified_indices.contains(&idx1) { + modified_indices.push(idx1); + } + if !modified_indices.contains(&idx2) { + modified_indices.push(idx2); + } + } + + (result, modified_indices) + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route.windows(2).map(|pair| distance_matrix[pair[0]][pair[1]]).sum::() + }) + .sum() + } + + #[inline(never)] + fn create_solution( + challenge: &SubInstance, + savings_list: &[(u32, u16, u16)] + ) -> SubSolution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut node_links: Vec<[Option; 2]> = vec![[None, None]; num_nodes]; + let mut route : Vec<(usize, usize)> = (0..num_nodes) + .map(|i| (i, i)) + .collect(); + + let mut route_demands = demands.clone(); + + for &(_, i16, j16) in savings_list { + let 
(i, j) = (i16 as usize, j16 as usize); + + let route_demands_left = route_demands[i]; + let route_demands_right = route_demands[j]; + if route_demands_left + route_demands_right > max_capacity { + continue; + } + + let route_i_start = route[i].0; + let route_j_start = route[j].0; + let route_i_end = route[i].1; + let route_j_end = route[j].1; + if route_i_start == route_j_start || route_i_start == route_j_end + || route_i_end == route_j_start || route_i_end == route_j_end { + continue; + } + + let node_i_left = node_links[i][0]; + let node_i_right = node_links[i][1]; + + let node_j_left = node_links[j][0]; + let node_j_right = node_links[j][1]; + + let mut dir_i = usize::from(node_i_left.is_some()); + let mut dir_j = usize::from(node_j_left.is_some()); + + node_links[i][dir_i] = Some(j); + node_links[j][dir_j] = Some(i); + + if node_i_left.is_some() || node_i_right.is_some() { + route_demands[i] = max_capacity + 1; + } + if node_j_left.is_some() || node_j_right.is_some() { + route_demands[j] = max_capacity + 1; + } + + let opposite_i = if route_i_start == i { + route_i_end + } else { + route_i_start + }; + + let opposite_j = if route_j_start == j { + route_j_end + } else { + route_j_start + }; + + let new_route = (opposite_i, opposite_j); + route[opposite_i] = new_route; + route[opposite_j] = new_route; + + let combined_demand = route_demands_left + route_demands_right; + route_demands[opposite_i] = combined_demand; + route_demands[opposite_j] = combined_demand; + } + + let final_routes = extract_routes( + num_nodes, + &route_demands, + max_capacity, + &route, + &node_links + ); + + SubSolution { routes: final_routes } + } + + #[inline(never)] + fn extract_routes( + num_nodes: usize, + route_demands: &[i32], + max_capacity: i32, + route: &[(usize, usize)], + node_links: &[[Option; 2]], + ) -> Vec> { + let mut visited = vec![false; num_nodes]; + + let mut all_nodes = Vec::with_capacity(3 * num_nodes); + let mut final_routes = Vec::with_capacity(num_nodes / 2); + + 
for i in 1..num_nodes { + if route_demands[i] > max_capacity || route[i].0 != i { + continue; + } + let route_start_idx = all_nodes.len(); + + let mut route_iter = i; + let route_end = route[i].1; + + all_nodes.push(0); + all_nodes.push(route_iter); + visited[route_iter] = true; + + while route_iter != route_end { + let links = node_links[route_iter]; + let next = match links { + [Some(left), Some(right)] => { + if !visited[left] { left } else { right } + }, + [Some(next), None] | [None, Some(next)] => next, + _ => break, + }; + route_iter = next; + all_nodes.push(route_iter); + visited[route_iter] = true; + } + all_nodes.push(0); + + let route_len = all_nodes.len() - route_start_idx; + final_routes.push(all_nodes[route_start_idx..all_nodes.len()].to_vec()); + } + + final_routes + } + + + #[inline(never)] + pub fn postprocess_solution( + solution: &mut SubSolution, + distance_matrix: &Vec>, + _demands: &Vec, + _max_capacity: i32, + ) { + for route in solution.routes.iter_mut() { + unsafe { two_opt_best_unsafe(route, distance_matrix) }; + } + } + + #[inline] + unsafe fn two_opt_best_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut any_improvement = false; + let route_slice = route.as_mut_slice(); + + loop { + let mut improved = false; + + for i in 1..(n - 2) { + let mut best_gain = 0; + let mut best_j = 0; + + let i_range = i..(n - 1); + for j in i_range.skip(1) { + let ri_m1 = *route_slice.get_unchecked(i - 1); + let ri = *route_slice.get_unchecked(i); + let rj = *route_slice.get_unchecked(j); + let rj_p1 = *route_slice.get_unchecked(j + 1); + + let current = distance_matrix.get_unchecked(ri_m1).get_unchecked(ri); + let candidate = distance_matrix.get_unchecked(rj).get_unchecked(rj_p1); + let new_connection = distance_matrix.get_unchecked(ri_m1).get_unchecked(rj); + let broken_connection = distance_matrix.get_unchecked(ri).get_unchecked(rj_p1); + + let gain = current + candidate - 
new_connection - broken_connection; + + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + + if best_gain > 0 { + route_slice[i..=best_j].reverse(); + improved = true; + any_improvement = true; + } + } + + if !improved { + break; + } + } + + any_improvement + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/README.md b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/README.md new file mode 100644 index 0000000..2df27dc --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** new_enhanced_cw_opt +* **Copyright:** 2025 syebastian +* **Identity of Submitter:** syebastian +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/mod.rs b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/mod.rs new file mode 100644 index 0000000..85553d5 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/new_enhanced_cw_opt/mod.rs @@ -0,0 +1,644 @@ +use anyhow::{anyhow, Result}; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> Result<()>, + hyperparameters: &Option>, +) -> Result<()> { + Err(anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + use 
rand::{rngs::{SmallRng, StdRng}, Rng, SeedableRng}; + use tig_challenges::vehicle_routing::*; + + + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + match solve_sub_instance(sub_instance)? { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + pub fn solve_sub_instance(challenge: &SubInstance) -> anyhow::Result> { + let mut global_best_solution: Option = None; + let mut global_best_cost = std::i32::MAX; + + const OUTER_ITERATIONS: usize = 1; + let iterations_per_outer = [10000, 10000, 10000]; + let inner_iters_per_outer = [4, 1, 1]; + let num_nodes = challenge.difficulty.num_nodes; + + let mut rng = SmallRng::seed_from_u64(u64::from_le_bytes(challenge.seed[..8].try_into().unwrap())); + + let max_dist: f32 = challenge.distance_matrix[0].iter().sum::() as f32; + let p = challenge.baseline_total_distance as f32 / max_dist; + if p < 0.55 { + return Ok(None) + } + + let mut promising = false; + let mut iteration_results: Vec<(Vec, i32)> = Vec::new(); + + let mut best_outer_params : Vec = Vec::new(); + + for outer_iter in 0..OUTER_ITERATIONS { + let num_iterations = iterations_per_outer[outer_iter]; + let inner_iterations = inner_iters_per_outer[outer_iter]; + + for inner_iter in 0..inner_iterations { + let mut savings = Savings::new(challenge); + savings.sort_stable(); + + let mut current_params = vec![25; num_nodes]; + + let mut current_solution = create_solution(challenge, &savings.stable_list); + let mut current_cost = calculate_solution_cost(¤t_solution, &challenge.distance_matrix); + + if current_cost <= challenge.baseline_total_distance { + return Ok(Some(current_solution)); + } + + if (current_cost as f32 * 0.95) > challenge.baseline_total_distance as f32 && !promising { + return Ok(None); + } + else { + promising = true; + } + + 
savings.build_supplementary_structs(challenge); + + let mut best_solution = Some(SubSolution { routes: current_solution.routes.clone() }); + let mut best_cost = current_cost; + + for i in 0..num_iterations { + let (mut neighbor_params, mut modified_indices) = generate_neighbor( + ¤t_params, + best_solution.as_ref().unwrap(), + &challenge, + &mut rng, + i, + num_iterations + ); + + if outer_iter > 0 && i == 0 { + neighbor_params = best_outer_params.clone(); + modified_indices = (0..num_nodes).collect(); + }; + + if !savings.recompute_savings(&neighbor_params, &modified_indices) { + continue; + } + + let mut neighbor_solution = create_solution(challenge, &savings.unstable_list); + postprocess_solution( + &mut neighbor_solution, + &challenge.distance_matrix, + &challenge.demands, + challenge.max_capacity, + ); + + let neighbor_cost = calculate_solution_cost(&neighbor_solution, &challenge.distance_matrix); + + let delta = neighbor_cost - current_cost; + if delta <= 0 { + current_params = neighbor_params; + current_cost = neighbor_cost; + current_solution = neighbor_solution; + savings.apply_unstable_list(); + + if current_cost < best_cost { + best_cost = current_cost; + best_solution = Some(SubSolution { + routes: current_solution.routes.clone(), + }); + } + if best_cost <= challenge.baseline_total_distance { + return Ok(best_solution); + } + } + } + + iteration_results.push(( + current_params, + best_cost + )); + + if best_cost < global_best_cost { + global_best_cost = best_cost; + global_best_solution = best_solution; + } + } + + if outer_iter < OUTER_ITERATIONS - 1 { + let (best_params, best_cost) = iteration_results.iter() + .min_by_key(|&(_, cost)| cost) + .map(|(params, cost)| (params.clone(), *cost)) + .unwrap(); + + iteration_results.clear(); + best_outer_params = best_params; + } + } + + Ok(global_best_solution) + } + + pub struct Savings { + pub stable_list: Vec<(u32, u16, u16)>, + + raw_savings: Vec>, + pub pair_map: Vec>, + pub unstable_list: Vec<(u32, u16, 
u16)>, + } + + impl Savings { + pub fn new(challenge: &SubInstance) -> Self { + let stable_list = Self::create_initial_savings_list(challenge); + + Self { + stable_list, + raw_savings: Vec::new(), + pair_map: Vec::new(), + unstable_list: Vec::new(), + } + } + + fn create_initial_savings_list(challenge: &SubInstance) -> Vec<(u32, u16, u16)> { + let num_nodes = challenge.difficulty.num_nodes; + + let max_distance = challenge + .distance_matrix + .iter() + .flat_map(|row| row.iter()) + .cloned() + .max() + .unwrap_or(0); + let threshold = max_distance / 3; + + let capacity = ((num_nodes - 1) * (num_nodes - 2)) / 2; + let mut savings = Vec::with_capacity(capacity); + + for i in 1..num_nodes { + for j in (i + 1)..num_nodes { + let dist_ij = challenge.distance_matrix[i][j]; + if dist_ij <= threshold { + let saving = challenge.distance_matrix[0][i] + challenge.distance_matrix[j][0] - dist_ij; + if saving > 0 { + savings.push((!(saving as u32), i as u16, j as u16)); + } + } + } + } + savings + } + + pub fn build_supplementary_structs(&mut self, challenge: &SubInstance) { + let num_nodes = challenge.difficulty.num_nodes; + let mask_size = (num_nodes + 63) / 64; // Calculate number of u64 chunks needed + + // Initialize pair_map as a bitmask with 64-bit chunks + self.pair_map = vec![vec![0u64; mask_size]; num_nodes]; + self.raw_savings = vec![vec![0; num_nodes]; num_nodes]; + + for &(_, i16, j16) in &self.stable_list { + let (i, j) = (i16 as usize, j16 as usize); + let saving = challenge.distance_matrix[0][i] + + challenge.distance_matrix[j][0] + - challenge.distance_matrix[i][j]; + + self.raw_savings[i][j] = saving as u32; + self.raw_savings[j][i] = saving as u32; + + let (idx, bit) = (j / 64, j % 64); + self.pair_map[i][idx] |= 1u64 << bit; + + let (idx, bit) = (i / 64, i % 64); + self.pair_map[j][idx] |= 1u64 << bit; + } + + self.unstable_list = Vec::with_capacity(self.stable_list.len()); + self.unstable_list.resize(self.stable_list.len(), (0, 0, 0)); + } + + fn 
radix_sort(savings_list: &mut [(u32, u16, u16)]) { + unsafe { + // 1. Use usize for counts to prevent overflow with large arrays + let mut counts_low = [0u32; 512]; + let mut counts_high = [0u32; 512]; + let mut buf = Vec::with_capacity(savings_list.len()); + buf.set_len(savings_list.len()); + + let savings_ptr : *mut (u32, u16, u16) = savings_list.as_mut_ptr(); + let buf_ptr : *mut (u32, u16, u16) = buf.as_mut_ptr(); + + let mut ptr = savings_ptr; + for _ in 0..savings_list.len() { + let bits = (*ptr).0; + counts_low[(bits & 511) as usize] += 1; + counts_high[((bits >> 9) & 511) as usize] += 1; + ptr = ptr.add(1); + } + + let mut total_low = 0; + let mut total_high = 0; + for i in 0..512 { + let cl = counts_low[i]; + let ch = counts_high[i]; + counts_low[i] = total_low; + counts_high[i] = total_high; + total_low += cl; + total_high += ch; + } + + let mut src = savings_ptr; + let mut dst = buf_ptr; + for _ in 0..savings_list.len() { + let item = *src; + let byte = (item.0 & 511) as usize; + let pos = counts_low[byte] as usize; + *dst.add(pos) = item; + counts_low[byte] += 1; + src = src.add(1); + } + + let mut src = buf_ptr; + let mut dst = savings_ptr; + for _ in 0..savings_list.len() { + let item = *src; + let byte = ((item.0 >> 9) & 511) as usize; + let pos = counts_high[byte] as usize; + *dst.add(pos) = item; + counts_high[byte] += 1; + src = src.add(1); + } + } + } + + pub fn recompute_savings(&mut self, params: &[u32], modified_indices: &[usize]) -> bool { + let num_nodes = params.len(); + let mut reduced_savings = Vec::with_capacity(modified_indices.len() * num_nodes / 10); + let mut modified = vec![false; num_nodes]; + + let mut mask_len = (num_nodes + 63) / 64; + let mut visited = vec![0u64; mask_len]; + + unsafe { + for &i in modified_indices { + for k in 0..mask_len { + let chunk = *self.pair_map.get_unchecked(i).get_unchecked(k); + let mut unvisited_pairs_mask = chunk & !visited[k]; + + while unvisited_pairs_mask != 0 { + let bit_pos = 
unvisited_pairs_mask.trailing_zeros() as usize; + let j = bit_pos + 64 * k; + + let base_saving = *self.raw_savings.get_unchecked(i).get_unchecked(j); + let new_score = (*params.get_unchecked(i) + *params.get_unchecked(j)) * base_saving; + reduced_savings.push((!new_score, i as u16, j as u16)); + + unvisited_pairs_mask &= unvisited_pairs_mask - 1; + } + } + let (idx, bit) = (i / 64, i % 64); + *visited.get_unchecked_mut(idx) |= 1 << bit; + modified[i] = true; + } + } + + if reduced_savings.len() == 0 { + return false; + } + Self::radix_sort(&mut reduced_savings); + + let mut stable_idx = 0; + let mut reduced_idx = 0; + let mut k = 0; + + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + + while stable_idx < self.stable_list.len() && reduced_idx < reduced_savings.len() { + let stable_entry = self.stable_list[stable_idx]; + let reduced_entry = reduced_savings[reduced_idx]; + + if stable_entry.0 < reduced_entry.0 { + self.unstable_list[k] = stable_entry; + + stable_idx += 1; + while stable_idx < self.stable_list.len() + && (modified[self.stable_list[stable_idx].1 as usize] + || modified[self.stable_list[stable_idx].2 as usize]) { + stable_idx += 1; + } + } else { + self.unstable_list[k] = reduced_entry; + reduced_idx += 1; + } + k += 1; + } + + while stable_idx < self.stable_list.len() { + let entry = self.stable_list[stable_idx]; + + if !modified[entry.1 as usize] && !modified[entry.2 as usize] { + self.unstable_list[k] = entry; + k += 1; + } + stable_idx += 1; + } + + while reduced_idx < reduced_savings.len() { + self.unstable_list[k] = reduced_savings[reduced_idx]; + k += 1; + reduced_idx += 1; + } + return true; + } + + pub fn apply_unstable_list(&mut self) { + std::mem::swap(&mut self.stable_list, &mut self.unstable_list); + } + + pub fn sort_stable(&mut self) { + Self::radix_sort(&mut self.stable_list); + } + } + + fn 
generate_neighbor( + current: &[u32], + best_solution: &SubSolution, + challenge : &SubInstance, + rng: &mut R, + iteration: usize, + max_iterations: usize, + ) -> (Vec, Vec) { + let progress = iteration as f32 / max_iterations as f32; + let base_prob = 0.5 * (-5.0 * progress).exp() + 0.04; + let max_steps = 2; + + let mut result = current.to_vec(); + let mut modified_indices = Vec::with_capacity(challenge.difficulty.num_nodes / 2); + + while modified_indices.is_empty() { + for (i, ¶m) in current.iter().enumerate() { + if rng.gen_bool(base_prob as f64) { + let steps = rng.gen_range(1..=max_steps); + + let sign = if rng.gen_bool(0.5) { 1 } else { -1 }; + result[i] = (param as i32 + sign * steps).clamp(25, 50) as u32; + modified_indices.push(i); + } + } + } + + let mut pairs = Vec::with_capacity(3 * challenge.difficulty.num_nodes); + for route in &best_solution.routes { + for i in 1..route.len()-1 { + for j in i+1..route.len()-1 { + pairs.push((route[i], route[j])); + } + } + } + + if pairs.len() > 0{ + let pair_idx = rng.gen_range(0..pairs.len()); + let idx1 = pairs[pair_idx].0; + let idx2 = pairs[pair_idx].1; + + result.swap(idx1, idx2); + + if !modified_indices.contains(&idx1) { + modified_indices.push(idx1); + } + if !modified_indices.contains(&idx2) { + modified_indices.push(idx2); + } + } + + (result, modified_indices) + } + + #[inline] + fn calculate_solution_cost(solution: &SubSolution, distance_matrix: &Vec>) -> i32 { + solution + .routes + .iter() + .map(|route| { + route.windows(2).map(|pair| distance_matrix[pair[0]][pair[1]]).sum::() + }) + .sum() + } + + #[inline(never)] + fn create_solution( + challenge: &SubInstance, + savings_list: &[(u32, u16, u16)] + ) -> SubSolution { + let num_nodes = challenge.difficulty.num_nodes; + let demands = &challenge.demands; + let max_capacity = challenge.max_capacity; + + let mut node_links: Vec<[Option; 2]> = vec![[None, None]; num_nodes]; + let mut route : Vec<(usize, usize)> = (0..num_nodes) + .map(|i| (i, i)) + 
.collect(); + + let mut route_demands = demands.clone(); + + for &(_, i16, j16) in savings_list { + let (i, j) = (i16 as usize, j16 as usize); + + let route_demands_left = route_demands[i]; + let route_demands_right = route_demands[j]; + if route_demands_left + route_demands_right > max_capacity { + continue; + } + + let route_i_start = route[i].0; + let route_j_start = route[j].0; + let route_i_end = route[i].1; + let route_j_end = route[j].1; + if route_i_start == route_j_start || route_i_start == route_j_end + || route_i_end == route_j_start || route_i_end == route_j_end { + continue; + } + + let node_i_left = node_links[i][0]; + let node_i_right = node_links[i][1]; + + let node_j_left = node_links[j][0]; + let node_j_right = node_links[j][1]; + + let mut dir_i = usize::from(node_i_left.is_some()); + let mut dir_j = usize::from(node_j_left.is_some()); + + node_links[i][dir_i] = Some(j); + node_links[j][dir_j] = Some(i); + + if node_i_left.is_some() || node_i_right.is_some() { + route_demands[i] = max_capacity + 1; + } + if node_j_left.is_some() || node_j_right.is_some() { + route_demands[j] = max_capacity + 1; + } + + let opposite_i = if route_i_start == i { + route_i_end + } else { + route_i_start + }; + + let opposite_j = if route_j_start == j { + route_j_end + } else { + route_j_start + }; + + let new_route = (opposite_i, opposite_j); + route[opposite_i] = new_route; + route[opposite_j] = new_route; + + let combined_demand = route_demands_left + route_demands_right; + route_demands[opposite_i] = combined_demand; + route_demands[opposite_j] = combined_demand; + } + + let final_routes = extract_routes( + num_nodes, + &route_demands, + max_capacity, + &route, + &node_links + ); + + SubSolution { routes: final_routes } + } + + #[inline(never)] + fn extract_routes( + num_nodes: usize, + route_demands: &[i32], + max_capacity: i32, + route: &[(usize, usize)], + node_links: &[[Option; 2]], + ) -> Vec> { + let mut visited = vec![false; num_nodes]; + + let mut 
all_nodes = Vec::with_capacity(3 * num_nodes); + let mut final_routes = Vec::with_capacity(num_nodes / 2); + + for i in 1..num_nodes { + if route_demands[i] > max_capacity || route[i].0 != i { + continue; + } + let route_start_idx = all_nodes.len(); + + let mut route_iter = i; + let route_end = route[i].1; + + all_nodes.push(0); + all_nodes.push(route_iter); + visited[route_iter] = true; + + while route_iter != route_end { + let links = node_links[route_iter]; + let next = match links { + [Some(left), Some(right)] => { + if !visited[left] { left } else { right } + }, + [Some(next), None] | [None, Some(next)] => next, + _ => break, + }; + route_iter = next; + all_nodes.push(route_iter); + visited[route_iter] = true; + } + all_nodes.push(0); + + let route_len = all_nodes.len() - route_start_idx; + final_routes.push(all_nodes[route_start_idx..all_nodes.len()].to_vec()); + } + + final_routes + } + + + #[inline(never)] + pub fn postprocess_solution( + solution: &mut SubSolution, + distance_matrix: &Vec>, + _demands: &Vec, + _max_capacity: i32, + ) { + for route in solution.routes.iter_mut() { + unsafe { two_opt_best_unsafe(route, distance_matrix) }; + } + } + + #[inline] + unsafe fn two_opt_best_unsafe(route: &mut Vec, distance_matrix: &Vec>) -> bool { + let n = route.len(); + if n < 4 { + return false; + } + + let mut any_improvement = false; + let route_slice = route.as_mut_slice(); + + loop { + let mut improved = false; + + for i in 1..(n - 2) { + let mut best_gain = 0; + let mut best_j = 0; + + let i_range = i..(n - 1); + for j in i_range.skip(1) { + let ri_m1 = *route_slice.get_unchecked(i - 1); + let ri = *route_slice.get_unchecked(i); + let rj = *route_slice.get_unchecked(j); + let rj_p1 = *route_slice.get_unchecked(j + 1); + + let current = distance_matrix.get_unchecked(ri_m1).get_unchecked(ri); + let candidate = distance_matrix.get_unchecked(rj).get_unchecked(rj_p1); + let new_connection = distance_matrix.get_unchecked(ri_m1).get_unchecked(rj); + let 
broken_connection = distance_matrix.get_unchecked(ri).get_unchecked(rj_p1); + + let gain = current + candidate - new_connection - broken_connection; + + if gain > best_gain { + best_gain = gain; + best_j = j; + } + } + + if best_gain > 0 { + route_slice[i..=best_j].reverse(); + improved = true; + any_improvement = true; + } + } + + if !improved { + break; + } + } + + any_improvement + } +} \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/routing_redone/README.md b/tig-algorithms/src/vehicle_routing/routing_redone/README.md new file mode 100644 index 0000000..79e0e5c --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/routing_redone/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** routing_redone +* **Copyright:** 2025 frogmarch +* **Identity of Submitter:** frogmarch +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/routing_redone/mod.rs b/tig-algorithms/src/vehicle_routing/routing_redone/mod.rs new file mode 100644 index 0000000..81f7ac7 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/routing_redone/mod.rs @@ -0,0 +1,1110 @@ +use std::collections::BTreeSet; +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + + +mod utils { + + pub fn precompute_proximity_matrix( + num_nodes: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> Vec> { + let mut proximity_matrix = vec![vec![0.0; 
num_nodes]; num_nodes]; + for i in 1..num_nodes { + for j in 1..num_nodes { + if i != j { + proximity_matrix[i][j] = compute_proximity( + i, + j, + distance_matrix, + ready_times, + due_times, + service_time, + ); + } + } + } + proximity_matrix + } + + pub fn calculate_route_demands(routes: &Vec>, demands: &Vec) -> Vec { + routes + .iter() + .map(|route| route[1..route.len() - 1].iter().map(|&n| demands[n]).sum()) + .collect() + } + + pub fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + + let mut curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = ready_times[insert_node] + .max(curr_time + distance_matrix[curr_node][insert_node]); + if new_arrival_time > due_times[insert_node] { + continue; + } + let old_arrival_time = + ready_times[next_node].max(curr_time + distance_matrix[curr_node][next_node]); + + let c11 = distance_matrix[curr_node][insert_node] + + distance_matrix[insert_node][next_node] + - distance_matrix[curr_node][next_node]; + + let c12 = new_arrival_time - old_arrival_time; + + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * distance_matrix[0][insert_node] + c1; + + let c1_is_better = match best_c1 { + None => true, + Some(x) => c1 > x, + }; + + let c2_is_better = match best_c2 { + None => true, + Some(x) => c2 > x, + }; + + if c1_is_better + && c2_is_better + && is_feasible( + route, + distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = ready_times[next_node] + .max(curr_time + 
distance_matrix[curr_node][next_node]) + + service_time; + curr_node = next_node; + } + } + best + } + + pub fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, + ) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += distance_matrix[curr_node][next_node]; + if curr_time > due_times[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(ready_times[next_node]) + service_time; + curr_node = next_node; + } + valid + } + + pub fn compute_proximity( + i: usize, + j: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> f64 { + let time_ij = distance_matrix[i][j]; + let expr1 = (ready_times[j] - time_ij - service_time - due_times[i]).max(0) as f64 + + (ready_times[i] + service_time + time_ij - due_times[j]).max(0) as f64; + let expr2 = (ready_times[i] - time_ij - service_time - due_times[j]).max(0) as f64 + + (ready_times[j] + service_time + time_ij - due_times[i]).max(0) as f64; + time_ij as f64 + expr1.min(expr2) + } + + pub fn find_best_insertion_in_route( + route: &Vec, + node: usize, + demands: &Vec, + max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + let current_demand: i32 = route[1..route.len() - 1].iter().map(|&n| demands[n]).sum(); + if current_demand + demands[node] > max_capacity { + return None; + } + + let mut best_pos = None; + let mut best_delta = i32::MAX; + + for pos in 1..route.len() { + let prev_node = route[pos - 1]; + let next_node = route[pos]; + let delta = distance_matrix[prev_node][node] + distance_matrix[node][next_node] + - distance_matrix[prev_node][next_node]; + + if check_feasible_insertion( + route, + node, + pos, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + if delta < 
best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + best_pos.map(|pos| (pos, best_delta)) + } + + pub fn check_feasible_insertion( + route: &Vec, + insert_node: usize, + insert_pos: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if insert_pos == route.len() - 1 { + let last_node = route[insert_pos - 1]; + let arrival_time = if last_node == 0 { + 0 + } else { + let mut time = 0; + let mut node = 0; + for &n in route.iter().take(insert_pos) { + if n == 0 { + continue; + } + time += distance_matrix[node][n]; + time = time.max(ready_times[n]); + if time > due_times[n] { + return false; + } + time += service_time; + node = n; + } + time + }; + + let new_arrival = arrival_time + distance_matrix[last_node][insert_node]; + if new_arrival > due_times[insert_node] { + return false; + } + + let departure = new_arrival.max(ready_times[insert_node]) + service_time; + let final_arrival = departure + distance_matrix[insert_node][0]; + + return final_arrival <= due_times[0]; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &node in route[..insert_pos].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + let travel_time = distance_matrix[curr_node][insert_node]; + curr_time += travel_time; + if curr_time > due_times[insert_node] { + return false; + } + + curr_time = curr_time.max(ready_times[insert_node]) + service_time; + curr_node = insert_node; + + for &node in route[insert_pos..].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + true + } + + 
pub fn calculate_route_distance(route: &Vec, distance_matrix: &Vec>) -> i32 { + let mut distance = 0; + for i in 0..route.len() - 1 { + distance += distance_matrix[route[i]][route[i + 1]]; + } + distance + } +} + +mod simple_solver { + use super::utils::*; + use super::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by(|&a, &b| distance_matrix[0][a].cmp(&distance_matrix[0][b])); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + let _ = save_solution(&Solution { routes }); + return Ok(()); + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let 
mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + + while improved { + improved = false; + + let mut route_demands = calculate_route_demands(&best_routes, demands); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _pos2) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _delta_cost)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 && new_routes[route1_idx][pos1] == node { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + 
distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &n) in route[1..route.len() - 1].iter().enumerate() + { + node_positions[n] = (i, j + 1); + } + } + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + if !improved { + let current_routes = best_routes.clone(); + + for route_idx in 0..current_routes.len() { + let route = ¤t_routes[route_idx]; + + if route.len() < 4 { + continue; + } + + for i in 1..route.len() - 2 { + for j in i + 1..route.len() - 1 { + let mut new_route = Vec::with_capacity(route.len()); + + for k in 0..i { + new_route.push(route[k]); + } + + for k in (i..=j).rev() { + new_route.push(route[k]); + } + + for k in j + 1..route.len() { + new_route.push(route[k]); + } + + if !is_route_time_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let old_distance = calculate_route_distance(route, distance_matrix); + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + + if new_distance < old_distance { + let mut new_routes = current_routes.clone(); + new_routes[route_idx] = new_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if improved { + break; + } + } + if improved { + break; + } + } + } + } + + best_routes + } + + fn is_route_time_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + 
due_times: &Vec, + ) -> bool { + let mut curr_time = 0; + let mut curr_node = route[0]; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if next_node != 0 && curr_time > due_times[next_node] { + return false; + } + + if next_node != 0 { + curr_time = curr_time.max(ready_times[next_node]); + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } +} + +mod complex_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_complex( + challenge: &Challenge, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by_key(|&a| distance_matrix[0][a]); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + if !remaining.is_empty() && remaining.len() > num_nodes / 8 { + return Ok(None); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + + Ok(Some(Solution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: 
&Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + let max_outer_iterations = 25; + let max_swap_iterations = 10; + let max_merge_iterations = 10; + let mut outer_iterations = 0; + + while improved && outer_iterations < max_outer_iterations { + improved = false; + outer_iterations += 1; + + let mut route_demands = calculate_route_demands(&best_routes, demands); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + 
service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 && new_routes[route1_idx][pos1] == node { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &n) in route[1..route.len() - 1].iter().enumerate() + { + node_positions[n] = (i, j + 1); + } + } + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + let mut swap_improved = true; + let mut swap_iterations = 0; + + while swap_improved && swap_iterations < max_swap_iterations { + swap_improved = false; + swap_iterations += 1; + + for route_idx in 0..best_routes.len() { + let route = best_routes[route_idx].clone(); + if route.len() <= 4 { + continue; + } + + for i in 1..route.len() - 1 { + for j in i + 1..route.len() - 1 { + if j == i + 1 { + continue; + } + + let mut new_route = route.clone(); + new_route.swap(i, j); + + if !is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let new_route_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_route_distance = + calculate_route_distance(&route, distance_matrix); + + if new_route_distance < old_route_distance { + let mut new_routes = best_routes.clone(); + new_routes[route_idx] = new_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { 
+ best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + + if !swap_improved { + for route1_idx in 0..best_routes.len() { + let route1 = best_routes[route1_idx].clone(); + + for route2_idx in route1_idx + 1..best_routes.len() { + let route2 = best_routes[route2_idx].clone(); + + for i in 1..route1.len() - 1 { + let node1 = route1[i]; + + for j in 1..route2.len() - 1 { + let node2 = route2[j]; + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + let new_route1_demand = + route1_demand - demands[node1] + demands[node2]; + let new_route2_demand = + route2_demand - demands[node2] + demands[node1]; + + if new_route1_demand > max_capacity + || new_route2_demand > max_capacity + { + continue; + } + + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = node2; + new_route2[j] = node1; + + if !is_route_feasible( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_feasible( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let old_distance = + calculate_route_distance(&route1, distance_matrix) + + calculate_route_distance(&route2, distance_matrix); + let new_distance = + calculate_route_distance(&new_route1, distance_matrix) + + calculate_route_distance( + &new_route2, + distance_matrix, + ); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[route1_idx] = new_route1; + new_routes[route2_idx] = new_route2; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + 
Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + } + } + + let mut merge_improved = true; + let mut merge_iterations = 0; + + while merge_improved && merge_iterations < max_merge_iterations { + merge_improved = false; + merge_iterations += 1; + + for i in 0..best_routes.len() { + if merge_improved { + break; + } + + for j in 0..best_routes.len() { + if i == j { + continue; + } + + let route1 = &best_routes[i]; + let route2 = &best_routes[j]; + + if route1.len() <= 2 || route2.len() <= 2 { + continue; + } + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + if route1_demand + route2_demand <= max_capacity { + let mut best_insertion_pos = None; + let mut best_insertion_delta = i32::MAX; + + for &node in &route2[1..route2.len() - 1] { + for pos in 1..route1.len() { + let prev = route1[pos - 1]; + let next = route1[pos]; + + let insertion_delta = distance_matrix[prev][node] + + distance_matrix[node][next] + - distance_matrix[prev][next]; + + if insertion_delta < best_insertion_delta { + let mut test_route = route1.clone(); + test_route.insert(pos, node); + + if is_route_feasible( + &test_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + best_insertion_pos = Some(pos); + best_insertion_delta = insertion_delta; + } + } + } + } + + if let Some(pos) = best_insertion_pos { + let mut new_route = route1.clone(); + + for (idx, &node) in route2[1..route2.len() - 1].iter().enumerate() { + new_route.insert(pos + idx, node); + } + + if is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + 
due_times, + ) { + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_distance = + calculate_route_distance(route1, distance_matrix) + + calculate_route_distance(route2, distance_matrix); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[i] = new_route; + new_routes.remove(j); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + merge_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + } + } + } + } + + best_routes + } + + fn is_route_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if route.len() == 2 && route[0] == 0 && route[1] == 0 { + return true; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if curr_time > due_times[next_node] { + return false; + } + + curr_time = curr_time.max(ready_times[next_node]); + + if next_node != 0 { + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } +} diff --git a/tig-algorithms/src/vehicle_routing/sausage/README.md b/tig-algorithms/src/vehicle_routing/sausage/README.md new file mode 100644 index 0000000..46e69f5 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/sausage/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** sausage +* **Copyright:** 2025 BigBean +* **Identity of Submitter:** BigBean +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* 
TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/sausage/mod.rs b/tig-algorithms/src/vehicle_routing/sausage/mod.rs new file mode 100644 index 0000000..897efec --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/sausage/mod.rs @@ -0,0 +1,1272 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use std::collections::BTreeSet; +use tig_challenges::vehicle_routing::*; + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + Err(anyhow::anyhow!("This algorithm is no longer compatible.")) +} + +// Old code that is no longer compatible +#[cfg(none)] +mod dead_code { + pub fn solve_challenge(challenge: &Challenge) -> anyhow::Result> { + let mut solution = Solution { + sub_solutions: Vec::new(), + }; + for sub_instance in &challenge.sub_instances { + let better_than_baseline = sub_instance.difficulty.better_than_baseline; + + let sub_solution = if better_than_baseline < 50 { + simple_solver::solve_sub_instance_simple(sub_instance)? + } else { + complex_solver::solve_sub_instance_complex(sub_instance)? 
+ }; + + match sub_solution { + Some(sub_solution) => solution.sub_solutions.push(sub_solution), + None => return Ok(None), + } + } + Ok(Some(solution)) + } + + mod utils { + + pub fn precompute_proximity_matrix( + num_nodes: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> Vec> { + let mut proximity_matrix = vec![vec![0.0; num_nodes]; num_nodes]; + for i in 1..num_nodes { + for j in 1..num_nodes { + if i != j { + proximity_matrix[i][j] = compute_proximity( + i, + j, + distance_matrix, + ready_times, + due_times, + service_time, + ); + } + } + } + proximity_matrix + } + + pub fn calculate_route_demands(routes: &Vec>, demands: &Vec) -> Vec { + routes + .iter() + .map(|route| route[1..route.len() - 1].iter().map(|&n| demands[n]).sum()) + .collect() + } + + pub fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + + let mut curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = ready_times[insert_node] + .max(curr_time + distance_matrix[curr_node][insert_node]); + if new_arrival_time > due_times[insert_node] { + continue; + } + let old_arrival_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]); + + let c11 = distance_matrix[curr_node][insert_node] + + distance_matrix[insert_node][next_node] + - distance_matrix[curr_node][next_node]; + + let c12 = new_arrival_time - old_arrival_time; + + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * distance_matrix[0][insert_node] + c1; + + let c1_is_better = match best_c1 { + None => true, + Some(x) => c1 > x, + }; + + let c2_is_better = match best_c2 { + None => true, 
+ Some(x) => c2 > x, + }; + + if c1_is_better + && c2_is_better + && is_feasible( + route, + distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = ready_times[next_node] + .max(curr_time + distance_matrix[curr_node][next_node]) + + service_time; + curr_node = next_node; + } + } + best + } + + pub fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, + ) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += distance_matrix[curr_node][next_node]; + if curr_time > due_times[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(ready_times[next_node]) + service_time; + curr_node = next_node; + } + valid + } + + pub fn compute_proximity( + i: usize, + j: usize, + distance_matrix: &Vec>, + ready_times: &Vec, + due_times: &Vec, + service_time: i32, + ) -> f64 { + let time_ij = distance_matrix[i][j]; + let expr1 = (ready_times[j] - time_ij - service_time - due_times[i]).max(0) as f64 + + (ready_times[i] + service_time + time_ij - due_times[j]).max(0) as f64; + let expr2 = (ready_times[i] - time_ij - service_time - due_times[j]).max(0) as f64 + + (ready_times[j] + service_time + time_ij - due_times[i]).max(0) as f64; + time_ij as f64 + expr1.min(expr2) + } + + pub fn find_best_insertion_in_route( + route: &Vec, + node: usize, + demands: &Vec, + max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + let current_demand: i32 = route[1..route.len() - 1].iter().map(|&n| demands[n]).sum(); + if current_demand + demands[node] > max_capacity { + return None; + } + + let mut best_pos = None; + let mut best_delta = i32::MAX; + 
+ for pos in 1..route.len() { + let prev_node = route[pos - 1]; + let next_node = route[pos]; + let delta = distance_matrix[prev_node][node] + distance_matrix[node][next_node] + - distance_matrix[prev_node][next_node]; + + if check_feasible_insertion( + route, + node, + pos, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + if delta < best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + best_pos.map(|pos| (pos, best_delta)) + } + + pub fn check_feasible_insertion( + route: &Vec, + insert_node: usize, + insert_pos: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if insert_pos == route.len() - 1 { + let last_node = route[insert_pos - 1]; + let arrival_time = if last_node == 0 { + 0 + } else { + let mut time = 0; + let mut node = 0; + for &n in route.iter().take(insert_pos) { + if n == 0 { + continue; + } + time += distance_matrix[node][n]; + time = time.max(ready_times[n]); + if time > due_times[n] { + return false; + } + time += service_time; + node = n; + } + time + }; + + let new_arrival = arrival_time + distance_matrix[last_node][insert_node]; + if new_arrival > due_times[insert_node] { + return false; + } + + let departure = new_arrival.max(ready_times[insert_node]) + service_time; + let final_arrival = departure + distance_matrix[insert_node][0]; + + return final_arrival <= due_times[0]; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &node in route[..insert_pos].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + let travel_time = distance_matrix[curr_node][insert_node]; + curr_time += travel_time; + if curr_time > due_times[insert_node] { + return false; + } + + curr_time = curr_time.max(ready_times[insert_node]) + 
service_time; + curr_node = insert_node; + + for &node in route[insert_pos..].iter() { + if node == 0 { + continue; + } + let travel_time = distance_matrix[curr_node][node]; + curr_time += travel_time; + + if curr_time > due_times[node] { + return false; + } + + curr_time = curr_time.max(ready_times[node]) + service_time; + curr_node = node; + } + + true + } + + pub fn calculate_route_distance( + route: &Vec, + distance_matrix: &Vec>, + ) -> i32 { + let mut distance = 0; + for i in 0..route.len() - 1 { + distance += distance_matrix[route[i]][route[i + 1]]; + } + distance + } + + // New 2-opt function + pub fn apply_2opt( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + let mut best_route = route.clone(); + let mut improved = true; + + while improved { + improved = false; + + for i in 1..best_route.len() - 2 { + for j in i + 1..best_route.len() - 1 { + // Create new route with reversed segment + let mut new_route = Vec::with_capacity(best_route.len()); + + // Add nodes before the reversal + for k in 0..i { + new_route.push(best_route[k]); + } + + // Add reversed segment + for k in (i..=j).rev() { + new_route.push(best_route[k]); + } + + // Add nodes after the reversal + for k in j + 1..best_route.len() { + new_route.push(best_route[k]); + } + + // Check if the new route is feasible + if !is_route_time_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + // Calculate improvement + let old_distance = calculate_route_distance(&best_route, distance_matrix); + let new_distance = calculate_route_distance(&new_route, distance_matrix); + + if new_distance < old_distance { + best_route = new_route; + improved = true; + break; + } + } + if improved { + break; + } + } + } + + best_route + } + + pub fn is_route_time_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + let mut 
curr_time = 0; + let mut curr_node = route[0]; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if next_node != 0 && curr_time > due_times[next_node] { + return false; + } + + if next_node != 0 { + curr_time = curr_time.max(ready_times[next_node]); + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + } + + mod simple_solver { + use super::utils::*; + use super::*; + + pub fn solve_sub_instance_simple( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by(|&a, &b| distance_matrix[0][a].cmp(&distance_matrix[0][b])); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + // Apply 2-opt to the route after construction + route = apply_2opt( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + routes.push(route); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + 
num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + .unwrap_or(i32::MAX); + let mut improved = true; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + + while improved { + improved = false; + + let mut route_demands = calculate_route_demands(&best_routes, demands); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _pos2) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _delta_cost)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > 
pos1 + && new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + // Apply 2-opt to both affected routes + new_routes[route1_idx] = apply_2opt( + &new_routes[route1_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_routes[route2_idx] = apply_2opt( + &new_routes[route2_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &n) in + route[1..route.len() - 1].iter().enumerate() + { + node_positions[n] = (i, j + 1); + } + } + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + if !improved { + let current_routes = best_routes.clone(); + + for route_idx in 0..current_routes.len() { + let route = ¤t_routes[route_idx]; + + if route.len() < 4 { + continue; + } + + // Try 2-opt on each route + let improved_route = apply_2opt( + route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + if improved_route != *route { + let mut new_routes = current_routes.clone(); + new_routes[route_idx] = improved_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + + best_routes + } + } + + mod complex_solver { + use super::utils::*; + use super::*; + + pub fn 
solve_sub_instance_complex( + challenge: &SubInstance, + ) -> anyhow::Result> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes = Vec::new(); + + let mut nodes: Vec = (1..num_nodes).collect(); + nodes.sort_by_key(|&a| distance_matrix[0][a]); + + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + while let Some((best_node, best_pos)) = find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + demands[n] <= max_capacity) + .collect(), + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining.remove(&best_node); + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + } + + // Apply 2-opt to the route after construction + route = apply_2opt( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + routes.push(route); + } + + if !remaining.is_empty() && remaining.len() > num_nodes / 8 { + return Ok(None); + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + + Ok(Some(SubSolution { routes })) + } + + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + let mut best_routes = routes.clone(); + let mut best_distance = calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &best_routes, + service_time, + ready_times, + due_times, + ) + 
.unwrap_or(i32::MAX); + let mut improved = true; + + let proximity_matrix = precompute_proximity_matrix( + num_nodes, + distance_matrix, + ready_times, + due_times, + service_time, + ); + let max_outer_iterations = 25; + let max_swap_iterations = 10; + let max_merge_iterations = 10; + let mut outer_iterations = 0; + + while improved && outer_iterations < max_outer_iterations { + improved = false; + outer_iterations += 1; + + let mut route_demands = calculate_route_demands(&best_routes, demands); + + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &node) in route[1..route.len() - 1].iter().enumerate() { + node_positions[node] = (i, j + 1); + } + } + + let mut proximity_pairs = Vec::new(); + for i in 1..num_nodes { + if let Some((best_j, min_prox)) = (1..num_nodes) + .filter(|&j| j != i) + .map(|j| (j, proximity_matrix[i][j])) + .min_by(|(_, a_prox), (_, b_prox)| a_prox.partial_cmp(b_prox).unwrap()) + { + proximity_pairs.push((min_prox, i, best_j)); + } + } + proximity_pairs.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + for (_, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] > max_capacity { + continue; + } + + let target_route = &best_routes[route2_idx]; + if let Some((best_pos, _)) = find_best_insertion_in_route( + target_route, + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let mut new_routes = best_routes.clone(); + + if new_routes[route1_idx].len() > pos1 + && new_routes[route1_idx][pos1] == node + { + new_routes[route1_idx].remove(pos1); + new_routes[route2_idx].insert(best_pos, node); + + // Apply 2-opt to both affected routes + new_routes[route1_idx] = 
apply_2opt( + &new_routes[route1_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_routes[route2_idx] = apply_2opt( + &new_routes[route2_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(new_distance) => { + if new_distance < best_distance { + best_distance = new_distance; + best_routes = new_routes; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + for (i, route) in best_routes.iter().enumerate() { + for (j, &n) in + route[1..route.len() - 1].iter().enumerate() + { + node_positions[n] = (i, j + 1); + } + } + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + + let mut swap_improved = true; + let mut swap_iterations = 0; + + while swap_improved && swap_iterations < max_swap_iterations { + swap_improved = false; + swap_iterations += 1; + + for route_idx in 0..best_routes.len() { + let route = best_routes[route_idx].clone(); + if route.len() <= 4 { + continue; + } + + for i in 1..route.len() - 1 { + for j in i + 1..route.len() - 1 { + if j == i + 1 { + continue; + } + + let mut new_route = route.clone(); + new_route.swap(i, j); + + if !is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + let new_route_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_route_distance = + calculate_route_distance(&route, distance_matrix); + + if new_route_distance < old_route_distance { + // Apply 2-opt after swap + let optimized_route = apply_2opt( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + let mut new_routes = best_routes.clone(); + new_routes[route_idx] = optimized_route; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + 
distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + + if !swap_improved { + for route1_idx in 0..best_routes.len() { + let route1 = best_routes[route1_idx].clone(); + + for route2_idx in route1_idx + 1..best_routes.len() { + let route2 = best_routes[route2_idx].clone(); + + for i in 1..route1.len() - 1 { + let node1 = route1[i]; + + for j in 1..route2.len() - 1 { + let node2 = route2[j]; + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + let new_route1_demand = + route1_demand - demands[node1] + demands[node2]; + let new_route2_demand = + route2_demand - demands[node2] + demands[node1]; + + if new_route1_demand > max_capacity + || new_route2_demand > max_capacity + { + continue; + } + + let mut new_route1 = route1.clone(); + let mut new_route2 = route2.clone(); + new_route1[i] = node2; + new_route2[j] = node1; + + if !is_route_feasible( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_feasible( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + continue; + } + + // Apply 2-opt to both routes after swap + new_route1 = apply_2opt( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ); + new_route2 = apply_2opt( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + let old_distance = + calculate_route_distance(&route1, distance_matrix) + + calculate_route_distance( + &route2, + distance_matrix, + ); + let new_distance = + 
calculate_route_distance(&new_route1, distance_matrix) + + calculate_route_distance( + &new_route2, + distance_matrix, + ); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[route1_idx] = new_route1; + new_routes[route2_idx] = new_route2; + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + swap_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + if swap_improved { + break; + } + } + } + } + + let mut merge_improved = true; + let mut merge_iterations = 0; + + while merge_improved && merge_iterations < max_merge_iterations { + merge_improved = false; + merge_iterations += 1; + + for i in 0..best_routes.len() { + if merge_improved { + break; + } + + for j in 0..best_routes.len() { + if i == j { + continue; + } + + let route1 = &best_routes[i]; + let route2 = &best_routes[j]; + + if route1.len() <= 2 || route2.len() <= 2 { + continue; + } + + let route1_demand: i32 = route1[1..route1.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + let route2_demand: i32 = route2[1..route2.len() - 1] + .iter() + .map(|&n| demands[n]) + .sum(); + + if route1_demand + route2_demand <= max_capacity { + let mut best_insertion_pos = None; + let mut best_insertion_delta = i32::MAX; + + for &node in &route2[1..route2.len() - 1] { + for pos in 1..route1.len() { + let prev = route1[pos - 1]; + let next = route1[pos]; + + let insertion_delta = distance_matrix[prev][node] + + distance_matrix[node][next] + - distance_matrix[prev][next]; + + if insertion_delta < best_insertion_delta { + let mut test_route = route1.clone(); + test_route.insert(pos, node); + + if is_route_feasible( + &test_route, + 
distance_matrix, + service_time, + ready_times, + due_times, + ) { + best_insertion_pos = Some(pos); + best_insertion_delta = insertion_delta; + } + } + } + } + + if let Some(pos) = best_insertion_pos { + let mut new_route = route1.clone(); + + for (idx, &node) in + route2[1..route2.len() - 1].iter().enumerate() + { + new_route.insert(pos + idx, node); + } + + if is_route_feasible( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + // Apply 2-opt to the merged route + new_route = apply_2opt( + &new_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + let new_distance = + calculate_route_distance(&new_route, distance_matrix); + let old_distance = + calculate_route_distance(route1, distance_matrix) + + calculate_route_distance(route2, distance_matrix); + + if new_distance < old_distance { + let mut new_routes = best_routes.clone(); + new_routes[i] = new_route; + new_routes.remove(j); + + match calc_routes_total_distance( + num_nodes, + max_capacity, + demands, + distance_matrix, + &new_routes, + service_time, + ready_times, + due_times, + ) { + Ok(total_distance) => { + if total_distance < best_distance { + best_distance = total_distance; + best_routes = new_routes; + merge_improved = true; + improved = true; + break; + } + } + Err(_) => continue, + } + } + } + } + } + } + } + } + } + + best_routes + } + + fn is_route_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if route.len() == 2 && route[0] == 0 && route[1] == 0 { + return true; + } + + let mut curr_time = 0; + let mut curr_node = 0; + + for &next_node in route.iter().skip(1) { + curr_time += distance_matrix[curr_node][next_node]; + + if curr_time > due_times[next_node] { + return false; + } + + curr_time = curr_time.max(ready_times[next_node]); + + if next_node != 0 { + curr_time += service_time; + } + + curr_node = next_node; + } + + true + } + } +} diff --git 
a/tig-algorithms/src/vehicle_routing/simple_ls_zero/README.md b/tig-algorithms/src/vehicle_routing/simple_ls_zero/README.md new file mode 100644 index 0000000..8bb8e53 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/simple_ls_zero/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** simple_ls_zero +* **Copyright:** 2025 Thibaut Vidal, Rafael Martinelli +* **Identity of Submitter:** Thibaut Vidal +* **Identity of Creator of Algorithmic Method:** null +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/simple_ls_zero/mod.rs b/tig-algorithms/src/vehicle_routing/simple_ls_zero/mod.rs new file mode 100644 index 0000000..f47a838 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/simple_ls_zero/mod.rs @@ -0,0 +1,988 @@ +/* +REFERENCES AND ACKNOWLEDGMENTS + +This implementation is based on or inspired by existing work. +This is a Rust re-implementation of the local search component of the HGS algorithm, +adapted to the VRP with time windows: + +Vidal, T., Crainic, T. G., Gendreau, M., Lahrichi, N., & Rei, W. (2012). +A hybrid genetic algorithm for multidepot and periodic vehicle routing problems. +Operations Research, 60(3), 611–624. +https://doi.org/10.1287/opre.1120.1048 + +Vidal, T. (2022). +Hybrid genetic search for the CVRP: Open-source implementation and SWAP* neighborhood. +Computers and Operations Research, 140, 105643. 
+https://doi.org/10.1016/j.cor.2021.105643 + +*/ + +use serde_json::{Map, Value}; +use std::cmp::{max, min}; +use std::collections::BTreeSet; +use std::mem::replace; +use std::mem::take; +use tig_challenges::vehicle_routing::*; + +pub struct MoveArgs { + pub route1: usize, + pub pos1: usize, + pub route2: usize, + pub pos2: usize, + pub profit: isize, +} + +impl MoveArgs { + pub fn new(route1: usize, pos1: usize, route2: usize, pos2: usize) -> Self { + Self { + route1, + pos1, + route2, + pos2, + profit: 0, + } + } +} + +pub struct Parameters { + pub file_name: String, + pub seed: [u8; 32], + pub initial_penalty: isize, + pub granularity: usize, +} + +pub struct Data { + pub seed: [u8; 32], + pub nb_nodes: usize, + pub nb_routes: usize, + pub demands: Vec, + pub max_capacity: isize, + pub distance_matrix: Vec>, + pub service_times: Vec, + pub start_tw: Vec, + pub end_tw: Vec, + pub penalty_capacity: isize, + pub penalty_tw: isize, + pub granularity: usize, +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct Sequence { + pub tau_minus: isize, + pub tau_plus: isize, + pub tmin: isize, + pub tw: isize, + pub total_service_duration: isize, + pub load: isize, + pub distance: isize, + pub first_node: usize, + pub last_node: usize, +} + +#[inline(always)] +fn penalty(x: isize, coef: isize) -> isize { + if x > 0 { + x * coef + } else { + 0 + } +} + +#[inline(always)] +fn dm(dm: &[Vec], i: usize, j: usize) -> isize { + dm[i][j] +} + +impl Sequence { + #[inline(always)] + pub fn initialize(&mut self, data: &Data, node: usize) { + let st = data.start_tw[node]; + let et = data.end_tw[node]; + let svc = data.service_times[node]; + let ld = data.demands[node]; + self.tau_minus = st; + self.tau_plus = et; + self.tmin = svc; + self.tw = 0; + self.total_service_duration = svc; + self.load = ld; + self.distance = 0; + self.first_node = node; + self.last_node = node; + } + + #[inline(always)] + pub fn join2(data: &Data, s1: &Sequence, s2: &Sequence) -> Sequence { + let travel = 
dm(&data.distance_matrix, s1.last_node, s2.first_node); + let distance = s1.distance + s2.distance + travel; + let temp = travel + s1.tmin - s1.tw; + + let wtij = max(s2.tau_minus - temp - s1.tau_plus, 0); + let twij = max(temp + s1.tau_minus - s2.tau_plus, 0); + let tw = s1.tw + s2.tw + twij; + let tmin = temp + s1.tw + s2.tmin + wtij; + let tau_minus = max(s2.tau_minus - temp - wtij, s1.tau_minus); + let tau_plus = min(s2.tau_plus - temp + twij, s1.tau_plus); + let load = s1.load + s2.load; + + Sequence { + tau_minus, + tau_plus, + tmin, + tw, + total_service_duration: s1.total_service_duration + s2.total_service_duration, + load, + distance, + first_node: s1.first_node, + last_node: s2.last_node, + } + } + + #[inline(always)] + pub fn singleton(data: &Data, node: usize) -> Sequence { + let mut s = Sequence::default(); + s.initialize(data, node); + s + } + + #[inline(always)] + pub fn eval(&self, data: &Data) -> isize { + self.distance + + penalty(self.load - data.max_capacity, data.penalty_capacity) + + penalty(self.tw, data.penalty_tw) + } + + #[inline(always)] + pub fn eval2(data: &Data, s1: &Sequence, s2: &Sequence) -> isize { + let travel = dm(&data.distance_matrix, s1.last_node, s2.first_node); + let distance = s1.distance + s2.distance + travel; + let temp = s1.tmin - s1.tw + travel; + let tw_viol = s1.tw + s2.tw + max(s1.tau_minus - s2.tau_plus + temp, 0); + let load = s1.load + s2.load; + + distance + + penalty(load - data.max_capacity, data.penalty_capacity) + + penalty(tw_viol, data.penalty_tw) + } + + #[inline(always)] + pub fn eval3(data: &Data, s1: &Sequence, s2: &Sequence, s3: &Sequence) -> isize { + let travel12 = dm(&data.distance_matrix, s1.last_node, s2.first_node); + let distance12 = s1.distance + s2.distance + travel12; + let mut temp = travel12 + s1.tmin - s1.tw; + + let wtij = max(s2.tau_minus - temp - s1.tau_plus, 0); + let twij = max(temp + s1.tau_minus - s2.tau_plus, 0); + let tw_viol12 = s1.tw + s2.tw + twij; + let tmin12 = temp + s1.tw 
+ s2.tmin + wtij; + let tau_m12 = max(s2.tau_minus - temp - wtij, s1.tau_minus); + let tau_p12 = min(s2.tau_plus - temp + twij, s1.tau_plus); + + let travel23 = dm(&data.distance_matrix, s2.last_node, s3.first_node); + let distance = distance12 + s3.distance + travel23; + temp = travel23 + tmin12 - tw_viol12; + + let tw_viol = tw_viol12 + s3.tw + max(tau_m12 - s3.tau_plus + temp, 0); + let _tmin123 = tw_viol12 + s3.tmin + max(s3.tau_minus - tau_p12, temp); + let load = s1.load + s2.load + s3.load; + + distance + + penalty(load - data.max_capacity, data.penalty_capacity) + + penalty(tw_viol, data.penalty_tw) + } + + #[inline(always)] + pub fn eval_n(data: &Data, chain: &[Sequence]) -> isize { + debug_assert!(chain.len() >= 3); + let mut agg = chain[0]; + + for s in &chain[1..chain.len() - 1] { + agg = Sequence::join2(data, &agg, s); + } + let last = &chain[chain.len() - 1]; + Sequence::eval2(data, &agg, last) + } +} + +pub struct TigLoader; + +impl TigLoader { + pub fn load(params: Parameters, challenge: &Challenge) -> Data { + Data { + seed: challenge.seed, + nb_nodes: challenge.difficulty.num_nodes, + nb_routes: challenge.difficulty.num_nodes, + demands: challenge.demands.iter().map(|&d| d as isize).collect(), + max_capacity: challenge.max_capacity as isize, + distance_matrix: challenge + .distance_matrix + .iter() + .map(|row| row.iter().map(|&x| x as isize).collect()) + .collect(), + service_times: std::iter::once(0) + .chain( + std::iter::repeat(challenge.service_time as isize) + .take(challenge.difficulty.num_nodes), + ) + .collect(), + start_tw: challenge.ready_times.iter().map(|&d| d as isize).collect(), + end_tw: challenge.due_times.iter().map(|&d| d as isize).collect(), + penalty_capacity: params.initial_penalty, + penalty_tw: params.initial_penalty, + granularity: params.granularity, + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct Node { + pub id: usize, + pub seq0_i: Sequence, + pub seqi_n: Sequence, + pub seq1: Sequence, + pub seq12: 
Sequence, + pub seq21: Sequence, +} + +impl Node { + pub fn new(id: usize) -> Self { + Self { + id, + seq0_i: Sequence::default(), + seqi_n: Sequence::default(), + seq1: Sequence::default(), + seq12: Sequence::default(), + seq21: Sequence::default(), + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct Route { + pub cost: isize, + pub load: isize, + pub tw: isize, + pub nodes: Vec, +} + +impl Route { + pub fn new(data: &Data, node_ids: &Vec) -> Self { + let mut route = Route { + cost: 0, + load: 0, + tw: 0, + nodes: Vec::new(), + }; + + for &node_id in node_ids { + route.nodes.push(Node::new(node_id)); + } + + route.update_metrics(data); + route + } + + pub fn update_metrics(&mut self, data: &Data) { + self.cost = 0; + self.load = 0; + self.tw = 0; + + let mut current_time = 0; + for i in 0..self.nodes.len() - 1 { + let from = self.nodes[i].id; + let to = self.nodes[i + 1].id; + + self.cost += data.distance_matrix[from][to]; + + if to != 0 { + self.load += data.demands[to]; + } + + current_time += data.distance_matrix[from][to]; + if current_time < data.start_tw[to] { + current_time = data.start_tw[to]; + } + if current_time > data.end_tw[to] { + self.tw += current_time - data.end_tw[to]; + current_time = data.end_tw[to]; + } + current_time += data.service_times[to]; + } + } + + pub fn preprocess(&mut self, data: &Data) { + let len = self.nodes.len(); + + let mut acc_fwd = Sequence::singleton(data, self.nodes[0].id); + self.nodes[0].seq0_i = acc_fwd; + for pos in 1..len { + let id = self.nodes[pos].id; + acc_fwd = Sequence::join2(data, &acc_fwd, &Sequence::singleton(data, id)); + self.nodes[pos].seq0_i = acc_fwd; + } + + let mut acc_bwd = Sequence::singleton(data, self.nodes[len - 1].id); + self.nodes[len - 1].seqi_n = acc_bwd; + for pos in (0..len - 1).rev() { + let id = self.nodes[pos].id; + acc_bwd = Sequence::join2(data, &Sequence::singleton(data, id), &acc_bwd); + self.nodes[pos].seqi_n = acc_bwd; + } + + for pos in 0..len { + let id = 
self.nodes[pos].id; + self.nodes[pos].seq1 = Sequence::singleton(data, id); + if pos + 1 < len { + let id_next = self.nodes[pos + 1].id; + self.nodes[pos].seq12 = Sequence::join2( + data, + &Sequence::singleton(data, id), + &Sequence::singleton(data, id_next), + ); + self.nodes[pos].seq21 = Sequence::join2( + data, + &Sequence::singleton(data, id_next), + &Sequence::singleton(data, id), + ); + } + } + + let end = self.nodes[len - 1].seq0_i; + self.load = end.load; + self.tw = end.tw; + self.cost = end.eval(data); + } + + pub fn to_string(&self) -> String { + let mut result = String::from("[ "); + for node in &self.nodes { + result.push_str(&format!("{} ", node.id)); + } + result.push_str(&format!( + "] (Cost: {}, Load: {}, TW: {})", + self.cost, self.load, self.tw + )); + result + } +} + +#[derive(Debug, Clone)] +pub struct Individual { + pub cost: isize, + pub routes: Vec, + pub node_route: Vec, + pub node_pos: Vec, +} + +impl Individual { + pub fn new(data: &Data, routes: Vec>) -> Self { + let mut ind = Individual { + cost: 0, + routes: routes.iter().map(|r| Route::new(data, r)).collect(), + node_route: vec![0; data.nb_nodes], + node_pos: vec![0; data.nb_nodes], + }; + + ind.cost = ind.routes.iter().map(|r| r.cost).sum(); + ind.update_node_mappings(); + + ind + } + + pub fn update_route_node_mappings(&mut self, route_id: usize) { + for (pos, node) in self.routes[route_id].nodes.iter().enumerate() { + self.node_route[node.id] = route_id; + self.node_pos[node.id] = pos; + } + } + + pub fn update_node_mappings(&mut self) { + for route_id in 0..self.routes.len() { + self.update_route_node_mappings(route_id); + } + } +} + +pub struct Swap; + +impl Swap { + #[inline(always)] + pub fn evaluate(data: &Data, ind: &Individual, args: &mut MoveArgs) { + let r1 = args.route1; + let r2 = args.route2; + let p1 = args.pos1; + let p2 = args.pos2; + + let new1 = Sequence::eval3( + data, + &ind.routes[r1].nodes[p1 - 1].seq0_i, + &ind.routes[r2].nodes[p2].seq1, + 
&ind.routes[r1].nodes[p1 + 1].seqi_n, + ); + + let new2 = Sequence::eval3( + data, + &ind.routes[r2].nodes[p2 - 1].seq0_i, + &ind.routes[r1].nodes[p1].seq1, + &ind.routes[r2].nodes[p2 + 1].seqi_n, + ); + + let old_cost = ind.routes[r1].cost + ind.routes[r2].cost; + let new_cost = new1 + new2; + args.profit = old_cost - new_cost; + } + + pub fn perform(data: &Data, ind: &mut Individual, args: &MoveArgs) { + let temp = take(&mut ind.routes[args.route1].nodes[args.pos1]); + ind.routes[args.route1].nodes[args.pos1] = + replace(&mut ind.routes[args.route2].nodes[args.pos2], temp); + ind.routes[args.route1].preprocess(&data); + ind.update_route_node_mappings(args.route1); + ind.routes[args.route2].preprocess(&data); + ind.update_route_node_mappings(args.route2); + ind.cost -= args.profit; + } +} + +pub struct Constructive; + +impl Constructive { + pub fn build_individual(data: &Data) -> Individual { + let mut routes = Vec::new(); + let mut nodes: Vec = (1..data.nb_nodes).collect(); + nodes.sort_by(|&a, &b| data.distance_matrix[0][a].cmp(&data.distance_matrix[0][b])); + let mut remaining: BTreeSet = nodes.iter().cloned().collect(); + + while let Some(node) = nodes.pop() { + if !remaining.remove(&node) { + continue; + } + let mut route = vec![0, node, 0]; + let mut route_demand = data.demands[node]; + + while let Some((best_node, best_pos)) = Self::find_best_insertion( + &route, + remaining + .iter() + .cloned() + .filter(|&n| route_demand + data.demands[n] <= data.max_capacity) + .collect(), + data, + ) { + remaining.remove(&best_node); + route_demand += data.demands[best_node]; + route.insert(best_pos, best_node); + } + + routes.push(route); + } + + Individual::new(data, routes) + } + + fn find_best_insertion( + route: &Vec, + remaining_nodes: Vec, + data: &Data, + ) -> Option<(usize, usize)> { + let alpha1 = 1; + let alpha2 = 0; + let lambda = 1; + + let mut best_c2 = None; + let mut best = None; + for insert_node in remaining_nodes { + let mut best_c1 = None; + let mut 
curr_time = 0; + let mut curr_node = 0; + for pos in 1..route.len() { + let next_node = route[pos]; + let new_arrival_time = data.start_tw[insert_node] + .max(curr_time + data.distance_matrix[curr_node][insert_node]); + if new_arrival_time > data.end_tw[insert_node] { + continue; + } + let old_arrival_time = data.start_tw[next_node] + .max(curr_time + data.distance_matrix[curr_node][next_node]); + + let c11 = data.distance_matrix[curr_node][insert_node] + + data.distance_matrix[insert_node][next_node] + - data.distance_matrix[curr_node][next_node]; + + let c12 = new_arrival_time - old_arrival_time; + let c1 = -(alpha1 * c11 + alpha2 * c12); + let c2 = lambda * data.distance_matrix[0][insert_node] + c1; + + let c1_is_better = match best_c1 { + None => true, + Some(x) => c1 > x, + }; + + let c2_is_better = match best_c2 { + None => true, + Some(x) => c2 > x, + }; + + if c1_is_better + && c2_is_better + && Self::is_feasible( + route, + insert_node, + new_arrival_time + data.service_times[next_node], + pos, + data, + ) + { + best_c1 = Some(c1); + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + curr_time = data.start_tw[next_node] + .max(curr_time + data.distance_matrix[curr_node][next_node]) + + data.service_times[next_node]; + curr_node = next_node; + } + } + best + } + + fn is_feasible( + route: &Vec, + mut curr_node: usize, + mut curr_time: isize, + start_pos: usize, + data: &Data, + ) -> bool { + let mut valid = true; + for pos in start_pos..route.len() { + let next_node = route[pos]; + curr_time += data.distance_matrix[curr_node][next_node]; + if curr_time > data.end_tw[route[pos]] { + valid = false; + break; + } + curr_time = curr_time.max(data.start_tw[next_node]) + data.service_times[next_node]; + curr_node = next_node; + } + valid + } +} + +pub struct Relocate; + +impl Relocate { + #[inline(always)] + pub fn evaluate(data: &Data, ind: &Individual, args: &mut MoveArgs) { + let r1 = args.route1; + let r2 = args.route2; + let p1 = args.pos1; + let 
p2 = args.pos2; + + let new1 = Sequence::eval2( + data, + &ind.routes[r1].nodes[p1 - 1].seq0_i, + &ind.routes[r1].nodes[p1 + 1].seqi_n, + ); + + let new2 = Sequence::eval3( + data, + &ind.routes[r2].nodes[p2 - 1].seq0_i, + &ind.routes[r1].nodes[p1].seq1, + &ind.routes[r2].nodes[p2].seqi_n, + ); + + let old_cost = ind.routes[r1].cost + ind.routes[r2].cost; + let new_cost = new1 + new2; + args.profit = old_cost - new_cost; + } + + pub fn perform(data: &Data, ind: &mut Individual, args: &MoveArgs) { + let element = ind.routes[args.route1].nodes.remove(args.pos1); + ind.routes[args.route2].nodes.insert(args.pos2, element); + ind.routes[args.route1].preprocess(&data); + ind.update_route_node_mappings(args.route1); + ind.routes[args.route2].preprocess(&data); + ind.update_route_node_mappings(args.route2); + ind.cost -= args.profit; + } +} + +pub struct TwoOptStar; + +impl TwoOptStar { + #[inline(always)] + pub fn evaluate(data: &Data, ind: &Individual, args: &mut MoveArgs) { + let r1 = args.route1; + let r2 = args.route2; + let p1 = args.pos1; + let p2 = args.pos2; + + let new1 = Sequence::eval2( + data, + &ind.routes[r1].nodes[p1 - 1].seq0_i, + &ind.routes[r2].nodes[p2].seqi_n, + ); + + let new2 = Sequence::eval2( + data, + &ind.routes[r2].nodes[p2 - 1].seq0_i, + &ind.routes[r1].nodes[p1].seqi_n, + ); + + let old_cost = ind.routes[r1].cost + ind.routes[r2].cost; + let new_cost = new1 + new2; + args.profit = old_cost - new_cost; + } + + pub fn perform(data: &Data, ind: &mut Individual, args: &MoveArgs) { + let mut suffix1 = ind.routes[args.route1].nodes.split_off(args.pos1); + let mut suffix2 = ind.routes[args.route2].nodes.split_off(args.pos2); + ind.routes[args.route1].nodes.append(&mut suffix2); + ind.routes[args.route2].nodes.append(&mut suffix1); + ind.routes[args.route1].preprocess(&data); + ind.update_route_node_mappings(args.route1); + ind.routes[args.route2].preprocess(&data); + ind.update_route_node_mappings(args.route2); + ind.cost -= args.profit; + } +} + 
+pub struct LocalSearch<'a> { + pub data: &'a Data, + + pub neighbors_before: Vec>, +} + +impl<'a> LocalSearch<'a> { + pub fn new(data: &'a Data) -> Self { + let mut neighbors_before: Vec> = vec![Vec::new(); data.nb_nodes]; + + for i in 1..data.nb_nodes { + let mut prox: Vec<(isize, usize)> = Vec::with_capacity(data.nb_nodes - 2); + for j in 1..data.nb_nodes { + if j == i { + continue; + } + let tji = data.distance_matrix[j][i]; + let wait = (data.start_tw[i] - tji - data.service_times[j] - data.end_tw[j]).max(0); + let late = (data.start_tw[j] + data.service_times[j] + tji - data.end_tw[i]).max(0); + let proxy10 = 10 * tji + 2 * wait + 10 * late; + prox.push((proxy10, j)); + } + + prox.sort_by_key(|&(p, _)| p); + let keep = min(data.granularity, data.nb_nodes - 2); + neighbors_before[i] = prox[..keep].iter().map(|&(_, j)| j).collect(); + } + + Self { + data, + neighbors_before, + } + } + + pub fn run_intra_route_relocate( + &mut self, + ind: &mut Individual, + r1: usize, + pos1: usize, + ) -> bool { + let route = &ind.routes[r1]; + let len = route.nodes.len(); + if len < pos1 + 4 { + return false; + } + + debug_assert!(pos1 > 0); + debug_assert!(ind.routes[r1].nodes[pos1].id != 0); + + let mut left_excl: Vec = vec![Sequence::default(); len]; + let mut acc_left = route.nodes[0].seq0_i; + for p in 1..len { + left_excl[p] = acc_left; + if p != pos1 { + acc_left = Sequence::join2(self.data, &acc_left, &route.nodes[p].seq1); + } + } + + let mut right_excl: Vec = vec![Sequence::default(); len]; + let mut acc_right = route.nodes[len - 1].seq1; + right_excl[len - 1] = acc_right; + for p in (1..len - 1).rev() { + if p != pos1 { + acc_right = Sequence::join2(self.data, &route.nodes[p].seq1, &acc_right); + } + right_excl[p] = acc_right; + } + + let old_cost = route.cost; + let mut best_cost = old_cost; + let mut best_pos: Option = None; + + for t in 1..len { + if t == pos1 || t == pos1 + 1 { + continue; + } + let new_cost = Sequence::eval3( + self.data, + &left_excl[t], + 
&route.nodes[pos1].seq1, + &right_excl[t], + ); + + if new_cost < best_cost { + best_cost = new_cost; + best_pos = Some(t); + } + } + + if let Some(mypos) = best_pos { + let insert_pos = if mypos > pos1 { mypos - 1 } else { mypos }; + let elem = ind.routes[r1].nodes.remove(pos1); + ind.routes[r1].nodes.insert(insert_pos, elem); + ind.routes[r1].preprocess(self.data); + ind.update_route_node_mappings(r1); + ind.cost += ind.routes[r1].cost - old_cost; + + return true; + } else { + return false; + }; + } + + pub fn run_intra_route_swap_right( + &mut self, + ind: &mut Individual, + r1: usize, + pos1: usize, + ) -> bool { + let route = &ind.routes[r1]; + let len = route.nodes.len(); + if len < pos1 + 4 { + return false; + } + + debug_assert!(pos1 > 0); + debug_assert!(ind.routes[r1].nodes[pos1].id != 0); + + let old_cost = route.cost; + let mut best_cost = old_cost; + let mut best_pos: Option = None; + + let mut acc_mid = route.nodes[pos1 + 1].seq1; + for pos2 in (pos1 + 2)..(len - 1) { + let new_cost = Sequence::eval_n( + self.data, + &[ + route.nodes[pos1 - 1].seq0_i, + route.nodes[pos2].seq1, + acc_mid, + route.nodes[pos1].seq1, + route.nodes[pos2 + 1].seqi_n, + ], + ); + if new_cost < best_cost { + best_cost = new_cost; + best_pos = Some(pos2); + } + acc_mid = Sequence::join2(self.data, &acc_mid, &route.nodes[pos2].seq1); + } + + if let Some(mypos) = best_pos { + ind.routes[r1].nodes.swap(pos1, mypos); + ind.routes[r1].preprocess(self.data); + ind.update_route_node_mappings(r1); + ind.cost += ind.routes[r1].cost - old_cost; + + return true; + } else { + return false; + }; + } + + pub fn run_2opt(&mut self, ind: &mut Individual, r1: usize, pos1: usize) -> bool { + let route = &ind.routes[r1]; + let len = route.nodes.len(); + if len < pos1 + 3 { + return false; + } + + debug_assert!(pos1 > 0); + debug_assert!(ind.routes[r1].nodes[pos1].id != 0); + + let old_cost = route.cost; + let mut best_cost = old_cost; + let mut best_pos: Option = None; + + let mut mid_rev = 
route.nodes[pos1].seq21; + for pos2 in (pos1 + 1)..(len - 1) { + let new_cost = Sequence::eval3( + self.data, + &route.nodes[pos1 - 1].seq0_i, + &mid_rev, + &route.nodes[pos2 + 1].seqi_n, + ); + if new_cost < best_cost { + best_cost = new_cost; + best_pos = Some(pos2); + } + if pos2 + 1 < len - 1 { + mid_rev = Sequence::join2(self.data, &route.nodes[pos2 + 1].seq1, &mid_rev); + } + } + + if let Some(mypos) = best_pos { + ind.routes[r1].nodes[pos1..=mypos].reverse(); + ind.routes[r1].preprocess(self.data); + ind.update_route_node_mappings(r1); + ind.cost += ind.routes[r1].cost - old_cost; + + return true; + } else { + return false; + }; + } + + pub fn runls(&mut self, ind: &mut Individual) { + for route in &mut ind.routes { + route.preprocess(self.data); + } + + let mut improved = true; + let mut loop_id = 0; + while improved { + improved = false; + loop_id += 1; + let c1_order: Vec = (1..self.data.nb_nodes).collect(); + for c1 in c1_order { + for &c2 in &self.neighbors_before[c1] { + let r1 = ind.node_route[c1]; + let pos1 = ind.node_pos[c1]; + let r2 = ind.node_route[c2]; + let pos2 = ind.node_pos[c2] + 1; + if r1 == r2 { + continue; + } + let mut args = MoveArgs::new(r1, pos1, r2, pos2); + + if ind.routes[r2].nodes[pos2].id != 0 { + Swap::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + Swap::perform(self.data, ind, &args); + improved = true; + continue; + } + } + + Relocate::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + Relocate::perform(self.data, ind, &args); + improved = true; + continue; + } + + TwoOptStar::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + TwoOptStar::perform(self.data, ind, &args); + improved = true; + continue; + } + } + + let mut tested_empty_route = false; + for r2 in 0..ind.routes.len() { + let r1 = ind.node_route[c1]; + let pos1 = ind.node_pos[c1]; + let pos2 = 1; + if r1 == r2 { + continue; + } + + if ind.routes[r2].nodes.len() == 2 { + if loop_id == 1 || tested_empty_route { + continue; + }; + 
tested_empty_route = true; + } + + let mut args = MoveArgs::new(r1, pos1, r2, pos2); + if ind.routes[r2].nodes[pos2].id != 0 { + Swap::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + Swap::perform(self.data, ind, &args); + improved = true; + continue; + } + } + + Relocate::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + Relocate::perform(self.data, ind, &args); + improved = true; + continue; + } + + TwoOptStar::evaluate(self.data, ind, &mut args); + if args.profit > 0 { + TwoOptStar::perform(self.data, ind, &args); + improved = true; + continue; + } + } + + improved |= self.run_2opt(ind, ind.node_route[c1], ind.node_pos[c1]); + improved |= + self.run_intra_route_relocate(ind, ind.node_route[c1], ind.node_pos[c1]); + improved |= + self.run_intra_route_swap_right(ind, ind.node_route[c1], ind.node_pos[c1]); + } + } + } +} + +fn solve(data: Data) -> anyhow::Result> { + let mut individual = Constructive::build_individual(&data); + let mut my_local_search = LocalSearch::new(&data); + my_local_search.runls(&mut individual); + + let usize_routes = individual + .routes + .iter() + .map(|r| r.nodes.iter().map(|n| n.id).collect::>()) + .filter(|route| route.len() > 2) + .collect::>>(); + + Ok(Some(Solution { + routes: usize_routes, + })) +} + +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + let data = TigLoader::load( + Parameters { + file_name: String::new(), + seed: challenge.seed, + initial_penalty: 1000, + granularity: 30, + }, + &challenge, + ); + + match solve(data) { + Ok(Some(solution)) => { + let _ = save_solution(&solution); + } + Ok(None) => { + eprintln!("No solution found for the sub-instance."); + } + Err(e) => { + eprintln!("Error solving sub-instance: {}", e); + } + } + Ok(()) +} diff --git a/tig-algorithms/src/vehicle_routing/vrptw_ultimate/README.md b/tig-algorithms/src/vehicle_routing/vrptw_ultimate/README.md new 
file mode 100644 index 0000000..649ee6d --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/vrptw_ultimate/README.md @@ -0,0 +1,23 @@ +# TIG Code Submission + +## Submission Details + +* **Challenge Name:** vehicle_routing +* **Algorithm Name:** vrptw_ultimate +* **Copyright:** 2025 Rootz +* **Identity of Submitter:** Rootz +* **Identity of Creator of Algorithmic Method:** Rootz +* **Unique Algorithm Identifier (UAI):** null + +## License + +The files in this folder are under the following licenses: +* TIG Benchmarker Outbound License +* TIG Commercial License +* TIG Inbound Game License +* TIG Innovator Outbound Game License +* TIG Open Data License +* TIG THV Game License + +Copies of the licenses can be obtained at: +https://github.com/tig-foundation/tig-monorepo/tree/main/docs/licenses \ No newline at end of file diff --git a/tig-algorithms/src/vehicle_routing/vrptw_ultimate/mod.rs b/tig-algorithms/src/vehicle_routing/vrptw_ultimate/mod.rs new file mode 100644 index 0000000..51fc6d9 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/vrptw_ultimate/mod.rs @@ -0,0 +1,2438 @@ +use serde_json::{Map, Value}; +use tig_challenges::vehicle_routing::*; + +#[inline(always)] +pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, +) -> anyhow::Result<()> { + simple_solver::solve_challenge(challenge, save_solution, hyperparameters) +} + +mod utils { + + #[inline(always)] + pub fn calculate_route_demands(routes: &Vec>, demands: &Vec) -> Vec { + let mut out = Vec::with_capacity(routes.len()); + unsafe { + let dem = demands.as_slice(); + for route in routes { + let len = route.len(); + if len <= 2 { + out.push(0); + continue; + } + let mut s = 0i32; + let r = route.as_slice(); + let mut idx = 1usize; + let end = len - 1; + while idx < end { + let n = *r.get_unchecked(idx); + s += *dem.get_unchecked(n); + idx += 1; + } + out.push(s); + } + } + out + } + + #[inline(always)] + pub fn 
find_best_insertion( + route: &Vec, + remaining_nodes: &[usize], + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, usize)> { + let lambda = 1; + + let mut best_c2: Option = None; + let mut best: Option<(usize, usize)> = None; + + let dm = distance_matrix; + let rt = &ready_times[..]; + let dt = &due_times[..]; + let len = route.len(); + + unsafe { + let row0 = dm.get_unchecked(0); + for &insert_node in remaining_nodes { + let base_c2 = lambda * *row0.get_unchecked(insert_node); + let row_insert = dm.get_unchecked(insert_node); + let ready_ins = *rt.get_unchecked(insert_node); + let due_ins = *dt.get_unchecked(insert_node); + + let mut curr_time: i32 = 0; + let mut curr_node: usize = 0; + + for pos in 1..len { + let next_node = *route.get_unchecked(pos); + let row_curr = dm.get_unchecked(curr_node); + + let travel_to_insert = *row_curr.get_unchecked(insert_node); + let travel_to_next = *row_curr.get_unchecked(next_node); + let mut new_arrival_time = curr_time + travel_to_insert; + if new_arrival_time < ready_ins { + new_arrival_time = ready_ins; + } + + if new_arrival_time > due_ins { + let mut tmp = curr_time + travel_to_next; + let r_next = *rt.get_unchecked(next_node); + if tmp < r_next { + tmp = r_next; + } + curr_time = tmp + service_time; + curr_node = next_node; + continue; + } + + let c11 = + travel_to_insert + *row_insert.get_unchecked(next_node) - travel_to_next; + + let c1 = -c11; + let c2 = base_c2 + c1; + + let c2_is_better = match best_c2 { + None => true, + Some(x) => c2 > x, + }; + + if c2_is_better + && is_feasible( + route, + distance_matrix, + service_time, + ready_times, + due_times, + insert_node, + new_arrival_time + service_time, + pos, + ) + { + best_c2 = Some(c2); + best = Some((insert_node, pos)); + } + + let mut tmp = curr_time + travel_to_next; + let r_next2 = *rt.get_unchecked(next_node); + if tmp < r_next2 { + tmp = r_next2; + } + curr_time = tmp + service_time; + curr_node = 
next_node; + } + } + } + best + } + + #[inline(always)] + pub fn is_feasible( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + mut curr_node: usize, + mut curr_time: i32, + start_pos: usize, + ) -> bool { + let dm = distance_matrix; + let rt = ready_times.as_slice(); + let dt = due_times.as_slice(); + let len = route.len(); + let mut pos = start_pos; + unsafe { + let r = route.as_slice(); + while pos < len { + let next_node = *r.get_unchecked(pos); + let row_curr = dm.get_unchecked(curr_node); + curr_time += *row_curr.get_unchecked(next_node); + if next_node != 0 { + if curr_time > *dt.get_unchecked(next_node) { + return false; + } + let ready = *rt.get_unchecked(next_node); + if curr_time < ready { + curr_time = ready; + } + curr_time += service_time; + } + curr_node = next_node; + pos += 1; + } + } + true + } + + #[inline(always)] + pub fn find_best_insertion_in_route( + route: &Vec, + node: usize, + _demands: &Vec, + _max_capacity: i32, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + let len = route.len(); + if len < 2 { + return None; + } + let mut best_pos = None; + let mut best_delta = i32::MAX; + + unsafe { + let dm = distance_matrix; + let row_node = dm.get_unchecked(node); + let rt = ready_times.as_slice(); + let dt = due_times.as_slice(); + let r = route.as_slice(); + + let mut curr_time: i32 = 0; + let mut curr_node: usize = 0; + + let mut pos = 1usize; + while pos < len { + let next_node = *r.get_unchecked(pos); + let row_curr = dm.get_unchecked(curr_node); + + let travel_to_next = *row_curr.get_unchecked(next_node); + let travel_to_insert = *row_curr.get_unchecked(node); + + let mut new_arrival_time = curr_time + travel_to_insert; + let ready_ins = *rt.get_unchecked(node); + if new_arrival_time < ready_ins { + new_arrival_time = ready_ins; + } + + if new_arrival_time <= *dt.get_unchecked(node) { + let delta = + travel_to_insert 
+ *row_node.get_unchecked(next_node) - travel_to_next; + if delta < best_delta { + let suffix_ok = if pos == len - 1 { + let departure = new_arrival_time + service_time; + let row_ins = dm.get_unchecked(node); + departure + *row_ins.get_unchecked(0) <= *dt.get_unchecked(0) + } else { + is_feasible( + route, + dm, + service_time, + ready_times, + due_times, + node, + new_arrival_time + service_time, + pos, + ) + }; + if suffix_ok { + best_delta = delta; + best_pos = Some(pos); + } + } + } + + let mut tmp = curr_time + travel_to_next; + let r_next = *rt.get_unchecked(next_node); + if tmp < r_next { + tmp = r_next; + } + curr_time = tmp + service_time; + curr_node = next_node; + + pos += 1; + } + } + + best_pos.map(|p| (p, best_delta)) + } + + #[inline(always)] + pub fn find_best_insertion_pair_in_route( + route: &Vec, + node_a: usize, + node_b: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Option<(usize, i32)> { + if node_a == 0 || node_b == 0 || route.len() < 3 { + return None; + } + let mut best_pos = None; + let mut best_delta = i32::MAX; + let dm = distance_matrix; + let len = route.len(); + unsafe { + let r = route.as_slice(); + let row_a = dm.get_unchecked(node_a); + let row_b = dm.get_unchecked(node_b); + let mut pos = 1usize; + while pos < len { + let prev = *r.get_unchecked(pos - 1); + let next = *r.get_unchecked(pos); + let row_prev = dm.get_unchecked(prev); + + let delta = *row_prev.get_unchecked(node_a) + + *row_a.get_unchecked(node_b) + + *row_b.get_unchecked(next) + - *row_prev.get_unchecked(next); + + let mut candidate: Vec = Vec::with_capacity(len + 2); + candidate.extend_from_slice(&route[..pos]); + candidate.push(node_a); + candidate.push(node_b); + candidate.extend_from_slice(&route[pos..]); + + if is_route_time_feasible_fast(&candidate, dm, service_time, ready_times, due_times) + { + if delta < best_delta { + best_delta = delta; + best_pos = Some(pos); + } + } + + pos += 1; + } + } + 
best_pos.map(|p| (p, best_delta)) + } + + #[inline(always)] + pub fn calculate_route_distance(route: &Vec, distance_matrix: &Vec>) -> i32 { + let len = route.len(); + if len < 2 { + return 0; + } + let mut distance: i32 = 0; + let dm = distance_matrix; + unsafe { + let r = route.as_slice(); + let mut i = 0usize; + while i + 1 < len { + let a = *r.get_unchecked(i); + let b = *r.get_unchecked(i + 1); + let row_a = dm.get_unchecked(a); + distance += *row_a.get_unchecked(b); + i += 1; + } + } + distance + } + + #[inline(always)] + pub fn apply_efficient_2opt( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() < 4 { + return route.clone(); + } + + let mut best_route = route.clone(); + let mut improved = true; + let mut iteration = 0; + let max_iterations = if route.len() > 80 { + 40 + } else { + (route.len() / 2).min(35) + }; + + while improved && iteration < max_iterations { + improved = false; + iteration += 1; + + let dm = distance_matrix; + let len = best_route.len(); + let end = len - 1; + for i in 1..end - 1 { + let prev = best_route[i - 1]; + let a = best_route[i]; + let row_prev = unsafe { dm.get_unchecked(prev) }; + let row_a = unsafe { dm.get_unchecked(a) }; + for j in i + 2..end { + let b = best_route[j]; + let next = best_route[j + 1]; + let delta = unsafe { + let row_b = dm.get_unchecked(b); + (*row_prev.get_unchecked(b) + *row_a.get_unchecked(next)) + - (*row_prev.get_unchecked(a) + *row_b.get_unchecked(next)) + }; + if delta >= 0 { + continue; + } + + best_route[i..=j].reverse(); + + if is_route_time_feasible_fast( + &best_route, + dm, + service_time, + ready_times, + due_times, + ) { + improved = true; + break; + } else { + best_route[i..=j].reverse(); + } + } + if improved { + break; + } + } + } + + best_route + } + + #[inline(always)] + pub fn is_route_time_feasible_fast( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, 
+ ) -> bool { + let len = route.len(); + if len < 3 || route[0] != 0 || route[len - 1] != 0 { + return false; + } + let dm = distance_matrix; + let rt = &ready_times[..]; + let dt = &due_times[..]; + let mut curr_time: i32 = 0; + let mut curr_node: usize = 0; + + if len > 2 { + unsafe { + let r = route.as_slice(); + for idx in 1..len - 1 { + let next_node = *r.get_unchecked(idx); + let row_curr = dm.get_unchecked(curr_node); + curr_time += *row_curr.get_unchecked(next_node); + if curr_time > *dt.get_unchecked(next_node) { + return false; + } + let rdy = *rt.get_unchecked(next_node); + if curr_time < rdy { + curr_time = rdy; + } + curr_time += service_time; + curr_node = next_node; + } + } + } + + unsafe { + let row_curr = dm.get_unchecked(curr_node); + curr_time += *row_curr.get_unchecked(0); + curr_time <= *dt.get_unchecked(0) + } + } + + #[inline(always)] + pub fn is_route_time_feasible_strict( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + is_route_time_feasible_fast(route, distance_matrix, service_time, ready_times, due_times) + } + + #[inline(always)] + pub fn apply_size_filtered_local_search( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() <= 4 { + return route.clone(); + } + + apply_smart_local_search(route, distance_matrix, service_time, ready_times, due_times) + } + + #[inline(always)] + pub fn apply_smart_local_search( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() <= 3 { + return route.clone(); + } + + let mut current_route = + apply_efficient_2opt(route, distance_matrix, service_time, ready_times, due_times); + + if route.len() > 4 { + current_route = apply_limited_or_opt( + ¤t_route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + } + + current_route + } + + #[inline(always)] + fn 
apply_limited_or_opt( + route: &Vec, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec { + if route.len() < 4 { + return route.clone(); + } + + let dm = distance_matrix; + let mut best_route = route.clone(); + let mut best_distance = calculate_route_distance(&best_route, dm); + let mut improved = true; + let mut iteration = 0; + let max_iterations = if best_route.len() > 100 { 4 } else { 6 }; + + let mut candidate: Vec = Vec::with_capacity(best_route.len()); + + while improved && iteration < max_iterations { + improved = false; + iteration += 1; + + let max_seg = if best_route.len() > 80 { 2 } else { 3 }; + for segment_size in 1..=max_seg { + let len = best_route.len(); + for i in 1..len - segment_size { + if i + segment_size >= len - 1 { + continue; + } + + let first = best_route[i]; + let last = best_route[i + segment_size - 1]; + let prev_before = best_route[i - 1]; + let after_segment = best_route[i + segment_size]; + + for insert_pos in 1..len { + if insert_pos >= i && insert_pos <= i + segment_size { + continue; + } + + let prev_ins = best_route[insert_pos - 1]; + let succ_ins = best_route[insert_pos]; + + let (delta_remove_segment, delta_insert_segment) = unsafe { + let row_prev_before = dm.get_unchecked(prev_before); + let row_last = dm.get_unchecked(last); + let row_prev_ins = dm.get_unchecked(prev_ins); + ( + *row_prev_before.get_unchecked(after_segment) + - *row_prev_before.get_unchecked(first) + - *row_last.get_unchecked(after_segment), + *row_prev_ins.get_unchecked(first) + + *row_last.get_unchecked(succ_ins) + - *row_prev_ins.get_unchecked(succ_ins), + ) + }; + if delta_remove_segment + delta_insert_segment >= 0 { + continue; + } + + candidate.clear(); + if insert_pos < i { + candidate.extend_from_slice(&best_route[..insert_pos]); + candidate.extend_from_slice(&best_route[i..i + segment_size]); + candidate.extend_from_slice(&best_route[insert_pos..i]); + candidate.extend_from_slice(&best_route[i + 
segment_size..]); + } else { + candidate.extend_from_slice(&best_route[..i]); + candidate.extend_from_slice(&best_route[i + segment_size..insert_pos]); + candidate.extend_from_slice(&best_route[i..i + segment_size]); + candidate.extend_from_slice(&best_route[insert_pos..]); + } + + if candidate.len() >= 3 + && candidate[0] == 0 + && candidate[candidate.len() - 1] == 0 + && is_route_time_feasible_fast( + &candidate, + dm, + service_time, + ready_times, + due_times, + ) + { + let new_distance = + best_distance + delta_remove_segment + delta_insert_segment; + if new_distance < best_distance { + best_distance = new_distance; + best_route.clear(); + best_route.extend_from_slice(&candidate); + candidate.clear(); + improved = true; + break; + } + } + } + if improved { + break; + } + } + if improved { + break; + } + } + } + + best_route + } + + #[inline(always)] + pub fn check_swap_time_feasibility( + route: &Vec, + pos: usize, + new_node: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> bool { + if pos == 0 || pos >= route.len() - 1 { + return false; + } + + let prev_node = route[pos - 1]; + let next_node = route[pos + 1]; + + let mut curr_time = 0i32; + unsafe { + let r = route.as_slice(); + let dm = distance_matrix; + let rt = &ready_times[..]; + let dt = &due_times[..]; + + let mut i = 1usize; + while i < pos { + let from = *r.get_unchecked(i - 1); + let to = *r.get_unchecked(i); + let row_from = dm.get_unchecked(from); + curr_time += *row_from.get_unchecked(to); + let rdy = *rt.get_unchecked(to); + if curr_time < rdy { + curr_time = rdy; + } + curr_time += service_time; + i += 1; + } + + let row_prev = dm.get_unchecked(prev_node); + curr_time += *row_prev.get_unchecked(new_node); + if curr_time > *dt.get_unchecked(new_node) { + return false; + } + + let rdy_new = *rt.get_unchecked(new_node); + if curr_time < rdy_new { + curr_time = rdy_new; + } + curr_time += service_time; + + let row_new = 
dm.get_unchecked(new_node); + curr_time += *row_new.get_unchecked(next_node); + if curr_time > *dt.get_unchecked(next_node) { + return false; + } + } + + true + } + + #[inline(always)] + pub fn try_two_opt_star( + routes: &mut Vec>, + route1_idx: usize, + pos1: usize, + route2_idx: usize, + pos2: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + route_distances: &mut Vec, + total_distance: &mut i32, + route_demands: &mut Vec, + demands: &Vec, + max_capacity: i32, + ) -> bool { + if pos1 == 0 + || pos2 == 0 + || pos1 >= routes[route1_idx].len() - 1 + || pos2 >= routes[route2_idx].len() - 1 + { + return false; + } + + let r1 = &routes[route1_idx]; + let r2 = &routes[route2_idx]; + + let prev1 = r1[pos1 - 1]; + let curr1 = r1[pos1]; + let prev2 = r2[pos2 - 1]; + let curr2 = r2[pos2]; + + let dm = distance_matrix; + let (old_dist, new_dist) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + ( + *row_prev1.get_unchecked(curr1) + *row_prev2.get_unchecked(curr2), + *row_prev1.get_unchecked(curr2) + *row_prev2.get_unchecked(curr1), + ) + }; + + if new_dist >= old_dist { + return false; + } + + let (demand1, demand2) = { + unsafe { + let dem = demands.as_slice(); + let r1s = r1.as_slice(); + let r2s = r2.as_slice(); + let len1 = r1s.len(); + let len2 = r2s.len(); + + let mut d1_prefix = 0i32; + let mut i = 1usize; + while i < pos1 { + let node = *r1s.get_unchecked(i); + d1_prefix += *dem.get_unchecked(node); + i += 1; + } + let mut d2_prefix = 0i32; + let mut j = 1usize; + while j < pos2 { + let node = *r2s.get_unchecked(j); + d2_prefix += *dem.get_unchecked(node); + j += 1; + } + + let mut d1_tail = 0i32; + i = pos1; + while i + 1 < len1 { + let node = *r1s.get_unchecked(i); + d1_tail += *dem.get_unchecked(node); + i += 1; + } + let mut d2_tail = 0i32; + j = pos2; + while j + 1 < len2 { + let node = *r2s.get_unchecked(j); + d2_tail += *dem.get_unchecked(node); + j += 1; + } + + 
(d1_prefix + d2_tail, d2_prefix + d1_tail) + } + }; + if demand1 > max_capacity || demand2 > max_capacity { + return false; + } + + let cap1 = pos1 + (r2.len() - pos2); + let cap2 = pos2 + (r1.len() - pos1); + let mut new_route1 = Vec::with_capacity(cap1); + let mut new_route2 = Vec::with_capacity(cap2); + + new_route1.extend_from_slice(&r1[..pos1]); + new_route1.extend_from_slice(&r2[pos2..]); + + new_route2.extend_from_slice(&r2[..pos2]); + new_route2.extend_from_slice(&r1[pos1..]); + + if !is_route_time_feasible_fast( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_time_feasible_fast( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + return false; + } + + let old_total = route_distances[route1_idx] + route_distances[route2_idx]; + let new_dist1 = calculate_route_distance(&new_route1, distance_matrix); + let new_dist2 = calculate_route_distance(&new_route2, distance_matrix); + let new_total = new_dist1 + new_dist2; + + if new_total < old_total { + routes[route1_idx] = new_route1; + routes[route2_idx] = new_route2; + route_distances[route1_idx] = new_dist1; + route_distances[route2_idx] = new_dist2; + route_demands[route1_idx] = demand1; + route_demands[route2_idx] = demand2; + *total_distance = *total_distance - old_total + new_total; + return true; + } + + false + } + + #[inline(always)] + pub fn try_2_2_cross_exchange( + routes: &mut Vec>, + route1_idx: usize, + pos1: usize, + route2_idx: usize, + pos2: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + route_distances: &mut Vec, + total_distance: &mut i32, + route_demands: &mut Vec, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let r1 = &routes[route1_idx]; + let r2 = &routes[route2_idx]; + + if pos1 + 1 >= r1.len() - 1 || pos2 + 1 >= r2.len() - 1 { + return false; + } + + let node1a = r1[pos1]; + let node1b = r1[pos1 + 1]; + let node2a = r2[pos2]; + let node2b = r2[pos2 + 1]; 
+ + let dem = demands.as_slice(); + let demand_diff1 = unsafe { + *dem.get_unchecked(node2a) + *dem.get_unchecked(node2b) + - *dem.get_unchecked(node1a) + - *dem.get_unchecked(node1b) + }; + let demand_diff2 = unsafe { + *dem.get_unchecked(node1a) + *dem.get_unchecked(node1b) + - *dem.get_unchecked(node2a) + - *dem.get_unchecked(node2b) + }; + + if route_demands[route1_idx] + demand_diff1 > max_capacity + || route_demands[route2_idx] + demand_diff2 > max_capacity + { + return false; + } + + let prev1 = r1[pos1 - 1]; + let next1 = r1[pos1 + 2]; + let prev2 = r2[pos2 - 1]; + let next2 = r2[pos2 + 2]; + let dm = distance_matrix; + let (delta1, delta2) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + let row_n1a = dm.get_unchecked(node1a); + let row_n1b = dm.get_unchecked(node1b); + let row_n2a = dm.get_unchecked(node2a); + let row_n2b = dm.get_unchecked(node2b); + + ( + *row_prev1.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(next1) + - (*row_prev1.get_unchecked(node1a) + + *row_n1a.get_unchecked(node1b) + + *row_n1b.get_unchecked(next1)), + *row_prev2.get_unchecked(node1a) + + *row_n1a.get_unchecked(node1b) + + *row_n1b.get_unchecked(next2) + - (*row_prev2.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(next2)), + ) + }; + + if delta1 + delta2 >= 0 { + return false; + } + + let mut new_route1 = Vec::with_capacity(r1.len()); + let mut new_route2 = Vec::with_capacity(r2.len()); + + new_route1.extend_from_slice(&r1[..pos1]); + new_route1.push(node2a); + new_route1.push(node2b); + new_route1.extend_from_slice(&r1[pos1 + 2..]); + + new_route2.extend_from_slice(&r2[..pos2]); + new_route2.push(node1a); + new_route2.push(node1b); + new_route2.extend_from_slice(&r2[pos2 + 2..]); + + if !is_route_time_feasible_fast( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_time_feasible_fast( + &new_route2, + 
distance_matrix, + service_time, + ready_times, + due_times, + ) { + return false; + } + + let old_total = route_distances[route1_idx] + route_distances[route2_idx]; + let new_dist1 = calculate_route_distance(&new_route1, distance_matrix); + let new_dist2 = calculate_route_distance(&new_route2, distance_matrix); + let new_total = new_dist1 + new_dist2; + + if new_total < old_total { + routes[route1_idx] = new_route1; + routes[route2_idx] = new_route2; + route_distances[route1_idx] = new_dist1; + route_distances[route2_idx] = new_dist2; + route_demands[route1_idx] += demand_diff1; + route_demands[route2_idx] += demand_diff2; + *total_distance = *total_distance - old_total + new_total; + return true; + } + + false + } + + #[inline(always)] + pub fn try_1_2_cross_exchange( + routes: &mut Vec>, + route1_idx: usize, + pos1: usize, + route2_idx: usize, + pos2: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + route_distances: &mut Vec, + total_distance: &mut i32, + route_demands: &mut Vec, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let r1 = &routes[route1_idx]; + let r2 = &routes[route2_idx]; + + if pos1 >= r1.len() - 1 || pos2 + 1 >= r2.len() - 1 { + return false; + } + + let node1 = r1[pos1]; + let node2a = r2[pos2]; + let node2b = r2[pos2 + 1]; + + let dem = demands.as_slice(); + let demand_diff1 = unsafe { + *dem.get_unchecked(node2a) + *dem.get_unchecked(node2b) - *dem.get_unchecked(node1) + }; + let demand_diff2 = unsafe { + *dem.get_unchecked(node1) - *dem.get_unchecked(node2a) - *dem.get_unchecked(node2b) + }; + + if route_demands[route1_idx] + demand_diff1 > max_capacity + || route_demands[route2_idx] + demand_diff2 > max_capacity + { + return false; + } + + let prev1 = r1[pos1 - 1]; + let next1 = r1[pos1 + 1]; + let prev2 = r2[pos2 - 1]; + let next2 = r2[pos2 + 2]; + let dm = distance_matrix; + let (delta1, delta2) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = 
dm.get_unchecked(prev2); + let row_n1 = dm.get_unchecked(node1); + let row_n2a = dm.get_unchecked(node2a); + let row_n2b = dm.get_unchecked(node2b); + + ( + *row_prev1.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(next1) + - (*row_prev1.get_unchecked(node1) + *row_n1.get_unchecked(next1)), + *row_prev2.get_unchecked(node1) + *row_n1.get_unchecked(next2) + - (*row_prev2.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(next2)), + ) + }; + + if delta1 + delta2 >= 0 { + return false; + } + + let mut new_route1 = Vec::with_capacity(r1.len() + 1); + let mut new_route2 = Vec::with_capacity(r2.len() - 1); + + new_route1.extend_from_slice(&r1[..pos1]); + new_route1.push(node2a); + new_route1.push(node2b); + new_route1.extend_from_slice(&r1[pos1 + 1..]); + + new_route2.extend_from_slice(&r2[..pos2]); + new_route2.push(node1); + new_route2.extend_from_slice(&r2[pos2 + 2..]); + + if !is_route_time_feasible_fast( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_time_feasible_fast( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + return false; + } + + let old_total = route_distances[route1_idx] + route_distances[route2_idx]; + let new_dist1 = calculate_route_distance(&new_route1, distance_matrix); + let new_dist2 = calculate_route_distance(&new_route2, distance_matrix); + let new_total = new_dist1 + new_dist2; + + if new_total < old_total { + routes[route1_idx] = new_route1; + routes[route2_idx] = new_route2; + route_distances[route1_idx] = new_dist1; + route_distances[route2_idx] = new_dist2; + route_demands[route1_idx] += demand_diff1; + route_demands[route2_idx] += demand_diff2; + *total_distance = *total_distance - old_total + new_total; + return true; + } + + false + } + + #[inline(always)] + pub fn try_1_3_cross_exchange( + routes: &mut Vec>, + route1_idx: usize, + pos1: usize, + route2_idx: usize, + pos2: usize, + 
distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + route_distances: &mut Vec, + total_distance: &mut i32, + route_demands: &mut Vec, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let r1 = &routes[route1_idx]; + let r2 = &routes[route2_idx]; + + if pos1 >= r1.len() - 1 || pos2 + 2 >= r2.len() - 1 { + return false; + } + + let node1 = r1[pos1]; + let node2a = r2[pos2]; + let node2b = r2[pos2 + 1]; + let node2c = r2[pos2 + 2]; + + let dem = demands.as_slice(); + let demand_diff1 = unsafe { + *dem.get_unchecked(node2a) + *dem.get_unchecked(node2b) + *dem.get_unchecked(node2c) + - *dem.get_unchecked(node1) + }; + let demand_diff2 = unsafe { + *dem.get_unchecked(node1) + - *dem.get_unchecked(node2a) + - *dem.get_unchecked(node2b) + - *dem.get_unchecked(node2c) + }; + + if route_demands[route1_idx] + demand_diff1 > max_capacity + || route_demands[route2_idx] + demand_diff2 > max_capacity + { + return false; + } + + let prev1 = r1[pos1 - 1]; + let next1 = r1[pos1 + 1]; + let prev2 = r2[pos2 - 1]; + let next2 = r2[pos2 + 3]; + let dm = distance_matrix; + let (delta1, delta2) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + let row_n1 = dm.get_unchecked(node1); + let row_n2a = dm.get_unchecked(node2a); + let row_n2b = dm.get_unchecked(node2b); + let row_n2c = dm.get_unchecked(node2c); + + ( + *row_prev1.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(node2c) + + *row_n2c.get_unchecked(next1) + - (*row_prev1.get_unchecked(node1) + *row_n1.get_unchecked(next1)), + *row_prev2.get_unchecked(node1) + *row_n1.get_unchecked(next2) + - (*row_prev2.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(node2c) + + *row_n2c.get_unchecked(next2)), + ) + }; + + if delta1 + delta2 >= 0 { + return false; + } + + let mut new_route1 = Vec::with_capacity(r1.len() + 2); + let mut new_route2 = Vec::with_capacity(r2.len() - 2); + + 
new_route1.extend_from_slice(&r1[..pos1]); + new_route1.push(node2a); + new_route1.push(node2b); + new_route1.push(node2c); + new_route1.extend_from_slice(&r1[pos1 + 1..]); + + new_route2.extend_from_slice(&r2[..pos2]); + new_route2.push(node1); + new_route2.extend_from_slice(&r2[pos2 + 3..]); + + if !is_route_time_feasible_fast( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || !is_route_time_feasible_fast( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + return false; + } + + let old_total = route_distances[route1_idx] + route_distances[route2_idx]; + let new_dist1 = calculate_route_distance(&new_route1, distance_matrix); + let new_dist2 = calculate_route_distance(&new_route2, distance_matrix); + let new_total = new_dist1 + new_dist2; + + if new_total < old_total { + routes[route1_idx] = new_route1; + routes[route2_idx] = new_route2; + route_distances[route1_idx] = new_dist1; + route_distances[route2_idx] = new_dist2; + route_demands[route1_idx] += demand_diff1; + route_demands[route2_idx] += demand_diff2; + *total_distance = *total_distance - old_total + new_total; + return true; + } + + false + } + + #[inline(always)] + pub fn try_2_3_cross_exchange( + routes: &mut Vec>, + route1_idx: usize, + pos1: usize, + route2_idx: usize, + pos2: usize, + distance_matrix: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + route_distances: &mut Vec, + total_distance: &mut i32, + route_demands: &mut Vec, + demands: &Vec, + max_capacity: i32, + ) -> bool { + let r1 = &routes[route1_idx]; + let r2 = &routes[route2_idx]; + + if pos1 + 1 >= r1.len() - 1 || pos2 + 2 >= r2.len() - 1 { + return false; + } + + let node1a = r1[pos1]; + let node1b = r1[pos1 + 1]; + let node2a = r2[pos2]; + let node2b = r2[pos2 + 1]; + let node2c = r2[pos2 + 2]; + + let dem = demands.as_slice(); + let demand_diff1 = unsafe { + *dem.get_unchecked(node2a) + *dem.get_unchecked(node2b) + *dem.get_unchecked(node2c) + 
- *dem.get_unchecked(node1a) + - *dem.get_unchecked(node1b) + }; + let demand_diff2 = unsafe { + *dem.get_unchecked(node1a) + *dem.get_unchecked(node1b) + - *dem.get_unchecked(node2a) + - *dem.get_unchecked(node2b) + - *dem.get_unchecked(node2c) + }; + + if route_demands[route1_idx] + demand_diff1 > max_capacity + || route_demands[route2_idx] + demand_diff2 > max_capacity + { + return false; + } + + let prev1 = r1[pos1 - 1]; + let next1 = r1[pos1 + 2]; + let prev2 = r2[pos2 - 1]; + let next2 = r2[pos2 + 3]; + let dm = distance_matrix; + let (delta1, delta2) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + let row_n1a = dm.get_unchecked(node1a); + let row_n1b = dm.get_unchecked(node1b); + let row_n2a = dm.get_unchecked(node2a); + let row_n2b = dm.get_unchecked(node2b); + let row_n2c = dm.get_unchecked(node2c); + + ( + *row_prev1.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(node2c) + + *row_n2c.get_unchecked(next1) + - (*row_prev1.get_unchecked(node1a) + + *row_n1a.get_unchecked(node1b) + + *row_n1b.get_unchecked(next1)), + *row_prev2.get_unchecked(node1a) + + *row_n1a.get_unchecked(node1b) + + *row_n1b.get_unchecked(next2) + - (*row_prev2.get_unchecked(node2a) + + *row_n2a.get_unchecked(node2b) + + *row_n2b.get_unchecked(node2c) + + *row_n2c.get_unchecked(next2)), + ) + }; + + if delta1 + delta2 >= 0 { + return false; + } + + let mut new_route1 = Vec::with_capacity(r1.len() + 1); + let mut new_route2 = Vec::with_capacity(r2.len() - 1); + + new_route1.extend_from_slice(&r1[..pos1]); + new_route1.push(node2a); + new_route1.push(node2b); + new_route1.push(node2c); + new_route1.extend_from_slice(&r1[pos1 + 2..]); + + new_route2.extend_from_slice(&r2[..pos2]); + new_route2.push(node1a); + new_route2.push(node1b); + new_route2.extend_from_slice(&r2[pos2 + 3..]); + + if !is_route_time_feasible_fast( + &new_route1, + distance_matrix, + service_time, + ready_times, + due_times, + ) || 
!is_route_time_feasible_fast( + &new_route2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + return false; + } + + let old_total = route_distances[route1_idx] + route_distances[route2_idx]; + let new_dist1 = calculate_route_distance(&new_route1, distance_matrix); + let new_dist2 = calculate_route_distance(&new_route2, distance_matrix); + let new_total = new_dist1 + new_dist2; + + if new_total < old_total { + routes[route1_idx] = new_route1; + routes[route2_idx] = new_route2; + route_distances[route1_idx] = new_dist1; + route_distances[route2_idx] = new_dist2; + route_demands[route1_idx] += demand_diff1; + route_demands[route2_idx] += demand_diff2; + *total_distance = *total_distance - old_total + new_total; + return true; + } + + false + } + + #[inline(always)] + pub fn update_node_positions_for_routes( + node_positions: &mut Vec<(usize, usize)>, + routes: &Vec>, + route_indices: &[usize], + ) { + for &route_idx in route_indices { + let route = &routes[route_idx]; + let end = route.len() - 1; + if end <= 1 { + continue; + } + unsafe { + let slice = route.as_slice(); + let mut j = 1usize; + while j < end { + let node = *slice.get_unchecked(j); + *node_positions.get_unchecked_mut(node) = (route_idx, j); + j += 1; + } + } + } + } +} + +mod simple_solver { + use super::utils::*; + use super::*; + + #[inline(always)] + pub fn solve_challenge( + challenge: &Challenge, + save_solution: &dyn Fn(&Solution) -> anyhow::Result<()>, + hyperparameters: &Option>, + ) -> anyhow::Result<()> { + let num_nodes = challenge.difficulty.num_nodes; + let max_capacity = challenge.max_capacity; + let demands = &challenge.demands; + let distance_matrix = &challenge.distance_matrix; + let service_time = challenge.service_time; + let ready_times = &challenge.ready_times; + let due_times = &challenge.due_times; + let mut routes: Vec> = Vec::with_capacity(num_nodes); + + let mut nodes: Vec = (1..num_nodes).collect(); + + let strategy_idx = (num_nodes + max_capacity as 
usize + service_time as usize) % 5; + match strategy_idx { + 0 => nodes.sort_by(|a, b| distance_matrix[0][*a].cmp(&distance_matrix[0][*b])), + 1 => nodes.sort_by(|a, b| demands[*b].cmp(&demands[*a])), + 2 => nodes.sort_by(|a, b| ready_times[*a].cmp(&ready_times[*b])), + 3 => nodes.sort_by(|a, b| due_times[*a].cmp(&due_times[*b])), + _ => nodes.sort_by(|a, b| { + let urgency_a = due_times[*a] - ready_times[*a]; + let urgency_b = due_times[*b] - ready_times[*b]; + urgency_a.cmp(&urgency_b) + }), + } + + let mut remaining = vec![false; num_nodes]; + for &n in &nodes { + remaining[n] = true; + } + + while let Some(node) = nodes.pop() { + if !remaining[node] { + continue; + } + remaining[node] = false; + let mut route = vec![0, node, 0]; + let mut route_demand = demands[node]; + + let mut insertion_attempts = 0; + let max_insertion_attempts = { + let base_attempts = if (due_times[0] - ready_times[0]) < service_time * 10 { + 3 + } else { + 2 + }; + let avg_slack = { + let mut s: i64 = 0; + let mut c: i64 = 0; + for i in 1..num_nodes { + s += (due_times[i] - ready_times[i]) as i64; + c += 1; + } + if c > 0 { + (s / c) as i32 + } else { + 0 + } + }; + let tight = if avg_slack <= service_time * 4 { 2 } else { 0 }; + let scale_factor = (num_nodes as f32 / 1200.0).sqrt(); + ((base_attempts + tight) as f32 * scale_factor).round() as usize + } + .clamp(3, 8); + let mut candidates: Vec = Vec::with_capacity(num_nodes); + + while insertion_attempts < max_insertion_attempts { + candidates.clear(); + + for n in 1..num_nodes { + if remaining[n] && route_demand + demands[n] <= max_capacity { + candidates.push(n); + } + } + + if candidates.is_empty() { + break; + } + + if let Some((best_node, best_pos)) = find_best_insertion( + &route, + &candidates, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + remaining[best_node] = false; + route_demand += demands[best_node]; + route.insert(best_pos, best_node); + insertion_attempts = 0; + } else { + insertion_attempts += 1; 
+ } + } + + route = apply_size_filtered_local_search( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + if route.len() >= 3 && route[0] == 0 && route[route.len() - 1] == 0 { + routes.push(route); + } + } + + { + let mut leftover: Vec = Vec::new(); + for n in 1..num_nodes { + if remaining[n] { + leftover.push(n); + } + } + if !leftover.is_empty() && !routes.is_empty() { + let mut route_demands_pre = utils::calculate_route_demands(&routes, demands); + let mut iter_count = 0usize; + let max_global_iters = leftover.len().saturating_mul(2).min(64); + let mut progress = true; + while progress && !leftover.is_empty() && iter_count < max_global_iters { + iter_count += 1; + progress = false; + let mut best_choice: Option<(usize, usize, usize, i32)> = None; + for &n in &leftover { + for (r_idx, r) in routes.iter().enumerate() { + if route_demands_pre[r_idx] + demands[n] > max_capacity { + continue; + } + if let Some((pos, delta)) = find_best_insertion_in_route( + r, + n, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + match best_choice { + None => best_choice = Some((n, r_idx, pos, delta)), + Some((_bn, _ri, _bp, bdelta)) => { + if delta < bdelta { + best_choice = Some((n, r_idx, pos, delta)); + } + } + } + } + } + } + if let Some((n, r_idx, pos, _delta)) = best_choice { + routes[r_idx].insert(pos, n); + route_demands_pre[r_idx] += demands[n]; + remaining[n] = false; + if let Some(idx) = leftover.iter().position(|&x| x == n) { + leftover.swap_remove(idx); + } + progress = true; + } + } + } + } + for n in 1..num_nodes { + if remaining[n] { + routes.push(vec![0, n, 0]); + } + } + + routes = do_local_searches( + num_nodes, + max_capacity, + demands, + distance_matrix, + &routes, + service_time, + ready_times, + due_times, + ); + + let mut seen = vec![false; num_nodes]; + let mut duplication = false; + for r in &routes { + for &n in r[1..r.len() - 1].iter() { + if seen[n] { + duplication = true; + } 
else { + seen[n] = true; + } + } + } + let mut missing_any = false; + for n in 1..num_nodes { + if !seen[n] { + missing_any = true; + break; + } + } + if duplication || missing_any { + let mut fallback: Vec> = Vec::with_capacity(num_nodes - 1); + for n in 1..num_nodes { + fallback.push(vec![0, n, 0]); + } + routes = fallback; + } + + let mut validated_routes: Vec> = Vec::with_capacity(routes.len()); + let mut need_fallback = false; + + for route in routes { + if route.len() < 3 || route[0] != 0 || route[route.len() - 1] != 0 { + need_fallback = true; + break; + } + + let time_ok = is_route_time_feasible_fast( + &route, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + let cap: i32 = route[1..route.len() - 1].iter().map(|&n| demands[n]).sum(); + let cap_ok = cap <= max_capacity; + + if time_ok && cap_ok { + validated_routes.push(route); + continue; + } + + let mut local_repair_ok = true; + for &node in route[1..route.len() - 1].iter() { + if demands[node] > max_capacity { + local_repair_ok = false; + break; + } + let singleton = vec![0, node, 0]; + if !is_route_time_feasible_strict( + &singleton, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + local_repair_ok = false; + break; + } + validated_routes.push(singleton); + } + + if !local_repair_ok { + need_fallback = true; + break; + } + } + + if !need_fallback { + let served: usize = validated_routes + .iter() + .map(|r| if r.len() >= 2 { r.len() - 2 } else { 0 }) + .sum(); + if served != num_nodes - 1 { + need_fallback = true; + } + } + + if need_fallback || validated_routes.is_empty() { + let mut fallback: Vec> = Vec::with_capacity(num_nodes - 1); + for n in 1..num_nodes { + if demands[n] > max_capacity { + return Ok(()); + } + let singleton = vec![0, n, 0]; + if !is_route_time_feasible_strict( + &singleton, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + return Ok(()); + } + fallback.push(singleton); + } + let _ = save_solution(&Solution { routes: 
fallback }); + return Ok(()); + } + + let _ = save_solution(&Solution { + routes: validated_routes, + }); + return Ok(()); + } + + // Iterated local search over a set of routes: alternates relocate, swap, pair-relocate, cross-exchange/2-opt* and intra-route phases until no improving move is found or a size-scaled iteration budget runs out. + #[inline(always)] + fn do_local_searches( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, + service_time: i32, + ready_times: &Vec, + due_times: &Vec, + ) -> Vec> { + // NOTE(review): generic arguments appear stripped in this extract (`&Vec`, `Vec>`); presumably `&Vec<i32>` / `Vec<Vec<usize>>` — confirm against the original file. + let mut best_routes = routes.clone(); + + let mut route_distances: Vec = Vec::with_capacity(best_routes.len()); + let mut best_distance: i32 = 0; + for r in &best_routes { + let d = utils::calculate_route_distance(r, distance_matrix); + route_distances.push(d); + best_distance += d; + } + let mut improved = true; + let mut iteration_count = 0; + // A gain smaller than ~0.009% of the current total is treated as "no progress" for loop-continuation purposes. + let improvement_threshold = (best_distance as f32 * 0.00009).max(0.1) as i32; + let max_total_iterations = { + let base_iterations = 150; + let scale_factor = if num_nodes <= 1500 { + 1.0 + (num_nodes - 1000) as f32 / 2000.0 + } else { + 1.167 + (num_nodes - 1500) as f32 / 2000.0 + }; + ((base_iterations as f32 * scale_factor).round() as usize).clamp(150, 330) + }; + + let neighbor_count = { + let base_neighbors = 30; + let scale_factor = (num_nodes as f32 / 1500.0).powf(0.5); + (base_neighbors as f32 * scale_factor).round() as usize + } + .clamp(28, 60); + let pool = { + let base_pool = 44; + let scale_factor = (num_nodes as f32 / 1500.0).powf(0.5); + (base_pool as f32 * scale_factor).round() as usize + } + .clamp(40, 100); + // Candidate moves are restricted to `proximity_pairs`: for each customer i, the `neighbor_count` neighbors with the lowest combined score of travel time, time-window violation penalty, and overlap bonus, preselected from a distance-pruned `pool`. + let mut proximity_pairs: Vec<(i32, usize, usize)> = + Vec::with_capacity(num_nodes * neighbor_count * 2); + let mut pool_vec: Vec<(i32, usize)> = Vec::with_capacity(pool); + let mut bests: Vec<(i32, usize)> = Vec::with_capacity(neighbor_count); + for i in 1..num_nodes { + pool_vec.clear(); + let mut max_idx: usize = 0; + let mut max_d: i32 = i32::MIN; + let di = &distance_matrix[i]; + unsafe { + for j in 1..num_nodes { + if j == i { + continue; + } + let d = *di.get_unchecked(j); + if pool_vec.len() < pool { + pool_vec.push((d, j)); + if d > max_d { + max_d 
= d; + max_idx = pool_vec.len() - 1; + } + } else if d < max_d { + // Pool is full: replace the current farthest entry, then rescan for the new farthest. + pool_vec[max_idx] = (d, j); + let mut new_max_d = i32::MIN; + let mut new_max_idx = 0; + let pv = pool_vec.as_slice(); + let mut idx = 0usize; + while idx < pv.len() { + let dd = (*pv.get_unchecked(idx)).0; + if dd > new_max_d { + new_max_d = dd; + new_max_idx = idx; + } + idx += 1; + } + max_d = new_max_d; + max_idx = new_max_idx; + } + } + } + bests.clear(); + let ri = ready_times[i]; + let di_due = due_times[i]; + unsafe { + let rt = ready_times.as_slice(); + let dt = due_times.as_slice(); + for &(_d, j) in pool_vec.iter() { + let time_ij = *di.get_unchecked(j); + let rj = *rt.get_unchecked(j); + let dj = *dt.get_unchecked(j); + // Penalty for serving i then j (expr1) or j then i (expr2); the cheaper ordering is used. + let expr1 = (rj - time_ij - service_time - di_due).max(0) + + (ri + service_time + time_ij - dj).max(0); + let expr2 = (ri - time_ij - service_time - dj).max(0) + + (rj + service_time + time_ij - di_due).max(0); + + let time_window_penalty = if expr1 < expr2 { expr1 } else { expr2 }; + let wi = di_due - ri; + let wj = dj - rj; + // Bonus (up to 20) when the two time windows are of similar width. + let time_compatibility = wi.min(wj) as f32 / (wi.max(wj) + 1) as f32; + let time_bonus = (time_compatibility * 20.0) as i32; + + let p = time_ij + time_window_penalty - time_bonus; + // Insertion sort into `bests`, kept ascending by score and capped at `neighbor_count`. + if bests.len() < neighbor_count { + let mut pos = bests.len(); + while pos > 0 && p < bests[pos - 1].0 { + pos -= 1; + } + bests.insert(pos, (p, j)); + } else if p < bests[bests.len() - 1].0 { + let mut pos = bests.len() - 1; + while pos > 0 && p < bests[pos - 1].0 { + pos -= 1; + } + bests.insert(pos, (p, j)); + bests.pop(); + } + } + } + for (p, j) in &bests { + proximity_pairs.push((*p, i, *j)); + proximity_pairs.push((*p, *j, i)); + } + } + + // node_positions[v] = (route index, position within route) for every routed customer; depot endpoints are skipped. + let mut node_positions = vec![(0, 0); num_nodes]; + for (i, route) in best_routes.iter().enumerate() { + if route.len() <= 2 { + continue; + } + unsafe { + let r = route.as_slice(); + let mut j = 1usize; + let end = r.len() - 1; + while j < end { + let node = *r.get_unchecked(j); + *node_positions.get_unchecked_mut(node) = 
(i, j); + j += 1; + } + } + } + + let mut route_demands = calculate_route_demands(&best_routes, demands); + let mut cand_r1: Vec = Vec::with_capacity(64); + let mut cand_r2: Vec = Vec::with_capacity(64); + let mut swap_r1: Vec = Vec::with_capacity(64); + let mut swap_r2: Vec = Vec::with_capacity(64); + // Main loop: each later phase runs only if the earlier phases found no accepted improvement in this iteration. + while improved && iteration_count < max_total_iterations { + improved = false; + iteration_count += 1; + + // Phase 1: for each proximity pair, try relocating `node` into `node2`'s route (offset 0) and swapping `node` with the customer at pos2 (both offsets). + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, pos2_base) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + for pos2_offset in 0..=1 { + let pos2 = pos2_base + pos2_offset; + + if pos2_offset == 0 { + let target_route_demand = route_demands[route2_idx]; + if target_route_demand + demands[node] <= max_capacity { + if let Some((best_pos, _delta_cost)) = find_best_insertion_in_route( + &best_routes[route2_idx], + node, + demands, + max_capacity, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + // Guard: node_positions may be stale after earlier accepted moves; re-verify before using pos1. + if best_routes[route1_idx].len() > pos1 + && best_routes[route1_idx][pos1] == node + { + let r1_ref = &best_routes[route1_idx]; + let r2_ref = &best_routes[route2_idx]; + let prev1 = r1_ref[pos1 - 1]; + let next1 = r1_ref[pos1 + 1]; + let prev2 = r2_ref[best_pos - 1]; + let next2 = r2_ref[best_pos]; + let dm = distance_matrix; + // Distance delta of removing `node` from route1 and inserting it before `best_pos` in route2. + let (delta_remove, delta_insert) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + let row_node = dm.get_unchecked(node); + ( + *row_prev1.get_unchecked(next1) + - *row_prev1.get_unchecked(node) + - *row_node.get_unchecked(next1), + *row_prev2.get_unchecked(node) + + *row_node.get_unchecked(next2) + - *row_prev2.get_unchecked(next2), + ) + }; + if delta_remove + delta_insert >= 0 { + continue; + } + + if r1_ref.len() <= 3 { + continue; + } + + cand_r1.clear(); + cand_r2.clear(); + + if cand_r1.capacity() < r1_ref.len() - 1 { + 
cand_r1.reserve(r1_ref.len() - 1 - cand_r1.capacity()); + } + if cand_r2.capacity() < r2_ref.len() + 1 { + cand_r2.reserve(r2_ref.len() + 1 - cand_r2.capacity()); + } + + cand_r1.extend_from_slice(&r1_ref[..pos1]); + cand_r1.extend_from_slice(&r1_ref[pos1 + 1..]); + + cand_r2.extend_from_slice(&r2_ref[..best_pos]); + cand_r2.push(node); + cand_r2.extend_from_slice(&r2_ref[best_pos..]); + + let r1 = apply_size_filtered_local_search( + &cand_r1, + distance_matrix, + service_time, + ready_times, + due_times, + ); + let r2 = apply_size_filtered_local_search( + &cand_r2, + distance_matrix, + service_time, + ready_times, + due_times, + ); + + // Accept only if both resulting routes are still depot-delimited tours and the total distance drops. + if r1.len() >= 3 + && r1[0] == 0 + && r1[r1.len() - 1] == 0 + && r2.len() >= 3 + && r2[0] == 0 + && r2[r2.len() - 1] == 0 + { + let old_d1 = route_distances[route1_idx]; + let old_d2 = route_distances[route2_idx]; + let new_d1 = + utils::calculate_route_distance(&r1, distance_matrix); + let new_d2 = + utils::calculate_route_distance(&r2, distance_matrix); + let new_distance = + best_distance - old_d1 - old_d2 + new_d1 + new_d2; + if new_distance < best_distance { + let improvement = best_distance - new_distance; + best_distance = new_distance; + + best_routes[route1_idx] = r1; + best_routes[route2_idx] = r2; + + route_distances[route1_idx] = new_d1; + route_distances[route2_idx] = new_d2; + route_demands[route1_idx] -= demands[node]; + route_demands[route2_idx] += demands[node]; + + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + + if improvement >= improvement_threshold { + improved = true; + break; + } + } + } + } + } + } + } + + // Swap attempt: exchange `node` with the customer at pos2 of route2, provided both capacity constraints still hold. + if pos2 < best_routes[route2_idx].len() + && best_routes[route2_idx][pos2] != 0 + && route_demands[route1_idx] - demands[node] + demands[node2] + <= max_capacity + && route_demands[route2_idx] - demands[node2] + demands[node] + <= max_capacity + { + if best_routes[route1_idx].len() > pos1 + && best_routes[route1_idx][pos1] == node + && 
best_routes[route2_idx].len() > pos2 + && best_routes[route2_idx][pos2] == node2 + { + let r1_ref = &best_routes[route1_idx]; + let r2_ref = &best_routes[route2_idx]; + let prev1 = r1_ref[pos1 - 1]; + let next1 = r1_ref[pos1 + 1]; + let prev2 = r2_ref[pos2 - 1]; + let next2 = r2_ref[pos2 + 1]; + let dm = distance_matrix; + // delta1/delta2: distance change in each route if node and node2 trade places. + let (delta1, delta2) = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_prev2 = dm.get_unchecked(prev2); + let row_node = dm.get_unchecked(node); + let row_node2 = dm.get_unchecked(node2); + ( + *row_prev1.get_unchecked(node2) + + *row_node2.get_unchecked(next1) + - (*row_prev1.get_unchecked(node) + + *row_node.get_unchecked(next1)), + *row_prev2.get_unchecked(node) + *row_node.get_unchecked(next2) + - (*row_prev2.get_unchecked(node2) + + *row_node2.get_unchecked(next2)), + ) + }; + + if delta1 + delta2 >= 0 { + continue; + } + + // Cheap local feasibility checks first, then a full route re-check below. + let time_feasible_1 = check_swap_time_feasibility( + r1_ref, + pos1, + node2, + distance_matrix, + service_time, + ready_times, + due_times, + ); + let time_feasible_2 = check_swap_time_feasibility( + r2_ref, + pos2, + node, + distance_matrix, + service_time, + ready_times, + due_times, + ); + if !time_feasible_1 || !time_feasible_2 { + continue; + } + + swap_r1.clear(); + swap_r2.clear(); + let r1_src = &best_routes[route1_idx]; + let r2_src = &best_routes[route2_idx]; + if swap_r1.capacity() < r1_src.len() { + swap_r1.reserve(r1_src.len() - swap_r1.capacity()); + } + if swap_r2.capacity() < r2_src.len() { + swap_r2.reserve(r2_src.len() - swap_r2.capacity()); + } + swap_r1.extend_from_slice(r1_src); + swap_r2.extend_from_slice(r2_src); + + swap_r1[pos1] = node2; + swap_r2[pos2] = node; + + if is_route_time_feasible_fast( + &swap_r1, + distance_matrix, + service_time, + ready_times, + due_times, + ) && is_route_time_feasible_fast( + &swap_r2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let old_d1 = route_distances[route1_idx]; + let old_d2 = route_distances[route2_idx]; + let 
new_d1 = old_d1 + delta1; + let new_d2 = old_d2 + delta2; + let new_distance = + best_distance - old_d1 - old_d2 + new_d1 + new_d2; + if new_distance < best_distance { + let improvement = best_distance - new_distance; + best_distance = new_distance; + + best_routes[route1_idx].clear(); + best_routes[route1_idx].extend_from_slice(&swap_r1); + best_routes[route2_idx].clear(); + best_routes[route2_idx].extend_from_slice(&swap_r2); + + route_distances[route1_idx] = new_d1; + route_distances[route2_idx] = new_d2; + route_demands[route1_idx] = + route_demands[route1_idx] - demands[node] + demands[node2]; + route_demands[route2_idx] = + route_demands[route2_idx] - demands[node2] + demands[node]; + + // Swap keeps both positions, so only the two entries need updating (no full rebuild). + unsafe { + *node_positions.get_unchecked_mut(node) = + (route2_idx, pos2); + *node_positions.get_unchecked_mut(node2) = + (route1_idx, pos1); + } + + if improvement >= improvement_threshold { + improved = true; + break; + } + } + } + } + } + } + } + + // Phase 2 (only if phase 1 stalled): relocate the consecutive pair (node, its successor) into node2's route. + if !improved { + for (_corr, node, node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, _) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + let r1_ref = &best_routes[route1_idx]; + if pos1 + 1 >= r1_ref.len() - 1 { + continue; + } + let node_b = r1_ref[pos1 + 1]; + if node_b == 0 { + continue; + } + let add_dem = demands[node] + demands[node_b]; + if route_demands[route2_idx] + add_dem > max_capacity { + continue; + } + if let Some((best_pos2, delta_insert2)) = find_best_insertion_pair_in_route( + &best_routes[route2_idx], + node, + node_b, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let prev1 = r1_ref[pos1 - 1]; + let next_after = r1_ref[pos1 + 2]; + let dm = distance_matrix; + let delta_remove_pair = unsafe { + let row_prev1 = dm.get_unchecked(prev1); + let row_node = dm.get_unchecked(node); + let row_nodeb = dm.get_unchecked(node_b); + *row_prev1.get_unchecked(next_after) + - 
(*row_prev1.get_unchecked(node) + + *row_node.get_unchecked(node_b) + + *row_nodeb.get_unchecked(next_after)) + }; + if delta_remove_pair + delta_insert2 >= 0 { + continue; + } + + cand_r1.clear(); + cand_r2.clear(); + + let r2_ref = &best_routes[route2_idx]; + if cand_r1.capacity() < r1_ref.len() - 2 { + cand_r1.reserve(r1_ref.len() - 2 - cand_r1.capacity()); + } + if cand_r2.capacity() < r2_ref.len() + 2 { + cand_r2.reserve(r2_ref.len() + 2 - cand_r2.capacity()); + } + + cand_r1.extend_from_slice(&r1_ref[..pos1]); + cand_r1.extend_from_slice(&r1_ref[pos1 + 2..]); + + cand_r2.extend_from_slice(&r2_ref[..best_pos2]); + cand_r2.push(node); + cand_r2.push(node_b); + cand_r2.extend_from_slice(&r2_ref[best_pos2..]); + + if is_route_time_feasible_fast( + &cand_r1, + distance_matrix, + service_time, + ready_times, + due_times, + ) && is_route_time_feasible_fast( + &cand_r2, + distance_matrix, + service_time, + ready_times, + due_times, + ) { + let old_d1 = route_distances[route1_idx]; + let old_d2 = route_distances[route2_idx]; + let new_d1 = utils::calculate_route_distance(&cand_r1, distance_matrix); + let new_d2 = utils::calculate_route_distance(&cand_r2, distance_matrix); + let new_total = best_distance - old_d1 - old_d2 + new_d1 + new_d2; + + if new_total < best_distance { + let improvement = best_distance - new_total; + best_distance = new_total; + + best_routes[route1_idx].clear(); + best_routes[route1_idx].extend_from_slice(&cand_r1); + best_routes[route2_idx].clear(); + best_routes[route2_idx].extend_from_slice(&cand_r2); + + route_distances[route1_idx] = new_d1; + route_distances[route2_idx] = new_d2; + route_demands[route1_idx] -= add_dem; + route_demands[route2_idx] += add_dem; + + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + + // NOTE(review): both branches below are identical (improved = true; break), so the threshold test here is dead — intentional? confirm against the other phases, which skip `improved = true` for sub-threshold gains. + if improvement >= improvement_threshold { + improved = true; + break; + } else { + improved = true; + break; + } + } + } + } + } + } + // Phase 3: heavier inter-route moves (1-2/2-2/1-3/2-3 cross-exchanges and 2-opt*); the `% 3` test below deterministically samples which pairs attempt the most expensive moves. + if !improved { + for (_corr, node, 
node2) in &proximity_pairs { + let node = *node; + let node2 = *node2; + let (route1_idx, pos1) = node_positions[node]; + let (route2_idx, pos2_base) = node_positions[node2]; + if route1_idx == route2_idx { + continue; + } + + for pos2_offset in 0..=1 { + let pos2 = pos2_base + pos2_offset; + + if try_1_2_cross_exchange( + &mut best_routes, + route1_idx, + pos1, + route2_idx, + pos2, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_1_2_cross_exchange( + &mut best_routes, + route2_idx, + pos2, + route1_idx, + pos1, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_2_2_cross_exchange( + &mut best_routes, + route1_idx, + pos1, + route2_idx, + pos2, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_two_opt_star( + &mut best_routes, + route1_idx, + pos1, + route2_idx, + pos2, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if (*_corr as usize + node + node2) % 3 == 0 { + if try_1_3_cross_exchange( + &mut best_routes, + route1_idx, 
+ pos1, + route2_idx, + pos2, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_1_3_cross_exchange( + &mut best_routes, + route2_idx, + pos2, + route1_idx, + pos1, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_2_3_cross_exchange( + &mut best_routes, + route1_idx, + pos1, + route2_idx, + pos2, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + + if try_2_3_cross_exchange( + &mut best_routes, + route2_idx, + pos2, + route1_idx, + pos1, + distance_matrix, + service_time, + ready_times, + due_times, + &mut route_distances, + &mut best_distance, + &mut route_demands, + demands, + max_capacity, + ) { + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route1_idx, route2_idx], + ); + improved = true; + break; + } + } + } + } + } + + // Phase 4: intra-route improvement on each route individually. + if !improved { + for route_idx in 0..best_routes.len() { + let improved_route = apply_size_filtered_local_search( + &best_routes[route_idx], + distance_matrix, + service_time, + ready_times, + due_times, + ); + + if improved_route != best_routes[route_idx] + && improved_route.len() >= 3 + && improved_route[0] == 0 + && improved_route[improved_route.len() - 1] == 0 + { + let old_d = route_distances[route_idx]; + let new_d = + 
utils::calculate_route_distance(&improved_route, distance_matrix); + let total_distance = best_distance - old_d + new_d; + if total_distance < best_distance { + let improvement = best_distance - total_distance; + best_distance = total_distance; + + best_routes[route_idx] = improved_route; + + route_distances[route_idx] = new_d; + update_node_positions_for_routes( + &mut node_positions, + &best_routes, + &[route_idx], + ); + + if improvement >= improvement_threshold { + improved = true; + break; + } + } + } + } + } + } + + // Drop routes that are no longer valid depot-to-depot tours (fewer than 3 stops or not 0-delimited). + best_routes + .retain(|route| route.len() >= 3 && route[0] == 0 && route[route.len() - 1] == 0); + + best_routes + } +}