Optimized Pareto Algorithm (See PR#24) (#27)

This commit is contained in:
germ3n 2024-11-29 16:33:29 +00:00 committed by GitHub
parent 59dbef3739
commit e11e85e950
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 364 additions and 303 deletions

View File

@ -67,47 +67,67 @@ def calc_valid_difficulties(upper_frontier: List[Point], lower_frontier: List[Po
valid_difficulties = np.stack(np.where(weights), axis=1) + min_difficulty
return valid_difficulties.tolist()
def calc_pareto_frontier(points: List[Point]) -> Frontier:
    """
    Calculates a single Pareto frontier from a list of points
    Adapted from https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
    """
    # Keep the caller's original list so the frontier is returned as the
    # caller's own point objects rather than numpy rows.
    points_ = points
    points = np.array(points)
    # frontier_idxs[k] is the ORIGINAL index of the k-th surviving point;
    # it is filtered in lockstep with `points` below.
    frontier_idxs = np.arange(points.shape[0])
    n_points = points.shape[0]  # NOTE(review): unused after this line
    next_point_index = 0  # Next index in the frontier_idxs array to search for
    while next_point_index < len(points):
        # A point survives if it is strictly smaller than the current pivot in
        # at least one dimension (i.e. the pivot does not dominate it)...
        nondominated_point_mask = np.any(points < points[next_point_index], axis=1)
        # ...and points identical to the pivot (including the pivot itself) are kept.
        nondominated_point_mask[np.all(points == points[next_point_index], axis=1)] = True
        frontier_idxs = frontier_idxs[nondominated_point_mask]  # Remove dominated points
        points = points[nondominated_point_mask]
        # The pivot survived its own mask, so counting the survivors before it
        # gives its new position; advance to the candidate after it.
        next_point_index = np.sum(nondominated_point_mask[:next_point_index]) + 1
    return [points_[idx] for idx in frontier_idxs]
def calc_pareto_frontier(points: List[Point]) -> Tuple[Frontier, List[bool]]:
    """
    Compute the Pareto frontier of a list of 2-d points, where smaller
    coordinates are better.

    Returns (frontier, mask) where mask[i] is True iff points[i] lies on
    the frontier, and frontier lists those points in original input order.
    """
    if not points:
        return [], []
    # Visit points in ascending (y, x) order.
    order = sorted(range(len(points)), key=lambda i: (points[i][1], points[i][0]))
    on_front = [True] * len(points)
    # Monotonic-stack pass over x: a point is dominated if some earlier
    # (smaller-y) point also has x no larger than its own.
    stack = []
    for idx in order:
        x = points[idx][0]
        while stack and points[stack[-1]][0] > x:
            stack.pop()
        if stack and points[stack[-1]][0] <= x:
            on_front[idx] = False
        stack.append(idx)
    # Tie-break pass: among points sharing a y value, only the one with the
    # smallest x may stay on the frontier.
    start = 0
    n = len(order)
    while start < n:
        end = start + 1
        while end < n and points[order[end]][1] == points[order[start]][1]:
            end += 1
        group = order[start:end]
        if len(group) > 1:
            keep = min(group, key=lambda k: points[k][0])
            for k in group:
                if k != keep:
                    on_front[k] = False
        start = end
    frontier = [p for p, flag in zip(points, on_front) if flag]
    return frontier, on_front
def calc_all_frontiers(points: List[Point]) -> List[Frontier]:
"""
Calculates a list of Pareto frontiers from a list of points
"""
buckets = {}
r = np.max(points, axis=0) - np.min(points, axis=0)
dim1, dim2 = (1, 0) if r[0] > r[1] else (0, 1)
for p in points:
if p[dim1] not in buckets:
buckets[p[dim1]] = []
buckets[p[dim1]].append(p)
for bucket in buckets.values():
bucket.sort(reverse=True, key=lambda x: x[dim2])
frontiers = []
while len(buckets) > 0:
points = [bucket[-1] for bucket in buckets.values()]
frontier = calc_pareto_frontier(points)
for p in frontier:
x = p[dim1]
buckets[x].pop()
if len(buckets[x]) == 0:
buckets.pop(x)
if not points:
return []
frontiers = []
remaining_points = None
while True:
points_ = remaining_points if remaining_points is not None else points
frontier, on_front = calc_pareto_frontier(points_)
frontiers.append(frontier)
# Get remaining points not on frontier
remaining_points = [points_[i] for i in range(len(points_)) if not on_front[i]]
# Break if no more points to process
if not remaining_points:
break
return frontiers
@dataclass

View File

@ -14,6 +14,7 @@ hex = "0.4.3"
rand = "0.8.4"
serde = { version = "1.0.196", features = ["derive"] }
serde_json = { version = "1.0.113" }
tokio = { version = "1.41.1", features = ["full"] }
tig-structs = { path = "../tig-structs" }
tig-utils = { path = "../tig-utils", features = ["web3"] }

View File

@ -76,10 +76,10 @@ pub async fn submit_precommit<T: Context>(
};
if lower_frontier
.iter()
.any(|lower_point| difficulty.pareto_compare(lower_point) == ParetoCompare::BDominatesA)
|| upper_frontier
.iter()
.any(|upper_point| difficulty.pareto_compare(upper_point) == ParetoCompare::ADominatesB)
.any(|lower_point| pareto_compare(difficulty, lower_point) == ParetoCompare::BDominatesA)
|| upper_frontier.iter().any(|upper_point| {
pareto_compare(difficulty, upper_point) == ParetoCompare::ADominatesB
})
{
return Err(anyhow!("Invalid difficulty. Out of bounds"));
}

View File

@ -124,7 +124,8 @@ pub(crate) async fn update(cache: &mut AddBlockCache) {
.map(|(settings, _)| settings.difficulty.clone())
.collect::<Frontier>();
let mut frontier_indexes = HashMap::<Point, usize>::new();
for (frontier_index, frontier) in pareto_algorithm(points, false).into_iter().enumerate() {
for (frontier_index, frontier) in pareto_algorithm(&points, false).into_iter().enumerate()
{
for point in frontier {
frontier_indexes.insert(point, frontier_index);
}
@ -205,19 +206,26 @@ pub(crate) async fn update(cache: &mut AddBlockCache) {
let scaled_frontier = base_frontier.clone();
(base_frontier, scaling_factor, scaled_frontier)
} else {
let base_frontier = pareto_algorithm(points, true)
let mut base_frontier = pareto_algorithm(&points, true)
.pop()
.unwrap()
.into_iter()
.map(|d| d.into_iter().map(|x| -x).collect())
.collect::<Frontier>() // mirror the points back;
.extend(&min_difficulty, &max_difficulty);
.collect::<Frontier>(); // mirror the points back;
base_frontier = extend_frontier(&base_frontier, &min_difficulty, &max_difficulty);
let scaling_factor = (challenge_data.num_qualifiers as f64
/ config.opow.total_qualifiers_threshold as f64)
.min(config.challenges.max_scaling_factor);
let scaled_frontier = base_frontier
.scale(&min_difficulty, &max_difficulty, scaling_factor)
.extend(&min_difficulty, &max_difficulty);
let mut scaled_frontier = scale_frontier(
&base_frontier,
&min_difficulty,
&max_difficulty,
scaling_factor,
);
scaled_frontier = extend_frontier(&scaled_frontier, &min_difficulty, &max_difficulty);
(base_frontier, scaling_factor, scaled_frontier)
};
@ -340,51 +348,3 @@ pub(crate) async fn update(cache: &mut AddBlockCache) {
data.influence = influence;
}
}
/// Returns the index (0 or 1) of the dimension whose value range
/// (max - min) across all points is smallest.
///
/// NOTE(review): assumes 2-dimensional points and a non-empty frontier;
/// on an empty frontier the fold yields (i32::MAX, i32::MIN) and the
/// subtraction overflows (panics in debug builds) — confirm callers
/// never pass an empty frontier.
fn find_smallest_range_dimension(points: &Frontier) -> usize {
    (0..2)
        .min_by_key(|&d| {
            // Single pass computing both min and max of coordinate d.
            let (min, max) = points
                .iter()
                .map(|p| p[d])
                .fold((i32::MAX, i32::MIN), |(min, max), val| {
                    (min.min(val), max.max(val))
                });
            max - min
        })
        .unwrap()
}
/// Computes successive Pareto frontiers of `points`.
///
/// Returns every frontier (best first), or only the first one when
/// `only_one` is true.
fn pareto_algorithm(points: Frontier, only_one: bool) -> Vec<Frontier> {
    if points.is_empty() {
        return Vec::new();
    }
    // Bucket points by the coordinate with the smallest spread; within a
    // bucket, sort on the other coordinate so the candidate for the next
    // frontier is always at the head of the bucket.
    let dimension = find_smallest_range_dimension(&points);
    let sort_dimension = 1 - dimension;
    let mut buckets: HashMap<i32, Vec<Point>> = HashMap::new();
    for point in points {
        buckets.entry(point[dimension]).or_default().push(point);
    }
    for (_, group) in buckets.iter_mut() {
        // sort descending
        group.sort_unstable_by(|a, b| b[sort_dimension].cmp(&a[sort_dimension]));
    }
    let mut result = Vec::new();
    while !buckets.is_empty() {
        // Only the head of each bucket can be on the current frontier.
        let points: HashSet<Point> = buckets.values().map(|group| group[0].clone()).collect();
        let frontier = points.pareto_frontier();
        // Pop each frontier point from its bucket; drop emptied buckets so
        // the loop terminates once every point has been assigned a frontier.
        for point in frontier.iter() {
            let bucket = buckets.get_mut(&point[dimension]).unwrap();
            bucket.remove(0);
            if bucket.is_empty() {
                buckets.remove(&point[dimension]);
            }
        }
        result.push(frontier);
        if only_one {
            break;
        }
    }
    result
}

View File

@ -1,219 +1,290 @@
use rand::Rng;
// optimized pareto impl
use std::cmp::min;
use std::collections::HashSet;
pub type Point = Vec<i32>;
pub type Frontier<P = Point> = HashSet<P>;
pub type Frontier = Vec<Point>;
#[derive(Debug, Clone, PartialEq)]
pub enum PointCompareFrontiers {
Below,
Within,
Above,
/// Flags which of the given 2-d cost vectors lie on the Pareto front,
/// treating smaller coordinates as better. Expects unique points (the
/// caller de-duplicates via `unique_with_indices`).
fn is_pareto_front_2d(costs: &Vec<Vec<i32>>) -> Vec<bool> {
    let n = costs.len();
    if n == 0 {
        return Vec::new();
    }
    // Process points in ascending (y, x) order.
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&a, &b| (costs[a][1], costs[a][0]).cmp(&(costs[b][1], costs[b][0])));

    let mut front = vec![true; n];
    let mut stack: Vec<usize> = Vec::new();
    // Pass 1: a point is dominated when some earlier (smaller-y) point
    // also has an x no larger than its own.
    for &idx in &order {
        let x = costs[idx][0];
        while stack.last().map_or(false, |&top| costs[top][0] > x) {
            stack.pop();
        }
        if let Some(&top) = stack.last() {
            if costs[top][0] <= x {
                front[idx] = false;
            }
        }
        stack.push(idx);
    }
    // Pass 2: among points sharing a y value, only the one with the
    // smallest x may stay on the front.
    let mut i = 0;
    while i < order.len() {
        let y = costs[order[i]][1];
        let mut j = i;
        while j < order.len() && costs[order[j]][1] == y {
            j += 1;
        }
        if j - i > 1 {
            let keep = *order[i..j].iter().min_by_key(|&&k| costs[k][0]).unwrap();
            for &k in &order[i..j] {
                if k != keep {
                    front[k] = false;
                }
            }
        }
        i = j;
    }
    front
}
#[derive(Debug, Clone, PartialEq)]
/// Returns, for each point in `costs`, whether it lies on the Pareto front.
///
/// When `assume_unique_lexsorted` is false, the points are first
/// de-duplicated and the per-unique-point result is scattered back onto
/// every original occurrence. `pre_sorted_along_x` is currently unused
/// (kept for API compatibility).
pub fn is_pareto_front(
    costs: &Vec<Vec<i32>>,
    assume_unique_lexsorted: bool,
    pre_sorted_along_x: Option<bool>,
) -> Vec<bool> {
    if assume_unique_lexsorted {
        // Caller guarantees uniqueness: evaluate directly.
        return is_pareto_front_2d(costs);
    }
    // De-duplicate, evaluate on the unique set, then map the flags back
    // to the original ordering.
    let (unique_costs, inverse) = unique_with_indices(costs);
    let unique_front = is_pareto_front_2d(&unique_costs);
    inverse.iter().map(|&i| unique_front[i]).collect()
}
// NOTE: caching the unique decomposition (instead of recomputing it repeatedly on the same points) would make this about 1.3x faster
use std::collections::HashMap;
/// De-duplicates `arr`, preserving first-seen order.
///
/// Returns `(unique, indices)` where `unique` holds each distinct point
/// once and `indices[i]` is the position in `unique` of `arr[i]`, so
/// `unique[indices[i]] == arr[i]` for every `i`.
pub fn unique_with_indices(arr: &Vec<Vec<i32>>) -> (Vec<Vec<i32>>, Vec<usize>) {
    let n = arr.len();
    let mut unique = Vec::with_capacity(n);
    let mut indices = Vec::with_capacity(n);
    // Map each point to its slot in `unique`; the entry API does a single
    // hash lookup per point (the original did a get followed by an insert).
    let mut seen: HashMap<&Vec<i32>, usize> = HashMap::with_capacity(n);
    for point in arr.iter() {
        let idx = *seen.entry(point).or_insert_with(|| {
            unique.push(point.clone());
            unique.len() - 1
        });
        indices.push(idx);
    }
    (unique, indices)
}
/// Result of a pairwise Pareto comparison between two points.
///
/// Derives `Clone`/`Copy`/`Eq` in addition to `PartialEq`/`Debug`: the
/// previous version of this type derived `Clone`, and a fieldless enum is
/// trivially copyable, so callers can compare and pass it by value freely.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum ParetoCompare {
    /// The first point dominates the second.
    ADominatesB,
    /// Neither point dominates the other (equal or incomparable).
    Equal,
    /// The second point dominates the first.
    BDominatesA,
}
pub trait PointOps {
type Point;
fn pareto_compare(&self, other: &Self) -> ParetoCompare;
fn scale(&self, min_point: &Self, max_point: &Self, multiplier: f64) -> Self::Point;
fn within(
&self,
lower_frontier: &Frontier<Self::Point>,
upper_frontier: &Frontier<Self::Point>,
) -> PointCompareFrontiers;
}
pub trait FrontierOps {
type Point;
fn pareto_frontier(&self) -> Frontier<Self::Point>;
fn extend(&self, min_point: &Self::Point, max_point: &Self::Point) -> Frontier<Self::Point>;
fn scale(
&self,
min_point: &Self::Point,
max_point: &Self::Point,
multiplier: f64,
) -> Frontier<Self::Point>;
fn sample<T: Rng>(&self, rng: &mut T) -> Self::Point;
}
impl PointOps for Point {
type Point = Point;
fn pareto_compare(&self, other: &Self) -> ParetoCompare {
let mut a_dominate_b = false;
let mut b_dominate_a = false;
for (a_val, b_val) in self.iter().zip(other) {
if a_val < b_val {
b_dominate_a = true;
} else if a_val > b_val {
a_dominate_b = true;
}
}
if a_dominate_b == b_dominate_a {
ParetoCompare::Equal
} else if a_dominate_b {
ParetoCompare::ADominatesB
} else {
ParetoCompare::BDominatesA
pub fn pareto_compare(point: &Point, other: &Point) -> ParetoCompare {
let mut a_dominate_b = false;
let mut b_dominate_a = false;
for (a_val, b_val) in point.iter().zip(other) {
if a_val < b_val {
b_dominate_a = true;
} else if a_val > b_val {
a_dominate_b = true;
}
}
fn scale(
&self,
min_point: &Self::Point,
max_point: &Self::Point,
multiplier: f64,
) -> Self::Point {
self.iter()
.enumerate()
.map(|(i, value)| {
// Calculate the offset for the current dimension
let offset = ((value - min_point[i] + 1) as f64) * multiplier;
// Scale the point and clamp it between min_point and max_point
(min_point[i] + offset.ceil() as i32 - 1).clamp(min_point[i], max_point[i])
})
.collect()
if a_dominate_b == b_dominate_a {
return ParetoCompare::Equal;
} else if a_dominate_b {
return ParetoCompare::ADominatesB;
} else {
return ParetoCompare::BDominatesA;
}
fn within(
&self,
lower_frontier: &Frontier<Self::Point>,
upper_frontier: &Frontier<Self::Point>,
) -> PointCompareFrontiers {
// Check if the point is not dominated by any point in the lower frontier
if lower_frontier
.iter()
.any(|lower_point| self.pareto_compare(lower_point) == ParetoCompare::BDominatesA)
{
}
/// Position of a point relative to a pair of bounding frontiers.
///
/// Derives `Clone`/`Copy`/`Eq` in addition to `PartialEq`/`Debug`: the
/// previous version of this type derived `Clone`, and a fieldless enum is
/// trivially copyable.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum PointCompareFrontiers {
    /// Dominated by some point of the lower frontier.
    Below,
    /// Between the two frontiers.
    Within,
    /// Dominates some point of the upper frontier.
    Above,
}
pub fn pareto_within(
point: &Point,
lower_frontier: &Frontier,
upper_frontier: &Frontier,
) -> PointCompareFrontiers {
for point_ in lower_frontier.iter() {
if pareto_compare(point, point_) == ParetoCompare::BDominatesA {
return PointCompareFrontiers::Below;
}
}
// Check if the point does not dominate any point in the upper frontier
if upper_frontier
.iter()
.any(|upper_point| self.pareto_compare(upper_point) == ParetoCompare::ADominatesB)
{
for point_ in upper_frontier.iter() {
if pareto_compare(point, point_) == ParetoCompare::ADominatesB {
return PointCompareFrontiers::Above;
}
PointCompareFrontiers::Within
}
return PointCompareFrontiers::Within;
}
impl FrontierOps for Frontier {
type Point = Point;
/// Scales `point` away from `min_point` by `multiplier`, clamping each
/// coordinate into `[min_point[i], max_point[i]]`.
///
/// Per dimension: `clamp(min + ceil((value - min + 1) * multiplier) - 1)`.
pub fn scale_point(point: &Point, min_point: &Point, max_point: &Point, multiplier: f64) -> Point {
    let mut scaled = Vec::with_capacity(point.len());
    for (i, &value) in point.iter().enumerate() {
        let lo = min_point[i];
        let hi = max_point[i];
        // Offset is measured from the minimum, 1-based so that
        // multiplier == 1.0 maps every point to itself.
        let offset = f64::from(value - lo + 1) * multiplier;
        scaled.push((lo + offset.ceil() as i32 - 1).clamp(lo, hi));
    }
    scaled
}
fn pareto_frontier(&self) -> Frontier<Self::Point> {
let mut frontier = self.clone();
for point in self.iter() {
if !frontier.contains(point) {
continue;
}
let mut dominated_points = HashSet::new();
for other_point in frontier.iter() {
match point.pareto_compare(other_point) {
ParetoCompare::ADominatesB => {
dominated_points.insert(other_point.clone());
}
ParetoCompare::BDominatesA => {
dominated_points.insert(point.clone());
break;
}
ParetoCompare::Equal => {}
}
}
frontier = frontier.difference(&dominated_points).cloned().collect();
}
frontier
pub fn scale_frontier(
frontier: &Frontier,
min_point: &Point,
max_point: &Point,
multiplier: f64,
) -> Frontier {
if frontier.is_empty() {
return vec![];
}
fn extend(&self, min_point: &Self::Point, max_point: &Self::Point) -> Frontier<Self::Point> {
let mut frontier = self.clone();
(0..min_point.len()).into_iter().for_each(|i| {
let mut d = min_point.clone();
if let Some(v) = frontier.iter().map(|d| d[i]).max() {
d[i] = v;
}
if !frontier.contains(&d) {
d[i] = min(d[i] + 1, max_point[i]);
frontier.insert(d);
}
});
frontier
let scaled_frontier = frontier
.iter()
.map(|point| scale_point(&point, min_point, max_point, multiplier))
.collect();
if multiplier > 1.0 {
return pareto_frontier(&scaled_frontier);
}
fn scale(
&self,
min_point: &Self::Point,
max_point: &Self::Point,
multiplier: f64,
) -> Frontier<Self::Point> {
let frontier: Frontier<Self::Point> = self
let mirrored_frontier = scaled_frontier
.into_iter()
.map(|d| d.iter().map(|x| -x).collect()) // mirror the points so easiest difficulties are first
.collect::<Frontier>();
return pareto_frontier(&mirrored_frontier)
.iter()
.map(|d| d.iter().map(|x| -x).collect())
.collect();
}
pub fn pareto_algorithm(points: &Vec<Vec<i32>>, only_one: bool) -> Vec<Vec<Point>> {
if points.len() == 0 {
return vec![];
}
let points_inverted = points
.iter()
.map(|d| d.iter().map(|x| -x).collect())
.collect::<Vec<Point>>();
let mut frontiers = Vec::new();
let (mut remaining_points, indices) = unique_with_indices(&points_inverted);
//remaining_points.sort_by(|a, b| a[0].cmp(&b[0]));
while true {
let on_front = is_pareto_front(&remaining_points, true, Some(true));
// Extract frontier points
let frontier: Vec<_> = remaining_points
.iter()
.map(|point| point.scale(min_point, max_point, multiplier))
.zip(on_front.iter())
.filter(|(_, &is_front)| is_front)
.map(|(point, _)| point.to_vec())
.collect();
if multiplier > 1.0 {
frontier.pareto_frontier()
} else {
frontier
.into_iter()
.map(|d| d.iter().map(|x| -x).collect()) // mirror the points so easiest difficulties are first
frontiers.push(frontier);
let new_points: Vec<_> = remaining_points
.iter()
.zip(on_front.iter())
.filter(|(_, &is_front)| !is_front)
.map(|(point, _)| point.to_vec())
.collect();
if new_points.is_empty() {
break;
}
remaining_points = new_points;
if only_one {
break;
}
}
return frontiers
.iter()
.map(|d| {
d.iter()
.map(|x| x.iter().map(|y| -y).collect())
.collect::<Frontier>()
.pareto_frontier()
.iter()
.map(|d| d.iter().map(|x| -x).collect())
.collect()
}
}
fn sample<R: Rng>(&self, rng: &mut R) -> Self::Point {
// FIXME only works for 2 dimensional points
// Potential strategy for >2d: triangulate -> sample triangle -> sample point in triangle
match self.iter().next() {
None => panic!("Frontier is empty"),
Some(point) => {
if point.len() != 2 {
panic!("Only 2 dimensional points are supported");
}
}
};
// randomly pick a dimension
let dim = (rng.next_u32() % 2) as usize;
let dim2 = (dim + 1) % 2;
// sort points by that dimension
let mut sorted_points: Vec<&Point> = self.iter().collect();
sorted_points.sort_by(|a, b| a[dim].cmp(&b[dim]));
// sample value in that dimension
let min_v = sorted_points.first().unwrap()[dim];
let max_v = sorted_points.last().unwrap()[dim];
let rand_v = rng.gen_range(min_v..=max_v);
// interpolate value in the other dimension
match sorted_points.binary_search_by(|point| point[dim].cmp(&rand_v)) {
Ok(idx) => sorted_points[idx].clone(),
Err(idx) => {
let a = sorted_points[idx - 1];
let b = sorted_points[idx];
let ratio = (rand_v - a[dim]) as f64 / (b[dim] - a[dim]) as f64;
let rand_v2 = (a[dim2] as f64 + ratio * (b[dim2] - a[dim2]) as f64).ceil() as i32;
// a is smaller than b in dim, but larger in dim2
if rand_v2 == a[dim2] {
a.clone()
} else {
(0..2)
.into_iter()
.map(|i| if i == dim { rand_v } else { rand_v2 })
.collect()
}
}
}
}
})
.collect();
}
/// Convenience wrapper: returns the first (best) Pareto frontier of `frontier`.
///
/// Unlike `pareto_algorithm(..).first().unwrap().to_vec()`, this returns an
/// empty frontier instead of panicking when the input is empty (for empty
/// input `pareto_algorithm` returns no frontiers at all), and moves the
/// result out instead of cloning it.
pub fn pareto_frontier(frontier: &Frontier) -> Frontier {
    pareto_algorithm(frontier, true)
        .into_iter()
        .next()
        .unwrap_or_default()
}
/// Extends `frontier` toward the axes: for each dimension, if the frontier
/// does not already contain the point whose coordinate in that dimension is
/// the frontier's maximum (and minimum everywhere else), push that point
/// nudged one step further (capped at `max_point`).
pub fn extend_frontier(frontier: &Frontier, min_point: &Point, max_point: &Point) -> Frontier {
    let mut extended = frontier.clone();
    for i in 0..min_point.len() {
        // Candidate: minimum in every dimension except the maximum seen in
        // dimension i (computed over the frontier as extended so far).
        let mut candidate = min_point.clone();
        if let Some(max_i) = extended.iter().map(|p| p[i]).max() {
            candidate[i] = max_i;
        }
        if !extended.contains(&candidate) {
            candidate[i] = min(candidate[i] + 1, max_point[i]);
            extended.push(candidate);
        }
    }
    extended
}

View File

@ -1,7 +1,5 @@
mod eth;
pub use eth::*;
mod frontiers;
pub use frontiers::*;
mod hash;
pub use hash::*;
mod json;
@ -14,3 +12,5 @@ pub use number::*;
mod request;
#[cfg(any(feature = "request", feature = "request-js"))]
pub use request::*;
mod frontiers;
pub use frontiers::*;

View File

@ -1,15 +1,24 @@
use tig_utils::{Frontier, FrontierOps, ParetoCompare, PointCompareFrontiers, PointOps};
use tig_utils::{
extend_frontier, pareto_compare, pareto_frontier, pareto_within, scale_frontier, scale_point,
Frontier, ParetoCompare, PointCompareFrontiers,
};
#[test]
fn test_pareto_compare() {
assert_eq!(vec![1, 0].pareto_compare(&vec![1, 0]), ParetoCompare::Equal);
assert_eq!(vec![1, 0].pareto_compare(&vec![0, 1]), ParetoCompare::Equal);
assert_eq!(
vec![1, 1].pareto_compare(&vec![0, 1]),
pareto_compare(&vec![1, 0], &vec![1, 0]),
ParetoCompare::Equal
);
assert_eq!(
pareto_compare(&vec![0, 1], &vec![0, 1]),
ParetoCompare::Equal
);
assert_eq!(
pareto_compare(&vec![1, 1], &vec![0, 1]),
ParetoCompare::ADominatesB
);
assert_eq!(
vec![1, 0].pareto_compare(&vec![1, 1]),
pareto_compare(&vec![1, 0], &vec![1, 1]),
ParetoCompare::BDominatesA
);
}
@ -29,8 +38,8 @@ fn test_pareto_frontier() {
.into_iter()
.collect();
assert_eq!(
points.pareto_frontier(),
vec![vec![2, 2], vec![3, 1], vec![1, 3]]
pareto_frontier(&points),
vec![vec![3, 1], vec![2, 2], vec![1, 3]]
.into_iter()
.collect::<Frontier>()
);
@ -40,11 +49,11 @@ fn test_pareto_frontier() {
fn test_scale_point() {
// ceil((x - min + 1) * multiplier)
assert_eq!(
vec![3, 1].scale(&vec![0, 0], &vec![10, 10], 1.2),
scale_point(&vec![3, 1], &vec![0, 0], &vec![10, 10], 1.2),
vec![4, 2]
);
assert_eq!(
vec![6, 2].scale(&vec![0, 0], &vec![10, 10], 0.7),
scale_point(&vec![6, 2], &vec![0, 0], &vec![10, 10], 0.7),
vec![4, 2]
);
}
@ -55,13 +64,13 @@ fn test_scale_frontier() {
.into_iter()
.collect();
assert_eq!(
frontier.scale(&vec![0, 0], &vec![10, 10], 1.2),
scale_frontier(&frontier, &vec![0, 0], &vec![10, 10], 1.2),
vec![vec![4, 2], vec![3, 3], vec![1, 5]]
.into_iter()
.collect::<Frontier>()
);
assert_eq!(
frontier.scale(&vec![0, 0], &vec![10, 10], 0.6),
scale_frontier(&frontier, &vec![0, 0], &vec![10, 10], 0.6),
vec![vec![1, 1], vec![0, 2]]
.into_iter()
.collect::<Frontier>()
@ -74,8 +83,8 @@ fn test_extend() {
.into_iter()
.collect();
assert_eq!(
frontier.extend(&vec![0, 0], &vec![10, 10]),
vec![vec![4, 0], vec![3, 1], vec![2, 2], vec![0, 4]]
extend_frontier(&frontier, &vec![0, 0], &vec![10, 10]),
vec![vec![3, 1], vec![2, 2], vec![0, 4], vec![4, 0]]
.into_iter()
.collect::<Frontier>()
);
@ -90,19 +99,19 @@ fn test_within() {
.into_iter()
.collect();
assert_eq!(
vec![4, 4].within(&frontier1, &frontier2),
pareto_within(&vec![4, 4], &frontier1, &frontier2),
PointCompareFrontiers::Within
);
assert_eq!(
vec![4, 0].within(&frontier1, &frontier2),
pareto_within(&vec![4, 0], &frontier1, &frontier2),
PointCompareFrontiers::Within
);
assert_eq!(
vec![5, 4].within(&frontier1, &frontier2),
pareto_within(&vec![5, 4], &frontier1, &frontier2),
PointCompareFrontiers::Above
);
assert_eq!(
vec![1, 2].within(&frontier1, &frontier2),
pareto_within(&vec![1, 2], &frontier1, &frontier2),
PointCompareFrontiers::Below
);
}