author     Sébastien Crozet <developer@crozet.re>  2020-08-25 22:10:25 +0200
committer  Sébastien Crozet <developer@crozet.re>  2020-08-25 22:10:25 +0200
commit     754a48b7ff6d8c58b1ee08651e60112900b60455 (patch)
tree       7d777a6c003f1f5d8f8d24f533f35a95a88957fe /src/geometry
First public release of Rapier. (tag: v0.1.0)
Diffstat (limited to 'src/geometry')
 src/geometry/ball.rs | 16
 src/geometry/broad_phase.rs | 255
 src/geometry/broad_phase_multi_sap.rs | 645
 src/geometry/capsule.rs | 168
 src/geometry/collider.rs | 373
 src/geometry/collider_set.rs | 139
 src/geometry/contact.rs | 485
 src/geometry/contact_generator/ball_ball_contact_generator.rs | 98
 src/geometry/contact_generator/ball_convex_contact_generator.rs | 85
 src/geometry/contact_generator/ball_polygon_contact_generator.rs | 0
 src/geometry/contact_generator/capsule_capsule_contact_generator.rs | 200
 src/geometry/contact_generator/contact_dispatcher.rs | 127
 src/geometry/contact_generator/contact_generator.rs | 222
 src/geometry/contact_generator/cuboid_capsule_contact_generator.rs | 188
 src/geometry/contact_generator/cuboid_cuboid_contact_generator.rs | 155
 src/geometry/contact_generator/cuboid_polygon_contact_generator.rs | 0
 src/geometry/contact_generator/cuboid_triangle_contact_generator.rs | 171
 src/geometry/contact_generator/heightfield_shape_contact_generator.rs | 189
 src/geometry/contact_generator/mod.rs | 71
 src/geometry/contact_generator/polygon_polygon_contact_generator.rs | 298
 src/geometry/contact_generator/trimesh_shape_contact_generator.rs | 194
 src/geometry/contact_generator/voxels_shape_contact_generator.rs | 0
 src/geometry/cuboid.rs | 234
 src/geometry/cuboid_feature2d.rs | 128
 src/geometry/cuboid_feature3d.rs | 516
 src/geometry/interaction_graph.rs | 259
 src/geometry/mod.rs | 80
 src/geometry/narrow_phase.rs | 483
 src/geometry/polygon.rs | 76
 src/geometry/polygon_intersection.rs | 263
 src/geometry/polyhedron_feature3d.rs | 284
 src/geometry/proximity.rs | 31
 src/geometry/proximity_detector/ball_ball_proximity_detector.rs | 68
 src/geometry/proximity_detector/ball_convex_proximity_detector.rs | 53
 src/geometry/proximity_detector/ball_polygon_proximity_detector.rs | 0
 src/geometry/proximity_detector/cuboid_cuboid_proximity_detector.rs | 79
 src/geometry/proximity_detector/cuboid_polygon_proximity_detector.rs | 0
 src/geometry/proximity_detector/cuboid_triangle_proximity_detector.rs | 88
 src/geometry/proximity_detector/mod.rs | 30
 src/geometry/proximity_detector/polygon_polygon_proximity_detector.rs | 54
 src/geometry/proximity_detector/proximity_detector.rs | 212
 src/geometry/proximity_detector/proximity_dispatcher.rs | 136
 src/geometry/proximity_detector/trimesh_shape_proximity_detector.rs | 133
 src/geometry/proximity_detector/voxels_shape_proximity_detector.rs | 0
 src/geometry/sat.rs | 294
 src/geometry/triangle.rs | 9
 src/geometry/trimesh.rs | 122
 src/geometry/waabb.rs | 116
 src/geometry/z_order.rs | 70
 49 files changed, 7897 insertions(+), 0 deletions(-)
diff --git a/src/geometry/ball.rs b/src/geometry/ball.rs
new file mode 100644
index 0000000..7f4ad03
--- /dev/null
+++ b/src/geometry/ball.rs
@@ -0,0 +1,16 @@
+#[cfg(feature = "simd-is-enabled")]
+use crate::math::{Point, SimdFloat};
+
+#[cfg(feature = "simd-is-enabled")]
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct WBall {
+ pub center: Point<SimdFloat>,
+ pub radius: SimdFloat,
+}
+
+#[cfg(feature = "simd-is-enabled")]
+impl WBall {
+ pub fn new(center: Point<SimdFloat>, radius: SimdFloat) -> Self {
+ WBall { center, radius }
+ }
+}
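Note: `WBall` packs SIMD_WIDTH balls into a single structure-of-arrays value so one intersection test can process several balls at once. Below is a minimal sketch of that layout, using plain fixed-size arrays in place of the crate's `SimdFloat`; all names here are illustrative stand-ins, not part of Rapier's API.

const LANES: usize = 4; // stand-in for SIMD_WIDTH

struct WideBall {
    center_x: [f32; LANES], // x coordinates of the packed centers, one per lane
    center_y: [f32; LANES], // y coordinates of the packed centers, one per lane
    radius: [f32; LANES],   // one radius per lane
}

impl WideBall {
    // Squared distance from each packed center to one query point; with real
    // SIMD types this loop body becomes a handful of vector instructions.
    fn distance_sq_to_point(&self, px: f32, py: f32) -> [f32; LANES] {
        let mut out = [0.0; LANES];
        for lane in 0..LANES {
            let dx = self.center_x[lane] - px;
            let dy = self.center_y[lane] - py;
            out[lane] = dx * dx + dy * dy;
        }
        out
    }
}

fn main() {
    let balls = WideBall {
        center_x: [0.0, 1.0, 2.0, 3.0],
        center_y: [0.0; LANES],
        radius: [0.5; LANES],
    };
    let d2 = balls.distance_sq_to_point(1.0, 0.0);
    assert_eq!(d2[1], 0.0); // the query point sits on lane 1's center
}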
diff --git a/src/geometry/broad_phase.rs b/src/geometry/broad_phase.rs
new file mode 100644
index 0000000..a43b7af
--- /dev/null
+++ b/src/geometry/broad_phase.rs
@@ -0,0 +1,255 @@
+use crate::geometry::ColliderHandle;
+use ncollide::bounding_volume::AABB;
+#[cfg(feature = "simd-is-enabled")]
+use {
+ crate::geometry::WAABB,
+ crate::math::{Point, SIMD_WIDTH},
+ crate::utils::WVec,
+ simba::simd::SimdBool as _,
+};
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+pub struct ColliderPair {
+ pub collider1: ColliderHandle,
+ pub collider2: ColliderHandle,
+}
+
+impl ColliderPair {
+ pub fn new(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
+ ColliderPair {
+ collider1,
+ collider2,
+ }
+ }
+
+ pub fn new_sorted(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
+ if collider1.into_raw_parts().0 <= collider2.into_raw_parts().0 {
+ Self::new(collider1, collider2)
+ } else {
+ Self::new(collider2, collider1)
+ }
+ }
+
+ pub fn swap(self) -> Self {
+ Self::new(self.collider2, self.collider1)
+ }
+
+ pub fn zero() -> Self {
+ Self {
+ collider1: ColliderHandle::from_raw_parts(0, 0),
+ collider2: ColliderHandle::from_raw_parts(0, 0),
+ }
+ }
+}
+
+pub struct WAABBHierarchyIntersections {
+ curr_level_interferences: Vec<usize>,
+ next_level_interferences: Vec<usize>,
+}
+
+impl WAABBHierarchyIntersections {
+ pub fn new() -> Self {
+ Self {
+ curr_level_interferences: Vec::new(),
+ next_level_interferences: Vec::new(),
+ }
+ }
+
+ pub fn computed_interferences(&self) -> &[usize] {
+ &self.curr_level_interferences[..]
+ }
+
+ pub(crate) fn computed_interferences_mut(&mut self) -> &mut Vec<usize> {
+ &mut self.curr_level_interferences
+ }
+}
+
+#[cfg(feature = "simd-is-enabled")]
+#[derive(Clone)]
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+pub struct WAABBHierarchy {
+ levels: Vec<Vec<WAABB>>,
+}
+
+#[cfg(feature = "simd-is-enabled")]
+impl WAABBHierarchy {
+ pub fn new(aabbs: &[AABB<f32>]) -> Self {
+ let mut waabbs: Vec<_> = aabbs
+ .chunks_exact(SIMD_WIDTH)
+ .map(|aabbs| WAABB::from(array![|ii| aabbs[ii]; SIMD_WIDTH]))
+ .collect();
+
+ if aabbs.len() % SIMD_WIDTH != 0 {
+ let first_i = (aabbs.len() / SIMD_WIDTH) * SIMD_WIDTH;
+ let last_i = aabbs.len() - 1;
+ let last_waabb =
+ WAABB::from(array![|ii| aabbs[(first_i + ii).min(last_i)]; SIMD_WIDTH]);
+ waabbs.push(last_waabb);
+ }
+
+ let mut levels = vec![waabbs];
+
+ loop {
+ let last_level = levels.last().unwrap();
+ let mut next_level = Vec::new();
+
+ for chunk in last_level.chunks_exact(SIMD_WIDTH) {
+ let mins = Point::from(array![|ii| chunk[ii].mins.horizontal_inf(); SIMD_WIDTH]);
+ let maxs = Point::from(array![|ii| chunk[ii].maxs.horizontal_sup(); SIMD_WIDTH]);
+ next_level.push(WAABB::new(mins, maxs));
+ }
+
+ // Deal with the last non-exact chunk.
+ if last_level.len() % SIMD_WIDTH != 0 {
+ let first_id = (last_level.len() / SIMD_WIDTH) * SIMD_WIDTH;
+ let last_id = last_level.len() - 1;
+ let mins = array![|ii| last_level[(first_id + ii).min(last_id)]
+ .mins
+ .horizontal_inf(); SIMD_WIDTH];
+ let maxs = array![|ii| last_level[(first_id + ii).min(last_id)]
+ .maxs
+ .horizontal_sup(); SIMD_WIDTH];
+
+ let mins = Point::from(mins);
+ let maxs = Point::from(maxs);
+ next_level.push(WAABB::new(mins, maxs));
+ }
+
+ if next_level.len() == 1 {
+ levels.push(next_level);
+ break;
+ }
+
+ levels.push(next_level);
+ }
+
+ Self { levels }
+ }
+
+ pub fn compute_interferences_with(
+ &self,
+ aabb: AABB<f32>,
+ workspace: &mut WAABBHierarchyIntersections,
+ ) {
+ let waabb1 = WAABB::splat(aabb);
+ workspace.next_level_interferences.clear();
+ workspace.curr_level_interferences.clear();
+ workspace.curr_level_interferences.push(0);
+
+ for level in self.levels.iter().rev() {
+ for i in &workspace.curr_level_interferences {
+ // This `if let` handles the case where `*i` is out of bounds because
+ // the initial number of AABBs was not a power of SIMD_WIDTH.
+ if let Some(waabb2) = level.get(*i) {
+ // NOTE: using `intersect.bitmask()` and performing bit comparisons
+ // is much more efficient than testing whether each `intersect.extract(j)` is true.
+ let intersect = waabb1.intersects_lanewise(waabb2);
+ let bitmask = intersect.bitmask();
+
+ for j in 0..SIMD_WIDTH {
+ if (bitmask & (1 << j)) != 0 {
+ workspace.next_level_interferences.push(i * SIMD_WIDTH + j)
+ }
+ }
+ }
+ }
+
+ std::mem::swap(
+ &mut workspace.curr_level_interferences,
+ &mut workspace.next_level_interferences,
+ );
+ workspace.next_level_interferences.clear();
+ }
+ }
+}
+
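Note: the `bitmask()` trick above turns a lane-wise SIMD comparison into a single integer, so finding the intersecting lanes is just a matter of scanning set bits. A self-contained illustration with plain integers (no SIMD types involved):

// One bit per lane: bit j set means lane j intersected.
fn hit_lanes(bitmask: u8, width: usize) -> Vec<usize> {
    (0..width).filter(|&j| bitmask & (1 << j) != 0).collect()
}

fn main() {
    // Lanes 0 and 2 intersected: bitmask 0b0101.
    assert_eq!(hit_lanes(0b0101, 4), vec![0, 2]);
}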
+#[cfg(not(feature = "simd-is-enabled"))]
+#[derive(Clone)]
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+pub struct WAABBHierarchy {
+ levels: Vec<Vec<AABB<f32>>>,
+}
+
+#[cfg(not(feature = "simd-is-enabled"))]
+impl WAABBHierarchy {
+ const GROUP_SIZE: usize = 4;
+
+ pub fn new(aabbs: &[AABB<f32>]) -> Self {
+ use ncollide::bounding_volume::BoundingVolume;
+
+ let mut levels = vec![aabbs.to_vec()];
+
+ loop {
+ let last_level = levels.last().unwrap();
+ let mut next_level = Vec::new();
+
+ for chunk in last_level.chunks(Self::GROUP_SIZE) {
+ let mut merged = chunk[0];
+ for aabb in &chunk[1..] {
+ merged.merge(aabb)
+ }
+
+ next_level.push(merged);
+ }
+
+ if next_level.len() == 1 {
+ levels.push(next_level);
+ break;
+ }
+
+ levels.push(next_level);
+ }
+
+ Self { levels }
+ }
+
+ pub fn compute_interferences_with(
+ &self,
+ aabb1: AABB<f32>,
+ workspace: &mut WAABBHierarchyIntersections,
+ ) {
+ use ncollide::bounding_volume::BoundingVolume;
+
+ workspace.next_level_interferences.clear();
+ workspace.curr_level_interferences.clear();
+ workspace.curr_level_interferences.push(0);
+
+ for level in self.levels[1..].iter().rev() {
+ for i in &workspace.curr_level_interferences {
+ for j in 0..Self::GROUP_SIZE {
+ if let Some(aabb2) = level.get(*i + j) {
+ if aabb1.intersects(aabb2) {
+ workspace
+ .next_level_interferences
+ .push((i + j) * Self::GROUP_SIZE)
+ }
+ }
+ }
+ }
+
+ std::mem::swap(
+ &mut workspace.curr_level_interferences,
+ &mut workspace.next_level_interferences,
+ );
+ workspace.next_level_interferences.clear();
+ }
+
+ // Last level.
+ for i in &workspace.curr_level_interferences {
+ for j in 0..Self::GROUP_SIZE {
+ if let Some(aabb2) = self.levels[0].get(*i + j) {
+ if aabb1.intersects(aabb2) {
+ workspace.next_level_interferences.push(i + j)
+ }
+ }
+ }
+ }
+
+ std::mem::swap(
+ &mut workspace.curr_level_interferences,
+ &mut workspace.next_level_interferences,
+ );
+ workspace.next_level_interferences.clear();
+ }
+}
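Both variants of `WAABBHierarchy` implement the same idea: group leaf volumes four by four into merged parents, then descend from the root, visiting only the children of nodes the query volume touches. The following is a self-contained miniature of that descent using 1-D intervals instead of AABBs (scalar only, group size 4, all names illustrative):

const GROUP: usize = 4;

#[derive(Clone, Copy)]
struct Interval { min: f32, max: f32 }

impl Interval {
    fn intersects(&self, other: &Interval) -> bool {
        self.min <= other.max && other.min <= self.max
    }
    fn merged(&self, other: &Interval) -> Interval {
        Interval { min: self.min.min(other.min), max: self.max.max(other.max) }
    }
}

// Level 0 holds the leaves; each higher level merges GROUP consecutive nodes.
fn build_levels(leaves: &[Interval]) -> Vec<Vec<Interval>> {
    let mut levels = vec![leaves.to_vec()];
    loop {
        let next: Vec<Interval> = levels
            .last()
            .unwrap()
            .chunks(GROUP)
            .map(|c| c[1..].iter().fold(c[0], |a, b| a.merged(b)))
            .collect();
        let done = next.len() == 1;
        levels.push(next);
        if done { return levels; }
    }
}

// Descend from the root, keeping group base indices, exactly like
// `compute_interferences_with` above.
fn interferences(levels: &[Vec<Interval>], query: Interval) -> Vec<usize> {
    let mut curr = vec![0usize];
    let mut next = Vec::new();
    for level in levels[1..].iter().rev() {
        for &i in &curr {
            for j in 0..GROUP {
                if let Some(node) = level.get(i + j) {
                    if query.intersects(node) {
                        next.push((i + j) * GROUP); // base index of its children
                    }
                }
            }
        }
        std::mem::swap(&mut curr, &mut next);
        next.clear();
    }
    // Last level: the surviving indices are leaf indices.
    let mut out = Vec::new();
    for &i in &curr {
        for j in 0..GROUP {
            if let Some(leaf) = levels[0].get(i + j) {
                if query.intersects(leaf) {
                    out.push(i + j);
                }
            }
        }
    }
    out
}

fn main() {
    let leaves: Vec<Interval> = (0..6)
        .map(|k| Interval { min: k as f32, max: k as f32 + 0.5 })
        .collect();
    let levels = build_levels(&leaves);
    // [3.9, 5.1] overlaps only leaves 4 ([4, 4.5]) and 5 ([5, 5.5]).
    assert_eq!(interferences(&levels, Interval { min: 3.9, max: 5.1 }), vec![4, 5]);
}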
diff --git a/src/geometry/broad_phase_multi_sap.rs b/src/geometry/broad_phase_multi_sap.rs
new file mode 100644
index 0000000..8356339
--- /dev/null
+++ b/src/geometry/broad_phase_multi_sap.rs
@@ -0,0 +1,645 @@
+use crate::dynamics::RigidBodySet;
+use crate::geometry::{ColliderHandle, ColliderPair, ColliderSet};
+use crate::math::{Point, Vector, DIM};
+#[cfg(feature = "enhanced-determinism")]
+use crate::utils::FxHashMap32 as HashMap;
+use bit_vec::BitVec;
+use ncollide::bounding_volume::{BoundingVolume, AABB};
+#[cfg(not(feature = "enhanced-determinism"))]
+use rustc_hash::FxHashMap as HashMap;
+use std::cmp::Ordering;
+use std::ops::{Index, IndexMut};
+
+const NUM_SENTINELS: usize = 1;
+const NEXT_FREE_SENTINEL: u32 = u32::MAX;
+const SENTINEL_VALUE: f32 = f32::MAX;
+const CELL_WIDTH: f32 = 20.0;
+
+pub enum BroadPhasePairEvent {
+ AddPair(ColliderPair),
+ DeletePair(ColliderPair),
+}
+
+fn sort2(a: u32, b: u32) -> (u32, u32) {
+ assert_ne!(a, b);
+
+ if a < b {
+ (a, b)
+ } else {
+ (b, a)
+ }
+}
+
+fn point_key(point: Point<f32>) -> Point<i32> {
+ (point / CELL_WIDTH).coords.map(|e| e.floor() as i32).into()
+}
+
+fn region_aabb(index: Point<i32>) -> AABB<f32> {
+ let mins = index.coords.map(|i| i as f32 * CELL_WIDTH).into();
+ let maxs = mins + Vector::repeat(CELL_WIDTH);
+ AABB::new(mins, maxs)
+}
+
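A quick worked example of the two mappings above, with CELL_WIDTH = 20.0. Note that `floor` (rather than `as i32` truncation) is what keeps negative coordinates in the correct cell:

const CELL_WIDTH: f32 = 20.0;

fn key_1d(x: f32) -> i32 {
    (x / CELL_WIDTH).floor() as i32
}

fn main() {
    // 25.3 / 20.0 = 1.265 -> floor -> key 1, i.e. the region [20.0, 40.0).
    assert_eq!(key_1d(25.3), 1);
    // -3.0 / 20.0 = -0.15 -> floor -> key -1, i.e. the region [-20.0, 0.0).
    // Truncation would give key 0 here and misplace the point.
    assert_eq!(key_1d(-3.0), -1);
}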
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+struct Endpoint {
+ value: f32,
+ packed_flag_proxy: u32,
+}
+
+const START_FLAG_MASK: u32 = 0b1 << 31;
+const PROXY_MASK: u32 = u32::MAX ^ START_FLAG_MASK;
+const START_SENTINEL_TAG: u32 = u32::MAX;
+const END_SENTINEL_TAG: u32 = u32::MAX ^ START_FLAG_MASK;
+
+impl Endpoint {
+ pub fn start_endpoint(value: f32, proxy: u32) -> Self {
+ Self {
+ value,
+ packed_flag_proxy: proxy | START_FLAG_MASK,
+ }
+ }
+
+ pub fn end_endpoint(value: f32, proxy: u32) -> Self {
+ Self {
+ value,
+ packed_flag_proxy: proxy & PROXY_MASK,
+ }
+ }
+
+ pub fn start_sentinel() -> Self {
+ Self {
+ value: -SENTINEL_VALUE,
+ packed_flag_proxy: START_SENTINEL_TAG,
+ }
+ }
+
+ pub fn end_sentinel() -> Self {
+ Self {
+ value: SENTINEL_VALUE,
+ packed_flag_proxy: END_SENTINEL_TAG,
+ }
+ }
+
+ pub fn is_sentinel(self) -> bool {
+ self.packed_flag_proxy & PROXY_MASK == PROXY_MASK
+ }
+
+ pub fn proxy(self) -> u32 {
+ self.packed_flag_proxy & PROXY_MASK
+ }
+
+ pub fn is_start(self) -> bool {
+ (self.packed_flag_proxy & START_FLAG_MASK) != 0
+ }
+
+ pub fn is_end(self) -> bool {
+ (self.packed_flag_proxy & START_FLAG_MASK) == 0
+ }
+}
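The packing above stores one `u32` per endpoint: the top bit carries the start/end flag and the low 31 bits carry the proxy index, with the all-ones index reserved for sentinels. A standalone round-trip of that scheme, using the same mask constants:

const START_FLAG_MASK: u32 = 0b1 << 31;
const PROXY_MASK: u32 = u32::MAX ^ START_FLAG_MASK;

fn main() {
    let proxy = 12_345u32;
    let start = proxy | START_FLAG_MASK; // start endpoint: flag bit set
    let end = proxy & PROXY_MASK;        // end endpoint: flag bit clear
    assert_eq!(start & PROXY_MASK, proxy); // the index survives packing
    assert_ne!(start & START_FLAG_MASK, 0);
    assert_eq!(end & START_FLAG_MASK, 0);
    // Sentinels use the all-ones proxy index, so `is_sentinel` is one mask test.
    assert_eq!(u32::MAX & PROXY_MASK, PROXY_MASK);
}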
+
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+struct SAPAxis {
+ min_bound: f32,
+ max_bound: f32,
+ endpoints: Vec<Endpoint>,
+ #[cfg_attr(feature = "serde-serialize", serde(skip))]
+ new_endpoints: Vec<(Endpoint, usize)>, // Workspace
+}
+
+impl SAPAxis {
+ fn new(min_bound: f32, max_bound: f32) -> Self {
+ assert!(min_bound <= max_bound);
+
+ Self {
+ min_bound,
+ max_bound,
+ endpoints: vec![Endpoint::start_sentinel(), Endpoint::end_sentinel()],
+ new_endpoints: Vec::new(),
+ }
+ }
+
+ fn batch_insert(
+ &mut self,
+ dim: usize,
+ new_proxies: &[usize],
+ proxies: &Proxies,
+ reporting: Option<&mut HashMap<(u32, u32), bool>>,
+ ) {
+ if new_proxies.is_empty() {
+ return;
+ }
+
+ self.new_endpoints.clear();
+
+ for proxy_id in new_proxies {
+ let proxy = &proxies[*proxy_id];
+ assert!(proxy.aabb.mins[dim] <= self.max_bound);
+ assert!(proxy.aabb.maxs[dim] >= self.min_bound);
+ let start_endpoint = Endpoint::start_endpoint(proxy.aabb.mins[dim], *proxy_id as u32);
+ let end_endpoint = Endpoint::end_endpoint(proxy.aabb.maxs[dim], *proxy_id as u32);
+
+ self.new_endpoints.push((start_endpoint, 0));
+ self.new_endpoints.push((end_endpoint, 0));
+ }
+
+ self.new_endpoints
+ .sort_by(|a, b| a.0.value.partial_cmp(&b.0.value).unwrap_or(Ordering::Equal));
+
+ let mut curr_existing_index = self.endpoints.len() - NUM_SENTINELS - 1;
+ let new_num_endpoints = self.endpoints.len() + self.new_endpoints.len();
+ self.endpoints
+ .resize(new_num_endpoints, Endpoint::end_sentinel());
+ let mut curr_shift_index = new_num_endpoints - NUM_SENTINELS - 1;
+
+ // Sort the endpoints.
+ // TODO: specialize for the case where this is the
+ // first time we insert endpoints into this axis?
+ for new_endpoint in self.new_endpoints.iter_mut().rev() {
+ loop {
+ let existing_endpoint = self.endpoints[curr_existing_index];
+ if existing_endpoint.value <= new_endpoint.0.value {
+ break;
+ }
+
+ self.endpoints[curr_shift_index] = existing_endpoint;
+
+ curr_shift_index -= 1;
+ curr_existing_index -= 1;
+ }
+
+ self.endpoints[curr_shift_index] = new_endpoint.0;
+ new_endpoint.1 = curr_shift_index;
+ curr_shift_index -= 1;
+ }
+
+ // Report pairs using a single forward sweep from each new start endpoint.
+ let endpoints_wo_last_sentinel = &self.endpoints[..self.endpoints.len() - 1];
+ if let Some(reporting) = reporting {
+ for (endpoint, endpoint_id) in self.new_endpoints.drain(..).filter(|e| e.0.is_start()) {
+ let proxy1 = &proxies[endpoint.proxy() as usize];
+ let min = endpoint.value;
+ let max = proxy1.aabb.maxs[dim];
+
+ for endpoint2 in &endpoints_wo_last_sentinel[endpoint_id + 1..] {
+ if endpoint2.proxy() == endpoint.proxy() {
+ continue;
+ }
+
+ let proxy2 = &proxies[endpoint2.proxy() as usize];
+
+ // NOTE: some pairs with equal aabb.mins[dim] may end up being reported twice.
+ if (endpoint2.is_start() && endpoint2.value < max)
+ || (endpoint2.is_end() && proxy2.aabb.mins[dim] <= min)
+ {
+ // The endpoints overlap along this axis; confirm with the full AABBs.
+ if proxy1.aabb.intersects(&proxy2.aabb) {
+ // Report pair.
+ let pair = sort2(endpoint.proxy(), endpoint2.proxy());
+ reporting.insert(pair, true);
+ }
+ }
+ }
+ }
+ }
+ }
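`batch_insert` relies on both inputs being sorted: it grows the endpoint vector once, then merges the new endpoints in from the back, so each existing endpoint is shifted at most one time. The merge in isolation (sentinels omitted; `merge_from_back` is a hypothetical helper name):

// Merge sorted `new_sorted` into sorted `existing`, shifting from the back.
fn merge_from_back(existing: &mut Vec<f32>, new_sorted: &[f32]) {
    let mut read = existing.len(); // one past the last original element
    existing.resize(existing.len() + new_sorted.len(), 0.0);
    let mut write = existing.len();
    for &value in new_sorted.iter().rev() {
        // Shift existing elements greater than `value` to their final slot.
        while read > 0 && existing[read - 1] > value {
            write -= 1;
            existing[write] = existing[read - 1];
            read -= 1;
        }
        write -= 1;
        existing[write] = value;
    }
}

fn main() {
    let mut endpoints = vec![1.0, 4.0, 9.0];
    merge_from_back(&mut endpoints, &[3.0, 7.0]);
    assert_eq!(endpoints, vec![1.0, 3.0, 4.0, 7.0, 9.0]);
}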
+
+ fn delete_out_of_bounds_proxies(&self, existing_proxies: &mut BitVec) -> bool {
+ let mut deleted_any = false;
+ for endpoint in &self.endpoints {
+ if endpoint.value < self.min_bound {
+ if endpoint.is_end() {
+ existing_proxies.set(endpoint.proxy() as usize, false);
+ deleted_any = true;
+ }
+ } else {
+ break;
+ }
+ }
+
+ for endpoint in self.endpoints.iter().rev() {
+ if endpoint.value > self.max_bound {
+ if endpoint.is_start() {
+ existing_proxies.set(endpoint.proxy() as usize, false);
+ deleted_any = true;
+ }
+ } else {
+ break;
+ }
+ }
+
+ deleted_any
+ }
+
+ fn delete_out_of_bounds_endpoints(&mut self, existing_proxies: &BitVec) {
+ self.endpoints
+ .retain(|endpt| endpt.is_sentinel() || existing_proxies[endpt.proxy() as usize])
+ }
+
+ fn update_endpoints(
+ &mut self,
+ dim: usize,
+ proxies: &Proxies,
+ reporting: &mut HashMap<(u32, u32), bool>,
+ ) {
+ let last_endpoint = self.endpoints.len() - NUM_SENTINELS;
+ for i in NUM_SENTINELS..last_endpoint {
+ let mut endpoint_i = self.endpoints[i];
+ let aabb_i = proxies[endpoint_i.proxy() as usize].aabb;
+
+ if endpoint_i.is_start() {
+ endpoint_i.value = aabb_i.mins[dim];
+ } else {
+ endpoint_i.value = aabb_i.maxs[dim];
+ }
+
+ let mut j = i;
+
+ if endpoint_i.is_start() {
+ while endpoint_i.value < self.endpoints[j - 1].value {
+ let endpoint_j = self.endpoints[j - 1];
+ self.endpoints[j] = endpoint_j;
+
+ if endpoint_j.is_end() {
+ // Report start collision.
+ if aabb_i.intersects(&proxies[endpoint_j.proxy() as usize].aabb) {
+ let pair = sort2(endpoint_i.proxy(), endpoint_j.proxy());
+ reporting.insert(pair, true);
+ }
+ }
+
+ j -= 1;
+ }
+ } else {
+ while endpoint_i.value < self.endpoints[j - 1].value {
+ let endpoint_j = self.endpoints[j - 1];
+ self.endpoints[j] = endpoint_j;
+
+ if endpoint_j.is_start() {
+ // Report end collision.
+ if !aabb_i.intersects(&proxies[endpoint_j.proxy() as usize].aabb) {
+ let pair = sort2(endpoint_i.proxy(), endpoint_j.proxy());
+ reporting.insert(pair, false);
+ }
+ }
+
+ j -= 1;
+ }
+ }
+
+ self.endpoints[j] = endpoint_i;
+ }
+
+ // println!(
+ // "Num start swaps: {}, end swaps: {}, dim: {}",
+ // num_start_swaps, num_end_swaps, dim
+ // );
+ }
+}
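`update_endpoints` is an insertion sort that reports events as a side effect: when a start endpoint swaps past an end endpoint the two intervals begin to overlap on this axis (the full AABB test then confirms the pair), and when an end endpoint swaps past a start they stop overlapping. The 1-D invariant behind this, spelled out as a tiny sketch:

#[derive(Clone, Copy)]
struct Interval { min: f32, max: f32 }

fn overlaps(a: Interval, b: Interval) -> bool {
    a.min <= b.max && b.min <= a.max
}

fn main() {
    let a = Interval { min: 0.0, max: 1.0 };
    let mut b = Interval { min: 2.0, max: 3.0 };
    assert!(!overlaps(a, b)); // b's start is sorted after a's end: disjoint
    // Moving b's start below a's end swaps the two endpoints in sorted order;
    // that swap is exactly the moment the pair starts overlapping.
    b.min = 0.5;
    assert!(overlaps(a, b));
}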
+
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+struct SAPRegion {
+ axii: [SAPAxis; DIM],
+ existing_proxies: BitVec,
+ #[cfg_attr(feature = "serde-serialize", serde(skip))]
+ to_insert: Vec<usize>, // Workspace
+ need_update: bool,
+}
+
+impl SAPRegion {
+ pub fn new(bounds: AABB<f32>) -> Self {
+ let axii = [
+ SAPAxis::new(bounds.mins.x, bounds.maxs.x),
+ SAPAxis::new(bounds.mins.y, bounds.maxs.y),
+ #[cfg(feature = "dim3")]
+ SAPAxis::new(bounds.mins.z, bounds.maxs.z),
+ ];
+ SAPRegion {
+ axii,
+ existing_proxies: BitVec::new(),
+ to_insert: Vec::new(),
+ need_update: false,
+ }
+ }
+
+ pub fn predelete_proxy(&mut self, _proxy_id: usize) {
+ // We keep the proxy_id as an argument for uniformity with the "preupdate"
+ // method. We don't actually need it, though: the deletion is handled
+ // transparently during the next update.
+ self.need_update = true;
+ }
+
+ pub fn preupdate_proxy(&mut self, proxy_id: usize) -> bool {
+ let mask_len = self.existing_proxies.len();
+ if proxy_id >= mask_len {
+ self.existing_proxies.grow(proxy_id + 1 - mask_len, false);
+ }
+
+ if !self.existing_proxies[proxy_id] {
+ self.to_insert.push(proxy_id);
+ self.existing_proxies.set(proxy_id, true);
+ false
+ } else {
+ self.need_update = true;
+ true
+ }
+ }
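`preupdate_proxy` grows the `BitVec` on demand and returns whether the proxy was already present in this region. The same grow-then-test pattern, with a plain `Vec<bool>` standing in for the bit vector:

// Returns `true` if `id` was already marked (mirroring `preupdate_proxy`'s
// return value), growing the mask on demand.
fn mark(mask: &mut Vec<bool>, id: usize) -> bool {
    if id >= mask.len() {
        mask.resize(id + 1, false);
    }
    let already = mask[id];
    mask[id] = true;
    already
}

fn main() {
    let mut mask = Vec::new();
    assert!(!mark(&mut mask, 5)); // first time: queued for insertion
    assert!(mark(&mut mask, 5));  // second time: needs an update instead
}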
+
+ pub fn update(&mut self, proxies: &Proxies, reporting: &mut HashMap<(u32, u32), bool>) {
+ if self.need_update {
+ // Update endpoints.
+ let mut deleted_any = false;
+ for dim in 0..DIM {
+ self.axii[dim].update_endpoints(dim, proxies, reporting);
+ deleted_any = self.axii[dim]
+ .delete_out_of_bounds_proxies(&mut self.existing_proxies)