From b00113ed2f9e4824a254027b57c9b4a07c4c2307 Mon Sep 17 00:00:00 2001 From: Sébastien Crozet Date: Sat, 23 Mar 2024 10:15:07 +0100 Subject: fix: implement linear-coupled-motor constraint between two dynamic bodies Fix #602 --- .../contact_constraint/contact_constraints_set.rs | 1 + .../joint_constraint/joint_constraint_builder.rs | 76 ++++++++++++++++++++++ .../joint_constraint/joint_velocity_constraint.rs | 43 ++++++------ 3 files changed, 98 insertions(+), 22 deletions(-) (limited to 'src') diff --git a/src/dynamics/solver/contact_constraint/contact_constraints_set.rs b/src/dynamics/solver/contact_constraint/contact_constraints_set.rs index b5fdb6d..4d88949 100644 --- a/src/dynamics/solver/contact_constraint/contact_constraints_set.rs +++ b/src/dynamics/solver/contact_constraint/contact_constraints_set.rs @@ -25,6 +25,7 @@ use { crate::math::SIMD_WIDTH, }; +#[derive(Debug)] pub struct ConstraintsCounts { pub num_constraints: usize, pub num_jacobian_lines: usize, diff --git a/src/dynamics/solver/joint_constraint/joint_constraint_builder.rs b/src/dynamics/solver/joint_constraint/joint_constraint_builder.rs index 00bead1..44716b2 100644 --- a/src/dynamics/solver/joint_constraint/joint_constraint_builder.rs +++ b/src/dynamics/solver/joint_constraint/joint_constraint_builder.rs @@ -551,6 +551,82 @@ impl JointTwoBodyConstraintHelper { constraint } + pub fn motor_linear_coupled( + &self, + params: &IntegrationParameters, + joint_id: [JointIndex; LANES], + body1: &JointSolverBody, + body2: &JointSolverBody, + coupled_axes: u8, + motor_params: &MotorParameters, + limits: Option<[N; 2]>, + writeback_id: WritebackId, + ) -> JointTwoBodyConstraint { + let inv_dt = N::splat(params.inv_dt()); + + let mut lin_jac = Vector::zeros(); + let mut ang_jac1: AngVector = na::zero(); + let mut ang_jac2: AngVector = na::zero(); + + for i in 0..DIM { + if coupled_axes & (1 << i) != 0 { + let coeff = self.basis.column(i).dot(&self.lin_err); + lin_jac += self.basis.column(i) * coeff; + 
#[cfg(feature = "dim2")] + { + ang_jac1 += self.cmat1_basis[i] * coeff; + ang_jac2 += self.cmat2_basis[i] * coeff; + } + #[cfg(feature = "dim3")] + { + ang_jac1 += self.cmat1_basis.column(i) * coeff; + ang_jac2 += self.cmat2_basis.column(i) * coeff; + } + } + } + + let dist = lin_jac.norm(); + let inv_dist = crate::utils::simd_inv(dist); + lin_jac *= inv_dist; + ang_jac1 *= inv_dist; + ang_jac2 *= inv_dist; + + let mut rhs_wo_bias = N::zero(); + if motor_params.erp_inv_dt != N::zero() { + rhs_wo_bias += (dist - motor_params.target_pos) * motor_params.erp_inv_dt; + } + + let mut target_vel = motor_params.target_vel; + if let Some(limits) = limits { + target_vel = + target_vel.simd_clamp((limits[0] - dist) * inv_dt, (limits[1] - dist) * inv_dt); + }; + + rhs_wo_bias += -target_vel; + + ang_jac1 = body1.sqrt_ii * ang_jac1; + ang_jac2 = body2.sqrt_ii * ang_jac2; + + JointTwoBodyConstraint { + joint_id, + solver_vel1: body1.solver_vel, + solver_vel2: body2.solver_vel, + im1: body1.im, + im2: body2.im, + impulse: N::zero(), + impulse_bounds: [-motor_params.max_impulse, motor_params.max_impulse], + lin_jac, + ang_jac1, + ang_jac2, + inv_lhs: N::zero(), // Will be set during ortogonalization. 
+ cfm_coeff: motor_params.cfm_coeff, + cfm_gain: motor_params.cfm_gain, + rhs: rhs_wo_bias, + rhs_wo_bias, + writeback_id, + } + } + pub fn lock_linear( &self, params: &IntegrationParameters, diff --git a/src/dynamics/solver/joint_constraint/joint_velocity_constraint.rs b/src/dynamics/solver/joint_constraint/joint_velocity_constraint.rs index e184e3d..60c42d3 100644 --- a/src/dynamics/solver/joint_constraint/joint_velocity_constraint.rs +++ b/src/dynamics/solver/joint_constraint/joint_velocity_constraint.rs @@ -217,28 +217,26 @@ impl JointTwoBodyConstraint { } if (motor_axes & coupled_axes) & JointAxesMask::LIN_AXES.bits() != 0 { - // if (motor_axes & !coupled_axes) & (1 << first_coupled_lin_axis_id) != 0 { - // let limits = if limit_axes & (1 << first_coupled_lin_axis_id) != 0 { - // Some([ - // joint.limits[first_coupled_lin_axis_id].min, - // joint.limits[first_coupled_lin_axis_id].max, - // ]) - // } else { - // None - // }; - // - // out[len] = builder.motor_linear_coupled - // params, - // [joint_id], - // body1, - // body2, - // coupled_axes, - // &joint.motors[first_coupled_lin_axis_id].motor_params(params.dt), - // limits, - // WritebackId::Motor(first_coupled_lin_axis_id), - // ); - // len += 1; - // } + let limits = if (limit_axes & (1 << first_coupled_lin_axis_id)) != 0 { + Some([ + joint.limits[first_coupled_lin_axis_id].min, + joint.limits[first_coupled_lin_axis_id].max, + ]) + } else { + None + }; + + out[len] = builder.motor_linear_coupled( + params, + [joint_id], + body1, + body2, + coupled_axes, + &joint.motors[first_coupled_lin_axis_id].motor_params(params.dt), + limits, + WritebackId::Motor(first_coupled_lin_axis_id), + ); + len += 1; } JointTwoBodyConstraintHelper::finalize_constraints(&mut out[start..len]); @@ -350,6 +348,7 @@ impl JointTwoBodyConstraint { } } } + #[cfg(feature = "simd-is-enabled")] impl JointTwoBodyConstraint { pub fn lock_axes( -- cgit From 3fd18f4da8476a29945fa6f0c49e0af08d1f7363 Mon Sep 17 00:00:00 2001 From: Max Whitehead 
Date: Sun, 10 Mar 2024 23:44:26 -0700 Subject: fix(user_changes): Fix RigidBodyType changed to Dynamic not updating active dynamic set. --- src/pipeline/physics_pipeline.rs | 71 +++++++++++++++++++++++++++++++++++++++- src/pipeline/user_changes.rs | 4 +-- 2 files changed, 72 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/pipeline/physics_pipeline.rs b/src/pipeline/physics_pipeline.rs index 6dbc4e5..893882d 100644 --- a/src/pipeline/physics_pipeline.rs +++ b/src/pipeline/physics_pipeline.rs @@ -653,7 +653,7 @@ mod test { use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet, NarrowPhase}; use crate::math::Vector; use crate::pipeline::PhysicsPipeline; - use crate::prelude::MultibodyJointSet; + use crate::prelude::{MultibodyJointSet, RigidBodyType}; #[test] fn kinematic_and_fixed_contact_crash() { @@ -852,4 +852,73 @@ mod test { ); } } + + #[test] + fn rigid_body_type_changed_dynamic_is_in_active_set() { + let mut colliders = ColliderSet::new(); + let mut impulse_joints = ImpulseJointSet::new(); + let mut multibody_joints = MultibodyJointSet::new(); + let mut pipeline = PhysicsPipeline::new(); + let mut bf = BroadPhase::new(); + let mut nf = NarrowPhase::new(); + let mut islands = IslandManager::new(); + + let mut bodies = RigidBodySet::new(); + + // Initialize body as kinematic with mass + let rb = RigidBodyBuilder::kinematic_position_based() + .additional_mass(1.0) + .build(); + let h = bodies.insert(rb.clone()); + + // Step once + let gravity = Vector::y() * -9.81; + pipeline.step( + &gravity, + &IntegrationParameters::default(), + &mut islands, + &mut bf, + &mut nf, + &mut bodies, + &mut colliders, + &mut impulse_joints, + &mut multibody_joints, + &mut CCDSolver::new(), + None, + &(), + &(), + ); + + // Switch body type to Dynamic + bodies + .get_mut(h) + .unwrap() + .set_body_type(RigidBodyType::Dynamic, true); + + // Step again + pipeline.step( + &gravity, + &IntegrationParameters::default(), + &mut islands, + &mut bf, + &mut nf, + 
&mut bodies, + &mut colliders, + &mut impulse_joints, + &mut multibody_joints, + &mut CCDSolver::new(), + None, + &(), + &(), + ); + + let body = bodies.get(h).unwrap(); + let h_y = body.pos.position.translation.y; + + // Expect gravity to be applied on second step after switching to Dynamic + assert!(h_y < 0.0); + + // Expect body to now be in active_dynamic_set + assert!(islands.active_dynamic_set.contains(&h)); + } } diff --git a/src/pipeline/user_changes.rs b/src/pipeline/user_changes.rs index 7c3f1ac..9aa93a5 100644 --- a/src/pipeline/user_changes.rs +++ b/src/pipeline/user_changes.rs @@ -121,8 +121,8 @@ pub(crate) fn handle_user_changes_to_rigid_bodies( } // Push the body to the active set if it is not - // sleeping and if it is not already inside of the active set. - if changes.contains(RigidBodyChanges::SLEEP) + // sleeping and if it is not already inside of the active set, or if type changed to dynamic. + if (changes.contains(RigidBodyChanges::SLEEP) || changes.contains(RigidBodyChanges::TYPE)) && rb.is_enabled() && !rb.activation.sleeping // May happen if the body was put to sleep manually. && rb.is_dynamic() // Only dynamic bodies are in the active dynamic set. -- cgit From 3020d442ea518c957ca094a79bd0e134f906d363 Mon Sep 17 00:00:00 2001 From: Sébastien Crozet Date: Sat, 23 Mar 2024 10:36:07 +0100 Subject: chore: minor comment fix --- src/pipeline/user_changes.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/pipeline/user_changes.rs b/src/pipeline/user_changes.rs index 9aa93a5..a2b00a1 100644 --- a/src/pipeline/user_changes.rs +++ b/src/pipeline/user_changes.rs @@ -120,8 +120,8 @@ pub(crate) fn handle_user_changes_to_rigid_bodies( islands.active_kinematic_set.push(*handle); } - // Push the body to the active set if it is not - // sleeping and if it is not already inside of the active set, or if type changed to dynamic. 
+ // Push the body to the active set if it is not inside the active set yet, and + // is either not longer sleeping or became dynamic. if (changes.contains(RigidBodyChanges::SLEEP) || changes.contains(RigidBodyChanges::TYPE)) && rb.is_enabled() && !rb.activation.sleeping // May happen if the body was put to sleep manually. -- cgit From 6886f8f2073b3251e3b080523d8032961b68e52a Mon Sep 17 00:00:00 2001 From: Sébastien Crozet Date: Sat, 23 Mar 2024 10:51:50 +0100 Subject: feat: add RigidBody::predict_position_using_velocity Fix #601 --- src/dynamics/rigid_body.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'src') diff --git a/src/dynamics/rigid_body.rs b/src/dynamics/rigid_body.rs index b27b58e..2b07f37 100644 --- a/src/dynamics/rigid_body.rs +++ b/src/dynamics/rigid_body.rs @@ -817,6 +817,17 @@ impl RigidBody { .integrate_forces_and_velocities(dt, &self.forces, &self.vels, &self.mprops) } + /// Predicts the next position of this rigid-body, by integrating only its velocity + /// by a time of `dt`. + /// + /// The forces that were applied to this rigid-body since the last physics step will + /// be ignored by this function. Use [`Self::predict_position_using_velocity_and_forces`] + /// instead to take forces into account. 
+ pub fn predict_position_using_velocity(&self, dt: Real) -> Isometry { + self.vels + .integrate(dt, &self.pos.position, &self.mprops.local_mprops.local_com) + } + pub(crate) fn update_world_mass_properties(&mut self) { self.mprops.update_world_mass_properties(&self.pos.position); } -- cgit From cd9fb8342d115bad557ca05aa16fb961fe28b1b5 Mon Sep 17 00:00:00 2001 From: Sébastien Crozet Date: Sat, 23 Mar 2024 13:11:37 +0100 Subject: feat: add RigidBody::copy_from and Collider::copy_from MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #595 --- src/dynamics/rigid_body.rs | 55 ++++++++++++++++++++++++++++++++++++++++++++++ src/geometry/collider.rs | 47 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) (limited to 'src') diff --git a/src/dynamics/rigid_body.rs b/src/dynamics/rigid_body.rs index 2b07f37..5af4131 100644 --- a/src/dynamics/rigid_body.rs +++ b/src/dynamics/rigid_body.rs @@ -74,6 +74,61 @@ impl RigidBody { self.ids = Default::default(); } + /// Copy all the characteristics from `other` to `self`. + /// + /// If you have a mutable reference to a rigid-body `rigid_body: &mut RigidBody`, attempting to + /// assign it a whole new rigid-body instance, e.g., `*rigid_body = RigidBodyBuilder::dynamic().build()`, + /// will crash due to some internal indices being overwritten. Instead, use + /// `rigid_body.copy_from(&RigidBodyBuilder::dynamic().build())`. + /// + /// This method will allow you to set most characteristics of this rigid-body from another + /// rigid-body instance without causing any breakage. + /// + /// This method **cannot** be used for editing the list of colliders attached to this rigid-body. + /// Therefore, the list of colliders attached to `self` won’t be replaced by the one attached + /// to `other`. 
+ /// + /// The pose of `other` will only copied into `self` if `self` doesn’t have a parent (if it has + /// a parent, its position is directly controlled by the parent rigid-body). + pub fn copy_from(&mut self, other: &RigidBody) { + // NOTE: we deconstruct the rigid-body struct to be sure we don’t forget to + // add some copies here if we add more field to RigidBody in the future. + let RigidBody { + pos, + mprops, + integrated_vels, + vels, + damping, + forces, + ccd, + ids: _ids, // Internal ids must not be overwritten. + colliders: _colliders, // This function cannot be used to edit collider sets. + activation, + changes: _changes, // Will be set to ALL. + body_type, + dominance, + enabled, + additional_solver_iterations, + user_data, + } = other; + + self.pos = *pos; + self.mprops = mprops.clone(); + self.integrated_vels = *integrated_vels; + self.vels = *vels; + self.damping = *damping; + self.forces = *forces; + self.ccd = *ccd; + self.activation = *activation; + self.body_type = *body_type; + self.dominance = *dominance; + self.enabled = *enabled; + self.additional_solver_iterations = *additional_solver_iterations; + self.user_data = *user_data; + + self.changes = RigidBodyChanges::all(); + } + /// Set the additional number of solver iterations run for this rigid-body and /// everything interacting with it. /// diff --git a/src/geometry/collider.rs b/src/geometry/collider.rs index 2a31afa..4e7fbc7 100644 --- a/src/geometry/collider.rs +++ b/src/geometry/collider.rs @@ -60,6 +60,53 @@ impl Collider { self.coll_type.is_sensor() } + /// Copy all the characteristics from `other` to `self`. + /// + /// If you have a mutable reference to a collider `collider: &mut Collider`, attempting to + /// assign it a whole new collider instance, e.g., `*collider = ColliderBuilder::ball(0.5).build()`, + /// will crash due to some internal indices being overwritten. Instead, use + /// `collider.copy_from(&ColliderBuilder::ball(0.5).build())`. 
+ /// + /// This method will allow you to set most characteristics of this collider from another + /// collider instance without causing any breakage. + /// + /// This method **cannot** be used for reparenting a collider. Therefore, the parent of the + /// `other` (if any), as well as its relative position to that parent will not be copied into + /// `self`. + /// + /// The pose of `other` will only copied into `self` if `self` doesn’t have a parent (if it has + /// a parent, its position is directly controlled by the parent rigid-body). + pub fn copy_from(&mut self, other: &Collider) { + // NOTE: we deconstruct the collider struct to be sure we don’t forget to + // add some copies here if we add more field to Collider in the future. + let Collider { + coll_type, + shape, + mprops, + changes: _changes, // Will be set to ALL. + parent: _parent, // This function cannot be used to reparent the collider. + pos, + material, + flags, + bf_data: _bf_data, // Internal ids must not be overwritten. + contact_force_event_threshold, + user_data, + } = other; + + if self.parent.is_none() { + self.pos = *pos; + } + + self.coll_type = *coll_type; + self.shape = shape.clone(); + self.mprops = mprops.clone(); + self.material = *material; + self.contact_force_event_threshold = *contact_force_event_threshold; + self.user_data = *user_data; + self.flags = *flags; + self.changes = ColliderChanges::all(); + } + /// The physics hooks enabled for this collider. 
pub fn active_hooks(&self) -> ActiveHooks { self.flags.active_hooks -- cgit From cfb2c2c93e39e1f59557c4f32fde4a68dc4cd6fc Mon Sep 17 00:00:00 2001 From: Sébastien Crozet Date: Sat, 23 Mar 2024 13:26:53 +0100 Subject: feat!: rename BroadPhase to BroadPhaseMultiSap --- src/geometry/broad_phase_multi_sap/broad_phase.rs | 664 --------------------- .../broad_phase_multi_sap/broad_phase_multi_sap.rs | 664 +++++++++++++++++++++ src/geometry/broad_phase_multi_sap/mod.rs | 4 +- src/geometry/broad_phase_multi_sap/sap_layer.rs | 4 +- src/geometry/broad_phase_qbvh.rs | 6 +- src/geometry/mod.rs | 4 +- src/pipeline/collision_pipeline.rs | 11 +- src/pipeline/physics_pipeline.rs | 16 +- 8 files changed, 687 insertions(+), 686 deletions(-) delete mode 100644 src/geometry/broad_phase_multi_sap/broad_phase.rs create mode 100644 src/geometry/broad_phase_multi_sap/broad_phase_multi_sap.rs (limited to 'src') diff --git a/src/geometry/broad_phase_multi_sap/broad_phase.rs b/src/geometry/broad_phase_multi_sap/broad_phase.rs deleted file mode 100644 index 9e1bc06..0000000 --- a/src/geometry/broad_phase_multi_sap/broad_phase.rs +++ /dev/null @@ -1,664 +0,0 @@ -use super::{ - BroadPhasePairEvent, ColliderPair, SAPLayer, SAPProxies, SAPProxy, SAPProxyData, SAPRegionPool, -}; -use crate::geometry::broad_phase_multi_sap::SAPProxyIndex; -use crate::geometry::{ - ColliderBroadPhaseData, ColliderChanges, ColliderHandle, ColliderPosition, ColliderSet, - ColliderShape, -}; -use crate::math::Real; -use crate::utils::IndexMut2; -use parry::bounding_volume::BoundingVolume; -use parry::utils::hashmap::HashMap; - -/// A broad-phase combining a Hierarchical Grid and Sweep-and-Prune. -/// -/// The basic Sweep-and-Prune (SAP) algorithm has one significant flaws: -/// the interactions between far-away objects. This means that objects -/// that are very far away will still have some of their endpoints swapped -/// within the SAP data-structure. 
This results in poor scaling because this -/// results in lots of swapping between endpoints of Aabbs that won't ever -/// actually interact. -/// -/// The first optimization to address this problem is to use the Multi-SAP -/// method. This basically combines an SAP with a grid. The grid subdivides -/// the spaces into equally-sized subspaces (grid cells). Each subspace, which we call -/// a "region" contains an SAP instance (i.e. there SAP axes responsible for -/// collecting endpoints and swapping them when they move to detect interaction pairs). -/// Each Aabb is inserted in all the regions it intersects. -/// This prevents the far-away problem because two objects that are far away will -/// be located on different regions. So their endpoints will never meet. -/// -/// However, the Multi-SAP approach has one notable problem: the region size must -/// be chosen wisely. It could be user-defined, but that's makes it more difficult -/// to use (for the end-user). Or it can be given a fixed value. Using a fixed -/// value may result in large objects intersecting lots of regions, resulting in -/// poor performances and very high memory usage. -/// -/// So a solution to that large-objects problem is the Multi-SAP approach is to -/// replace the grid by a hierarchical grid. A hierarchical grid is composed of -/// several layers. And each layer have different region sizes. For example all -/// the regions on layer 0 will have the size 1x1x1. All the regions on the layer -/// 1 will have the size 10x10x10, etc. That way, a given Aabb will be inserted -/// on the layer that has regions big enough to avoid the large-object problem. -/// For example a 20x20x20 object will be inserted in the layer with region -/// of size 10x10x10, resulting in only 8 regions being intersect by the Aabb. -/// (If it was inserted in the layer with regions of size 1x1x1, it would have intersected -/// 8000 regions, which is a problem performancewise.) 
-/// -/// We call this new method the Hierarchical-SAP. -/// -/// Now with the Hierarchical-SAP, we can update each layer independently from one another. -/// However, objects belonging to different layers will never be detected as intersecting that -/// way. So we need a way to do inter-layer interference detection. There is a lot ways of doing -/// this: performing inter-layer Multi-Box-Pruning passes is one example (but this is not what we do). -/// In our implementation, we do the following: -/// - The Aabb bounds of each region of the layer `n` are inserted into the corresponding larger region -/// of the layer `n + 1`. -/// - When an Aabb in the region of the layer `n + 1` intersects the Aabb corresponding to one of the -/// regions at the smaller layer `n`, we add that Aabb to that smaller region. -/// So in the end it means that a given Aabb will be inserted into all the region it intersects at -/// the layer `n`. And it will also be inserted into all the regions it intersects at the smaller layers -/// (the layers `< n`), but only for the regions that already exist (so we don't have to discretize -/// our Aabb into the layers `< n`). This involves a fair amount of bookkeeping unfortunately, but -/// this has the benefit of keep the overall complexity of the algorithm O(1) in the typical specially -/// coherent scenario. -/// -/// From an implementation point-of-view, our hierarchical SAP is implemented with the following structures: -/// - There is one `SAPLayer` per layer of the hierarchical grid. -/// - Each `SAPLayer` contains multiple `SAPRegion` (each being a region of the grid represented by that layer). -/// - Each `SAPRegion` contains three `SAPAxis`, representing the "classical" SAP algorithm running on this region. -/// - Each `SAPAxis` maintains a sorted list of `SAPEndpoints` representing the endpoints of the Aabbs intersecting -/// the bounds on the `SAPRegion` containing this `SAPAxis`. -/// - A set of `SAPProxy` are maintained separately. 
It contains the Aabbs of all the colliders managed by this -/// broad-phase, as well as the Aabbs of all the regions part of this broad-phase. -#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] -#[derive(Clone)] -pub struct BroadPhase { - proxies: SAPProxies, - layers: Vec, - smallest_layer: u8, - largest_layer: u8, - // NOTE: we maintain this hashmap to simplify collider removal. - // This information is also present in the ColliderProxyId - // component. However if that component is removed, we need - // a way to access it to do some cleanup. - // Note that we could just remove the ColliderProxyId component - // altogether but that would be slow because of the need to - // always access this hashmap. Instead, we access this hashmap - // only when the collider has been added/removed. - // Another alternative would be to remove ColliderProxyId and - // just use a Coarena. But this seems like it could use too - // much memory. - colliders_proxy_ids: HashMap, - #[cfg_attr(feature = "serde-serialize", serde(skip))] - region_pool: SAPRegionPool, // To avoid repeated allocations. - // We could think serializing this workspace is useless. - // It turns out is is important to serialize at least its capacity - // and restore this capacity when deserializing the hashmap. - // This is because the order of future elements inserted into the - // hashmap depends on its capacity (because the internal bucket indices - // depend on this capacity). So not restoring this capacity may alter - // the order at which future elements are reported. This will in turn - // alter the order at which the pairs are registered in the narrow-phase, - // thus altering the order of the contact manifold. In the end, this - // alters the order of the resolution of contacts, resulting in - // diverging simulation after restoration of a snapshot. 
- #[cfg_attr( - feature = "serde-serialize", - serde( - serialize_with = "parry::utils::hashmap::serialize_hashmap_capacity", - deserialize_with = "parry::utils::hashmap::deserialize_hashmap_capacity" - ) - )] - reporting: HashMap<(u32, u32), bool>, // Workspace -} - -impl Default for BroadPhase { - fn default() -> Self { - Self::new() - } -} - -impl BroadPhase { - /// Create a new empty broad-phase. - pub fn new() -> Self { - BroadPhase { - proxies: SAPProxies::new(), - layers: Vec::new(), - smallest_layer: 0, - largest_layer: 0, - region_pool: Vec::new(), - reporting: HashMap::default(), - colliders_proxy_ids: HashMap::default(), - } - } - - /// Maintain the broad-phase internal state by taking collider removal into account. - /// - /// For each colliders marked as removed, we make their containing layer mark - /// its proxy as pre-deleted. The actual proxy removal will happen at the end - /// of the `BroadPhase::update`. - fn handle_removed_colliders(&mut self, removed_colliders: &[ColliderHandle]) { - // For each removed collider, remove the corresponding proxy. - for removed in removed_colliders { - if let Some(proxy_id) = self.colliders_proxy_ids.get(removed).copied() { - self.predelete_proxy(proxy_id); - } - } - } - - /// Pre-deletes a proxy from this broad-phase. - /// - /// The removal of a proxy is a semi-lazy process. It will mark - /// the proxy as predeleted, and will set its Aabb as +infinity. - /// After this method has been called with all the proxies to - /// remove, the `complete_removal` method MUST be called to - /// complete the removal of these proxies, by actually removing them - /// from all the relevant layers/regions/axes. - fn predelete_proxy(&mut self, proxy_index: SAPProxyIndex) { - if proxy_index == crate::INVALID_U32 { - // This collider has not been added to the broad-phase yet. 
- return; - } - - let proxy = &mut self.proxies[proxy_index]; - let layer = &mut self.layers[proxy.layer_id as usize]; - // Let the layer know that the proxy is being deleted. - layer.predelete_proxy(&mut self.proxies, proxy_index); - } - - /// Completes the removal of the deleted proxies. - /// - /// If `self.predelete_proxy` was called, then this `complete_removals` - /// method must be called to complete the removals. - /// - /// This method will actually remove from the proxy list all the proxies - /// marked as deletable by `self.predelete_proxy`, making their proxy - /// handles re-usable by new proxies. - fn complete_removals( - &mut self, - colliders: &mut ColliderSet, - removed_colliders: &[ColliderHandle], - ) { - // If there is no layer, there is nothing to remove. - if self.layers.is_empty() { - return; - } - - // This is a bottom-up pass: - // - Complete the removal on the layer `n`. This may cause so regions to be deleted. - // - Continue with the layer `n + 1`. This will delete from `n + 1` all the proxies - // of the regions originating from `n`. - // This bottom-up approach will propagate region removal from the smallest layer up - // to the largest layer. - let mut curr_layer_id = self.smallest_layer; - - loop { - let curr_layer = &mut self.layers[curr_layer_id as usize]; - - if let Some(larger_layer_id) = curr_layer.larger_layer { - let (curr_layer, larger_layer) = self - .layers - .index_mut2(curr_layer_id as usize, larger_layer_id as usize); - curr_layer.complete_removals( - Some(larger_layer), - &mut self.proxies, - &mut self.region_pool, - ); - - // NOTE: we don't care about reporting pairs. - self.reporting.clear(); - curr_layer_id = larger_layer_id; - } else { - curr_layer.complete_removals(None, &mut self.proxies, &mut self.region_pool); - - // NOTE: we don't care about reporting pairs. - self.reporting.clear(); - break; - } - } - - /* - * Actually remove the colliders proxies. 
- */ - for removed in removed_colliders { - if let Some(proxy_id) = self.colliders_proxy_ids.remove(removed) { - if proxy_id != crate::INVALID_U32 { - self.proxies.remove(proxy_id); - } - } - - if let Some(co) = colliders.get_mut_internal(*removed) { - // Reset the proxy index. - co.bf_data.proxy_index = crate::INVALID_U32; - } - } - } - - /// Finalize the insertion of the layer identified by `layer_id`. - /// - /// This will: - /// - Remove all the subregion proxies from the larger layer. - /// - Pre-insert all the smaller layer's region proxies into this layer. - fn finalize_layer_insertion(&mut self, layer_id: u8) { - // Remove all the region endpoints from the larger layer. - // They will be automatically replaced by the new layer's regions. - if let Some(larger_layer) = self.layers[layer_id as usize].larger_layer { - self.layers[larger_layer as usize].unregister_all_subregions(&mut self.proxies); - } - - // Add all the regions from the smaller layer to the new layer. - // This will result in new regions to be created in the new layer. - // These new regions will automatically propagate to the larger layers in - // the Phase 3 of `Self::update`. - if let Some(smaller_layer) = self.layers[layer_id as usize].smaller_layer { - let (smaller_layer, new_layer) = self - .layers - .index_mut2(smaller_layer as usize, layer_id as usize); - - smaller_layer.propagate_existing_regions( - new_layer, - &mut self.proxies, - &mut self.region_pool, - ); - } - } - - /// Ensures that a given layer exists. - /// - /// If the layer does not exist then: - /// 1. It is created and added to `self.layers`. - /// 2. The smaller/larger layer indices are updated to order them - /// properly depending on their depth. - /// 3. All the subregion proxies from the larger layer are deleted: - /// they will be replaced by this new layer's regions later in - /// the `update` function. - /// 4. All the regions from the smaller layer are added to that new - /// layer. 
- fn ensure_layer_exists(&mut self, new_depth: i8) -> u8 { - // Special case: we don't have any layers yet. - if self.layers.is_empty() { - let layer_id = self.layers.len() as u8; // TODO: check overflow. - self.layers - .push(SAPLayer::new(new_depth, layer_id, None, None)); - return 0; - } - - // Find the first layer with a depth larger or equal to new_depth. - let mut larger_layer_id = Some(self.smallest_layer); - - while let Some(curr_layer_id) = larger_layer_id { - if self.layers[curr_layer_id as usize].depth >= new_depth { - break; - } - - larger_layer_id = self.layers[curr_layer_id as usize].larger_layer; - } - - match larger_layer_id { - None => { - // The layer we are currently creating is the new largest layer. So - // we need to update `self.largest_layer` accordingly then call - // `self.finalize_layer_insertion. - assert_ne!(self.layers.len() as u8, u8::MAX, "Not yet implemented."); - let new_layer_id = self.layers.len() as u8; - self.layers[self.largest_layer as usize].larger_layer = Some(new_layer_id); - self.layers.push(SAPLayer::new( - new_depth, - new_layer_id, - Some(self.largest_layer), - None, - )); - self.largest_layer = new_layer_id; - self.finalize_layer_insertion(new_layer_id); - new_layer_id - } - Some(larger_layer_id) => { - if self.layers[larger_layer_id as usize].depth == new_depth { - // Found it! The layer already exists. - larger_layer_id - } else { - // The layer does not exist yet. Create it. - // And we found another layer that is larger than this one. - // So we need to adjust the smaller/larger layer indices too - // keep the list sorted, and then call `self.finalize_layer_insertion` - // to deal with region propagation. 
- let new_layer_id = self.layers.len() as u8; - let smaller_layer_id = self.layers[larger_layer_id as usize].smaller_layer; - self.layers[larger_layer_id as usize].smaller_layer = Some(new_layer_id); - - if let Some(smaller_layer_id) = smaller_layer_id { - self.layers[smaller_layer_id as usize].larger_layer = Some(new_layer_id); - } else { - self.smallest_layer = new_layer_id; - } - - self.layers.push(SAPLayer::new( - new_depth, - new_layer_id, - smaller_layer_id, - Some(larger_layer_id), - )); - self.finalize_layer_insertion(new_layer_id); - - new_layer_id - } - } - } - } - - fn handle_modified_collider( - &mut self, - prediction_distance: Real, - handle: ColliderHandle, - proxy_index: &mut u32, - collider: (&ColliderPosition, &ColliderShape, &ColliderChanges), - ) -> bool { - let (co_pos, co_shape, co_changes) = collider; - - let mut aabb = co_shape - .compute_aabb(co_pos) - .loosened(prediction_distance / 2.0); - - if aabb.mins.coords.iter().any(|e| !e.is_finite()) - || aabb.maxs.coords.iter().any(|e| !e.is_finite()) - { - // Reject Aabbs with non-finite values. - return false; - } - - aabb.mins = super::clamp_point(aabb.mins); - aabb.maxs = super::clamp_point(aabb.maxs); - - let prev_aabb; - - let layer_id = if let Some(proxy) = self.proxies.get_mut(*proxy_index) { - let mut layer_id = proxy.layer_id; - prev_aabb = proxy.aabb; - proxy.aabb = aabb; - - if co_changes.contains(ColliderChanges::SHAPE) { - // If the shape was changed, then we need to see if this proxy should be - // migrated to a larger layer. Indeed, if the shape was replaced by - // a much larger shape, we need to promote the proxy to a bigger layer - // to avoid the O(n²) discretization problem. - let new_layer_depth = super::layer_containing_aabb(&aabb); - if new_layer_depth > proxy.layer_depth { - self.layers[proxy.layer_id as usize] - .proper_proxy_moved_to_bigger_layer(&mut self.proxies, *proxy_index); - - // We need to promote the proxy to the bigger layer. 
- layer_id = self.ensure_layer_exists(new_layer_depth); - self.proxies[*proxy_index].layer_id = layer_id; - self.proxies[*proxy_index].layer_depth = new_layer_depth; - } - } - - layer_id - } else { - let layer_depth = super::layer_containing_aabb(&aabb); - let layer_id = self.ensure_layer_exists(layer_depth); - - // Create the proxy. - let proxy = SAPProxy::collider(handle, aabb, layer_id, layer_depth); - prev_aabb = aabb; - *proxy_index = self.proxies.insert(proxy); - layer_id - }; - - let layer = &mut self.layers[layer_id as usize]; - - // Preupdate the collider in the layer. - // We need to use both the prev Aabb and the new Aabb for this update, to - // handle special cases where one Aabb has left a region that doesn’t contain - // any other modified Aabbs. - // If the combination of both previous and new aabbs isn’t more than 25% bigger - // than the new Aabb, we just merge them to save some computation times (to avoid - // discretizing twice the area at their intersection. If it’s bigger than 25% then - // we discretize both aabbs individually. - let merged_aabbs = prev_aabb.merged(&aabb); - - if merged_aabbs.volume() > aabb.volume() * 1.25 { - layer.preupdate_collider( - *proxy_index, - &aabb, - None, - &mut self.proxies, - &mut self.region_pool, - ); - - layer.preupdate_collider( - *proxy_index, - &prev_aabb, - Some(&aabb), - &mut self.proxies, - &mut self.region_pool, - ); - } else { - layer.preupdate_collider( - *proxy_index, - &merged_aabbs, - Some(&aabb), - &mut self.proxies, - &mut self.region_pool, - ); - } - - // Returns true if propagation is needed. - !layer.created_regions.is_empty() - } - - /// Updates the broad-phase, taking into account the new collider positions. - pub fn update( - &mut self, - prediction_distance: Real, - colliders: &mut ColliderSet, - modified_colliders: &[ColliderHandle], - removed_colliders: &[ColliderHandle], - events: &mut Vec, - ) { - // Phase 1: pre-delete the collisions that have been deleted. 
- self.handle_removed_colliders(removed_colliders); - - let mut need_region_propagation = false; - - // Phase 2: pre-delete the collisions that have been deleted. - for handle in modified_colliders { - // NOTE: we use `get` because the collider may no longer - // exist if it has been removed. - if let Some(co) = colliders.get_mut_internal(*handle) { - if !co.is_enabled() || !co.changes.needs_broad_phase_update() { - continue; - } - - let mut new_proxy_id = co.bf_data.proxy_index; - - if self.handle_modified_collider( - prediction_distance, - *handle, - &mut new_proxy_id, - (&co.pos, &co.shape, &co.changes), - ) { - need_region_propagation = true; - } - - if co.bf_data.proxy_index != new_proxy_id { - self.colliders_proxy_ids.insert(*handle, new_proxy_id); - - // Make sure we have the new proxy index in case - // the collider was added for the first time. - co.bf_data = ColliderBroadPhaseData { - proxy_index: new_proxy_id, - }; - } - } - } - - // Phase 3: bottom-up pass to propagate new regions from smaller layers to larger layers. - if need_region_propagation { - self.propagate_created_regions(); - } - - // Phase 4: top-down pass to propagate proxies from larger layers to smaller layers. - self.update_layers_and_find_pairs(events); - - // Phase 5: bottom-up pass to remove proxies, and propagate region removed from smaller - // layers to possible remove regions from larger layers that would become empty that way. - self.complete_removals(colliders, removed_colliders); - } - - /// Propagate regions from the smallest layers up to the larger layers. - /// - /// Whenever a region is created on a layer `n`, then its Aabb must be - /// added to its larger layer so we can detect when an object - /// in a larger layer may start interacting with objects in a smaller - /// layer. 
- fn propagate_created_regions(&mut self) { - let mut curr_layer = Some(self.smallest_layer); - - while let Some(curr_layer_id) = curr_layer { - let layer = &mut self.layers[curr_layer_id as usize]; - let larger_layer = layer.larger_layer; - - if !layer.created_regions.is_empty() { - if let Some(larger_layer) = larger_layer { - let (layer, larger_layer) = self - .layers - .index_mut2(curr_layer_id as usize, larger_layer as usize); - layer.propagate_created_regions( - larger_layer, - &mut self.proxies, - &mut self.region_pool, - ); - layer.created_regions.clear(); - } else { - // Always clear the set of created regions, even if - // there is no larger layer. - layer.created_regions.clear(); - } - } - - curr_layer = larger_layer; - } - } - - fn update_layers_and_find_pairs(&mut self, out_events: &mut Vec) { - if self.layers.is_empty() { - return; - } - - // This is a top-down operation: we start by updating the largest - // layer, and continue down to the smallest layer. - // - // This must be top-down because: - // 1. If a non-region proxy from the layer `n` interacts with one of - // the regions from the layer `n - 1`, it must be added to that - // smaller layer before that smaller layer is updated. - // 2. If a region has been updated, then all its subregions at the - // layer `n - 1` must be marked as "needs-to-be-updated" too, in - // order to account for the fact that a big proxy moved. - // NOTE: this 2nd point could probably be improved: instead of updating - // all the subregions, we could perhaps just update the subregions - // that crosses the boundary of the Aabb of the big proxies that - // moved in they layer `n`. 
- let mut layer_id = Some(self.largest_layer); - - while let Some(curr_layer_id) = layer_id { - self.layers[curr_layer_id as usize] - .update_regions(&mut self.proxies, &mut self.reporting); - - layer_id = self.layers[curr_layer_id as usize].smaller_layer; - - for ((proxy_id1, proxy_id2), colliding) in &self.reporting { - let (proxy1, proxy2) = self - .proxies - .elements - .index_mut2(*proxy_id1 as usize, *proxy_id2 as usize); - - match (&mut proxy1.data, &mut proxy2.data) { - (SAPProxyData::Collider(handle1), SAPProxyData::Collider(handle2)) => { - if *colliding { - out_events.push(BroadPhasePairEvent::AddPair(ColliderPair::new( - *handle1, *handle2, - ))); - } else { - out_events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new( - *handle1, *handle2, - ))); - } - } - (SAPProxyData::Collider(_), SAPProxyData::Region(_)) => { - if *colliding { - // Add the collider to the subregion. - proxy2 - .data - .as_region_mut() - .preupdate_proxy(*proxy_id1, false); - } - } - (SAPProxyData::Region(_), SAPProxyData::Collider(_)) => { - if *colliding { - // Add the collider to the subregion. - proxy1 - .data - .as_region_mut() - .preupdate_proxy(*proxy_id2, false); - } - } - (SAPProxyData::Region(_), SAPProxyData::Region(_)) => { - // This will only happen between two adjacent subregions because - // they share some identical bounds. So this case does not matter. 
- } - } - } - - self.reporting.clear(); - } - } -} - -#[cfg(test)] -mod test { - use crate::dynamics::{ - ImpulseJointSet, IslandManager, MultibodyJointSet, RigidBodyBuilder, RigidBodySet, - }; - use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet}; - - #[test] - fn test_add_update_remove() { - let mut broad_phase = BroadPhase::new(); - let mut bodies = RigidBodySet::new(); - let mut colliders = ColliderSet::new(); - let mut impulse_joints = ImpulseJointSet::new(); - let mut multibody_joints = MultibodyJointSet::new(); - let mut islands = IslandManager::new(); - - let rb = RigidBodyBuilder::dynamic().build(); - let co = ColliderBuilder::ball(0.5).build(); - let hrb = bodies.insert(rb); - let coh = colliders.insert_with_parent(co, hrb, &mut bodies); - - let mut events = Vec::new(); - broad_phase.update(0.0, &mut colliders, &[coh], &[], &mut events); - - bodies.remove( - hrb, - &mut islands, - &mut colliders, - &mut impulse_joints, - &mut multibody_joints, - true, - ); - broad_phase.update(0.0, &mut colliders, &[], &[coh], &mut events); - - // Create another body. - let rb = RigidBodyBuilder::dynamic().build(); - let co = ColliderBuilder::ball(0.5).build(); - let hrb = bodies.insert(rb); - let coh = colliders.insert_with_parent(co, hrb, &mut bodies); - - // Make sure the proxy handles is recycled properly. 
- broad_phase.update(0.0, &mut colliders, &[coh], &[], &mut events); - } -} diff --git a/src/geometry/broad_phase_multi_sap/broad_phase_multi_sap.rs b/src/geometry/broad_phase_multi_sap/broad_phase_multi_sap.rs new file mode 100644 index 0000000..24bc72d --- /dev/null +++ b/src/geometry/broad_phase_multi_sap/broad_phase_multi_sap.rs @@ -0,0 +1,664 @@ +use super::{ + BroadPhasePairEvent, ColliderPair, SAPLayer, SAPProxies, SAPProxy, SAPProxyData, SAPRegionPool, +}; +use crate::geometry::broad_phase_multi_sap::SAPProxyIndex; +use crate::geometry::{ + ColliderBroadPhaseData, ColliderChanges, ColliderHandle, ColliderPosition, ColliderSet, + ColliderShape, +}; +use crate::math::Real; +use crate::utils::IndexMut2; +use parry::bounding_volume::BoundingVolume; +use parry::utils::hashmap::HashMap; + +/// A broad-phase combining a Hierarchical Grid and Sweep-and-Prune. +/// +/// The basic Sweep-and-Prune (SAP) algorithm has one significant flaws: +/// the interactions between far-away objects. This means that objects +/// that are very far away will still have some of their endpoints swapped +/// within the SAP data-structure. This results in poor scaling because this +/// results in lots of swapping between endpoints of Aabbs that won't ever +/// actually interact. +/// +/// The first optimization to address this problem is to use the Multi-SAP +/// method. This basically combines an SAP with a grid. The grid subdivides +/// the spaces into equally-sized subspaces (grid cells). Each subspace, which we call +/// a "region" contains an SAP instance (i.e. there SAP axes responsible for +/// collecting endpoints and swapping them when they move to detect interaction pairs). +/// Each Aabb is inserted in all the regions it intersects. +/// This prevents the far-away problem because two objects that are far away will +/// be located on different regions. So their endpoints will never meet. 
+///
+/// However, the Multi-SAP approach has one notable problem: the region size must
+/// be chosen wisely. It could be user-defined, but that makes it more difficult
+/// to use (for the end-user). Or it can be given a fixed value. Using a fixed
+/// value may result in large objects intersecting lots of regions, resulting in
+/// poor performance and very high memory usage.
+///
+/// So a solution to that large-objects problem of the Multi-SAP approach is to
+/// replace the grid by a hierarchical grid. A hierarchical grid is composed of
+/// several layers. And each layer has different region sizes. For example all
+/// the regions on layer 0 will have the size 1x1x1. All the regions on the layer
+/// 1 will have the size 10x10x10, etc. That way, a given Aabb will be inserted
+/// on the layer that has regions big enough to avoid the large-object problem.
+/// For example a 20x20x20 object will be inserted in the layer with region
+/// of size 10x10x10, resulting in only 8 regions being intersected by the Aabb.
+/// (If it was inserted in the layer with regions of size 1x1x1, it would have intersected
+/// 8000 regions, which is a problem performance-wise.)
+///
+/// We call this new method the Hierarchical-SAP.
+///
+/// Now with the Hierarchical-SAP, we can update each layer independently from one another.
+/// However, objects belonging to different layers will never be detected as intersecting that
+/// way. So we need a way to do inter-layer interference detection. There are a lot of ways of doing
+/// this: performing inter-layer Multi-Box-Pruning passes is one example (but this is not what we do).
+/// In our implementation, we do the following:
+/// - The Aabb bounds of each region of the layer `n` are inserted into the corresponding larger region
+/// of the layer `n + 1`.
+/// - When an Aabb in the region of the layer `n + 1` intersects the Aabb corresponding to one of the
+/// regions at the smaller layer `n`, we add that Aabb to that smaller region.
+/// So in the end it means that a given Aabb will be inserted into all the regions it intersects at
+/// the layer `n`. And it will also be inserted into all the regions it intersects at the smaller layers
+/// (the layers `< n`), but only for the regions that already exist (so we don't have to discretize
+/// our Aabb into the layers `< n`). This involves a fair amount of bookkeeping unfortunately, but
+/// this has the benefit of keeping the overall complexity of the algorithm O(1) in the typical temporally
+/// coherent scenario.
+///
+/// From an implementation point-of-view, our hierarchical SAP is implemented with the following structures:
+/// - There is one `SAPLayer` per layer of the hierarchical grid.
+/// - Each `SAPLayer` contains multiple `SAPRegion` (each being a region of the grid represented by that layer).
+/// - Each `SAPRegion` contains three `SAPAxis`, representing the "classical" SAP algorithm running on this region.
+/// - Each `SAPAxis` maintains a sorted list of `SAPEndpoints` representing the endpoints of the Aabbs intersecting
+/// the bounds on the `SAPRegion` containing this `SAPAxis`.
+/// - A set of `SAPProxy` is maintained separately. It contains the Aabbs of all the colliders managed by this
+/// broad-phase, as well as the Aabbs of all the regions part of this broad-phase.
+#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
+#[derive(Clone)]
+pub struct BroadPhaseMultiSap {
+    proxies: SAPProxies,
+    layers: Vec,
+    smallest_layer: u8,
+    largest_layer: u8,
+    // NOTE: we maintain this hashmap to simplify collider removal.
+    // This information is also present in the ColliderProxyId
+    // component. However if that component is removed, we need
+    // a way to access it to do some cleanup.
+    // Note that we could just remove the ColliderProxyId component
+    // altogether but that would be slow because of the need to
+    // always access this hashmap.
 Instead, we access this hashmap
+    // only when the collider has been added/removed.
+    // Another alternative would be to remove ColliderProxyId and
+    // just use a Coarena. But this seems like it could use too
+    // much memory.
+    colliders_proxy_ids: HashMap,
+    #[cfg_attr(feature = "serde-serialize", serde(skip))]
+    region_pool: SAPRegionPool, // To avoid repeated allocations.
+    // We could think serializing this workspace is useless.
+    // It turns out it is important to serialize at least its capacity
+    // and restore this capacity when deserializing the hashmap.
+    // This is because the order of future elements inserted into the
+    // hashmap depends on its capacity (because the internal bucket indices
+    // depend on this capacity). So not restoring this capacity may alter
+    // the order at which future elements are reported. This will in turn
+    // alter the order at which the pairs are registered in the narrow-phase,
+    // thus altering the order of the contact manifold. In the end, this
+    // alters the order of the resolution of contacts, resulting in
+    // diverging simulation after restoration of a snapshot.
+    #[cfg_attr(
+        feature = "serde-serialize",
+        serde(
+            serialize_with = "parry::utils::hashmap::serialize_hashmap_capacity",
+            deserialize_with = "parry::utils::hashmap::deserialize_hashmap_capacity"
+        )
+    )]
+    reporting: HashMap<(u32, u32), bool>, // Workspace
+}
+
+impl Default for BroadPhaseMultiSap {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl BroadPhaseMultiSap {
+    /// Create a new empty broad-phase.
+    pub fn new() -> Self {
+        BroadPhaseMultiSap {
+            proxies: SAPProxies::new(),
+            layers: Vec::new(),
+            smallest_layer: 0,
+            largest_layer: 0,
+            region_pool: Vec::new(),
+            reporting: HashMap::default(),
+            colliders_proxy_ids: HashMap::default(),
+        }
+    }
+
+    /// Maintain the broad-phase internal state by taking collider removal into account.
+    ///
+    /// For each collider marked as removed, we make its containing layer mark
+    /// its proxy as pre-deleted. The actual proxy removal will happen at the end
+    /// of the `BroadPhaseMultiSap::update`.
+    fn handle_removed_colliders(&mut self, removed_colliders: &[ColliderHandle]) {
+        // For each removed collider, remove the corresponding proxy.
+        for removed in removed_colliders {
+            if let Some(proxy_id) = self.colliders_proxy_ids.get(removed).copied() {
+                self.predelete_proxy(proxy_id);
+            }
+        }
+    }
+
+    /// Pre-deletes a proxy from this broad-phase.
+    ///
+    /// The removal of a proxy is a semi-lazy process. It will mark
+    /// the proxy as predeleted, and will set its Aabb as +infinity.
+    /// After this method has been called with all the proxies to
+    /// remove, the `complete_removals` method MUST be called to
+    /// complete the removal of these proxies, by actually removing them
+    /// from all the relevant layers/regions/axes.
+    fn predelete_proxy(&mut self, proxy_index: SAPProxyIndex) {
+        if proxy_index == crate::INVALID_U32 {
+            // This collider has not been added to the broad-phase yet.
+            return;
+        }
+
+        let proxy = &mut self.proxies[proxy_index];
+        let layer = &mut self.layers[proxy.layer_id as usize];
+        // Let the layer know that the proxy is being deleted.
+        layer.predelete_proxy(&mut self.proxies, proxy_index);
+    }
+
+    /// Completes the removal of the deleted proxies.
+    ///
+    /// If `self.predelete_proxy` was called, then this `complete_removals`
+    /// method must be called to complete the removals.
+    ///
+    /// This method will actually remove from the proxy list all the proxies
+    /// marked as deletable by `self.predelete_proxy`, making their proxy
+    /// handles re-usable by new proxies.
+    fn complete_removals(
+        &mut self,
+        colliders: &mut ColliderSet,
+        removed_colliders: &[ColliderHandle],
+    ) {
+        // If there is no layer, there is nothing to remove.
+        if self.layers.is_empty() {
+            return;
+        }
+
+        // This is a bottom-up pass:
+        // - Complete the removal on the layer `n`. This may cause some regions to be deleted.
+        // - Continue with the layer `n + 1`. This will delete from `n + 1` all the proxies
+        //   of the regions originating from `n`.
+        // This bottom-up approach will propagate region removal from the smallest layer up
+        // to the largest layer.
+        let mut curr_layer_id = self.smallest_layer;
+
+        loop {
+            let curr_layer = &mut self.layers[curr_layer_id as usize];
+
+            if let Some(larger_layer_id) = curr_layer.larger_layer {
+                let (curr_layer, larger_layer) = self
+                    .layers
+                    .index_mut2(curr_layer_id as usize, larger_layer_id as usize);
+                curr_layer.complete_removals(
+                    Some(larger_layer),
+                    &mut self.proxies,
+                    &mut self.region_pool,
+                );
+
+                // NOTE: we don't care about reporting pairs.
+                self.reporting.clear();
+                curr_layer_id = larger_layer_id;
+            } else {
+                curr_layer.complete_removals(None, &mut self.proxies, &mut self.region_pool);
+
+                // NOTE: we don't care about reporting pairs.
+                self.reporting.clear();
+                break;
+            }
+        }
+
+        /*
+         * Actually remove the colliders' proxies.
+         */
+        for removed in removed_colliders {
+            if let Some(proxy_id) = self.colliders_proxy_ids.remove(removed) {
+                if proxy_id != crate::INVALID_U32 {
+                    self.proxies.remove(proxy_id);
+                }
+            }
+
+            if let Some(co) = colliders.get_mut_internal(*removed) {
+                // Reset the proxy index.
+                co.bf_data.proxy_index = crate::INVALID_U32;
+            }
+        }
+    }
+
+    /// Finalize the insertion of the layer identified by `layer_id`.
+    ///
+    /// This will:
+    /// - Remove all the subregion proxies from the larger layer.
+    /// - Pre-insert all the smaller layer's region proxies into this layer.
+    fn finalize_layer_insertion(&mut self, layer_id: u8) {
+        // Remove all the region endpoints from the larger layer.
+        // They will be automatically replaced by the new layer's regions.
+ if let Some(larger_layer) = self.layers[layer_id as usize].larger_layer { + self.layers[larger_layer as usize].unregister_all_subregions(&mut self.proxies); + } + + // Add all the regions from the smaller layer to the new layer. + // This will result in new regions to be created in the new layer. + // These new regions will automatically propagate to the larger layers in + // the Phase 3 of `Self::update`. + if let Some(smaller_layer) = self.layers[layer_id as usize].smaller_layer { + let (smaller_layer, new_layer) = self + .layers + .index_mut2(smaller_layer as usize, layer_id as usize); + + smaller_layer.propagate_existing_regions( + new_layer, + &mut self.proxies, + &mut self.region_pool, + ); + } + } + + /// Ensures that a given layer exists. + /// + /// If the layer does not exist then: + /// 1. It is created and added to `self.layers`. + /// 2. The smaller/larger layer indices are updated to order them + /// properly depending on their depth. + /// 3. All the subregion proxies from the larger layer are deleted: + /// they will be replaced by this new layer's regions later in + /// the `update` function. + /// 4. All the regions from the smaller layer are added to that new + /// layer. + fn ensure_layer_exists(&mut self, new_depth: i8) -> u8 { + // Special case: we don't have any layers yet. + if self.layers.is_empty() { + let layer_id = self.layers.len() as u8; // TODO: check overflow. + self.layers + .push(SAPLayer::new(new_depth, layer_id, None, None)); + return 0; + } + + // Find the first layer with a depth larger or equal to new_depth. + let mut larger_layer_id = Some(self.smallest_layer); + + while let Some(curr_layer_id) = larger_layer_id { + if self.layers[curr_layer_id as usize].depth >= new_depth { + break; + } + + larger_layer_id = self.layers[