feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,101 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpAABBManagerBase.h"
#include "BpBroadPhase.h"
using namespace physx;
using namespace Bp;
// Base-class constructor shared by the CPU/GPU AABB manager implementations.
// Wires the manager to its broadphase and bounds storage, routes every internal
// container through the supplied virtual allocator, and caches the kinematic
// pair-filtering configuration.
// NOTE: initializer order below must match the member declaration order in the
// header (not visible here) — do not reorder.
AABBManagerBase::AABBManagerBase( BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode) :
mAddedHandleMap (allocator),
mRemovedHandleMap (allocator),
mChangedHandleMap (allocator),
mGroups (allocator),
mEnvIDs (allocator),
mContactDistance (contactDistance),
mVolumeData (allocator),
// eKILL means pairs of that category are discarded outright by the filter.
mFilters (kineKineFilteringMode == PxPairFilteringMode::eKILL, staticKineFilteringMode == PxPairFilteringMode::eKILL),
mAddedHandles (allocator),
mUpdatedHandles (allocator),
mRemovedHandles (allocator),
mBroadPhase (bp),
mBoundsArray (boundsArray),
mUsedSize (0),
mNbAggregates (0),
#if PX_ENABLE_SIM_STATS
mGpuDynamicsLostFoundPairsStats(0),
mGpuDynamicsTotalAggregatePairsStats(0),
mGpuDynamicsLostFoundAggregatePairsStats(0),
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
#if BP_USE_AGGREGATE_GROUP_TAIL
// Aggregate filter groups are handed out downwards from eAGGREGATE_BASE.
mAggregateGroupTide (PxU32(Bp::FilterGroup::eAGGREGATE_BASE)),
#endif
mContextID (contextID),
mOriginShifted (false)
{
PX_UNUSED(maxNbAggregates); // PT: TODO: use it or remove it
// Pre-size all per-shape arrays; at least one slot so index 0 is always valid.
reserveShapeSpace(PxMax(maxNbShapes, 1u));
// mCreatedOverlaps.reserve(16000);
}
// Grows every per-bounds array to a common capacity.
// The requested size is rounded up to a power of two so that repeated small
// growth requests amortize into few reallocations.
void AABBManagerBase::reserveShapeSpace(PxU32 nbTotalBounds)
{
	const PxU32 capacity = PxNextPowerOfTwo(nbTotalBounds);
	mAddedHandleMap.resize(capacity);
	mRemovedHandleMap.resize(capacity);
	mGroups.resize(capacity, Bp::FilterGroup::eINVALID);
	mVolumeData.resize(capacity);	//KS - must be initialized so that userData is NULL for SQ-only shapes
	mContactDistance.resizeUninitialized(capacity);
}
// Ensures slot 'index' exists in all per-bounds arrays, growing them on demand,
// and resets the slot so a stale/recycled entry cannot be mistaken for live data.
void AABBManagerBase::reserveSpaceForBounds(BoundsIndex index)
{
	const PxU32 required = index + 1;
	if(required >= mVolumeData.size())
		reserveShapeSpace(required);
	resetEntry(index);	//KS - make sure this entry is flagged as invalid
}
// Releases transient working buffers by forwarding to the broadphase.
// The manager's own arrays are kept; only the broadphase's scratch data is freed.
void AABBManagerBase::freeBuffers()
{
// PT: TODO: investigate if we need more stuff here
mBroadPhase.freeBuffers();
}
// Translates the broadphase's internal representation by -shift (world origin
// moved), passing along the current bounds and contact distances, and records
// that a shift happened so dependent code can react.
void AABBManagerBase::shiftOrigin(const PxVec3& shift)
{
mBroadPhase.shiftOrigin(shift, mBoundsArray.begin(), mContactDistance.begin());
mOriginShifted = true;
}

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhase.h"
#include "BpBroadPhaseSap.h"
#include "BpBroadPhaseMBP.h"
#include "BpBroadPhaseABP.h"
using namespace physx;
using namespace Bp;
// Factory: instantiates the broadphase implementation matching the requested
// type. Returns NULL (after asserting) for unrecognized types.
// Note: maxNbRegions is only consumed by the MBP implementation.
BroadPhase* BroadPhase::create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID)
{
	switch(bpType)
	{
	case PxBroadPhaseType::eABP:
		return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, false);
	case PxBroadPhaseType::ePABP:
		// PABP is ABP with the multi-threaded path enabled.
		return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, true);
	case PxBroadPhaseType::eMBP:
		return PX_NEW(BroadPhaseMBP)(maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
	case PxBroadPhaseType::eSAP:
		return PX_NEW(BroadPhaseSap)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
	default:
		PX_ASSERT(0);
		return NULL;
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,98 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_ABP_H
#define BP_BROADPHASE_ABP_H
#include "foundation/PxArray.h"
#include "BpBroadPhase.h"
#include "PxPhysXConfig.h"
#include "BpBroadPhaseUpdate.h"
#define ABP_MT2
namespace internalABP{
class ABP;
}
namespace physx
{
namespace Bp
{
// ABP ("Automatic Box Pruning") broadphase front-end. Wraps the internal ABP
// implementation and adapts it to the Bp::BroadPhase interface. When enableMT
// is true the instance reports itself as ePABP (parallel ABP).
class BroadPhaseABP : public BroadPhase
{
PX_NOCOPY(BroadPhaseABP)
public:
BroadPhaseABP( PxU32 maxNbBroadPhaseOverlaps,
PxU32 maxNbStaticShapes,
PxU32 maxNbDynamicShapes,
PxU64 contextID,
bool enableMT);
virtual ~BroadPhaseABP();
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return mEnableMT ? PxBroadPhaseType::ePABP : PxBroadPhaseType::eABP; }
virtual void release() PX_OVERRIDE PX_FINAL { PX_DELETE_THIS; }
virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE;
// preBroadPhase/fetchBroadPhaseResults are intentionally no-ops for ABP.
virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
virtual const BroadPhasePair* getCreatedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual const BroadPhasePair* getDeletedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
//~BroadPhase
internalABP::ABP* mABP; // PT: TODO: aggregate
// Counts/handle arrays captured from the last update's BroadPhaseUpdateData.
// NOTE(review): handle pointers appear to be non-owning views — confirm lifetime
// is tied to the update data passed to update().
PxU32 mNbAdded;
PxU32 mNbUpdated;
PxU32 mNbRemoved;
const BpHandle* mCreatedHandles;
const BpHandle* mUpdatedHandles;
const BpHandle* mRemovedHandles;
// Pair buffers exposed via getCreatedPairs()/getDeletedPairs().
PxArray<BroadPhasePair> mCreated;
PxArray<BroadPhasePair> mDeleted;
const Bp::FilterGroup::Enum*mGroups;
const BpFilter* mFilter;
const PxU64 mContextID;
const bool mEnableMT; // true => multi-threaded update path (ePABP)
// Internal helpers forwarding batched add/remove/update work to mABP.
void addObjects();
void removeObjects();
void updateObjects();
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_ABP_H

View File

@@ -0,0 +1,310 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_INTEGER_AABB_H
#define BP_BROADPHASE_INTEGER_AABB_H
#include "BpFiltering.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUnionCast.h"
namespace physx
{
namespace Bp
{
/*
\brief Losslessly map a float's bit pattern to an unsigned integer whose
unsigned ordering matches the ordering of the original float values.
*/
PX_FORCE_INLINE PxU32 encodeFloat(PxU32 ir)
{
	// Negative floats compare in reverse bit order: invert all bits.
	// Non-negative floats: set the sign bit so they sort above every negative.
	// (-0.0f and 0.0f map to different codes, but it makes no practical difference.)
	return (ir & PX_SIGN_BITMASK) ? ~ir : (ir | PX_SIGN_BITMASK);
}
/*
\brief Inverse of encodeFloat: recover a float's bit pattern from its
order-preserving integer encoding.
*/
PX_FORCE_INLINE PxU32 decodeFloat(PxU32 ir)
{
	// Sign bit set => the original float was non-negative: clear the flag bit.
	// Sign bit clear => the original float was negative: undo the bit inversion.
	return (ir & PX_SIGN_BITMASK) ? (ir & ~PX_SIGN_BITMASK) : ~ir;
}
/**
\brief Integer representation of PxBounds3 used by BroadPhase
\see BroadPhaseUpdateData
*/
typedef PxU32 ValType;
// Integer-encoded AABB. Min/max values are produced by encodeFloat(), so plain
// unsigned comparisons preserve float ordering; mins are snapped down and maxs
// snapped up onto a grid (eGRID_SNAP_VAL) to keep near-stationary bounds stable.
class IntegerAABB
{
public:
// Indices into mMinMax: the three minima first, then the three maxima.
enum
{
MIN_X = 0,
MIN_Y,
MIN_Z,
MAX_X,
MAX_Y,
MAX_Z
};
// Builds the encoded bounds from float bounds inflated by contactDistance on all axes.
IntegerAABB(const PxBounds3& b, PxReal contactDistance)
{
const PxVec3 dist(contactDistance);
encode(PxBounds3(b.minimum - dist, b.maximum + dist));
}
/*
\brief Return the minimum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMin(PxU32 i) const { return (mMinMax)[MIN_X+i]; }
/*
\brief Return the maximum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMax(PxU32 i) const { return (mMinMax)[MAX_X+i]; }
/*
\brief Return one of the six min/max values of the bound
\param[in] isMax determines whether a min or max value is returned
\param[in] index is the axis
*/
PX_FORCE_INLINE ValType getExtent(PxU32 isMax, PxU32 index) const
{
PX_ASSERT(isMax<=1);
// Maxima live 3 slots after minima in mMinMax (see the enum above).
return (mMinMax)[3*isMax+index];
}
/*
\brief Return the minimum on the x axis
*/
PX_FORCE_INLINE ValType getMinX() const { return mMinMax[MIN_X]; }
/*
\brief Return the minimum on the y axis
*/
PX_FORCE_INLINE ValType getMinY() const { return mMinMax[MIN_Y]; }
/*
\brief Return the minimum on the z axis
*/
PX_FORCE_INLINE ValType getMinZ() const { return mMinMax[MIN_Z]; }
/*
\brief Return the maximum on the x axis
*/
PX_FORCE_INLINE ValType getMaxX() const { return mMinMax[MAX_X]; }
/*
\brief Return the maximum on the y axis
*/
PX_FORCE_INLINE ValType getMaxY() const { return mMinMax[MAX_Y]; }
/*
\brief Return the maximum on the z axis
*/
PX_FORCE_INLINE ValType getMaxZ() const { return mMinMax[MAX_Z]; }
/*
\brief Encode float bounds so they are stored as integer bounds
\param[in] bounds is the bounds to be encoded
\note Minima are snapped down and maxima snapped up on the eGRID_SNAP_VAL grid,
so min can never equal max.
\note The snapping keeps the bounds constant when its shape is subjected to small
global pose perturbations. In turn, this helps reduce computational effort in the
broadphase update by reducing the amount of sorting required on near-stationary
bodies that are aligned along one or more axis.
\see decode
*/
PX_FORCE_INLINE void encode(const PxBounds3& bounds)
{
// Reinterpret the six floats as raw bit patterns for encodeFloatMin/Max.
const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&bounds.minimum.x);
const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&bounds.maximum.x);
//Avoid min=max by enforcing the rule that mins are even and maxs are odd.
mMinMax[MIN_X] = encodeFloatMin(min[0]);
mMinMax[MIN_Y] = encodeFloatMin(min[1]);
mMinMax[MIN_Z] = encodeFloatMin(min[2]);
// The (1<<2) marker bit distinguishes maxima from minima after snapping.
mMinMax[MAX_X] = encodeFloatMax(max[0]) | (1<<2);
mMinMax[MAX_Y] = encodeFloatMax(max[1]) | (1<<2);
mMinMax[MAX_Z] = encodeFloatMax(max[2]) | (1<<2);
}
/*
\brief Decode from integer bounds to float bounds
\param[out] bounds is the decoded float bounds
\note Encode followed by decode will produce a float bound larger than the original
due to the grid snapping in encode.
\see encode
*/
PX_FORCE_INLINE void decode(PxBounds3& bounds) const
{
PxU32* PX_RESTRICT min = PxUnionCast<PxU32*, PxF32*>(&bounds.minimum.x);
PxU32* PX_RESTRICT max = PxUnionCast<PxU32*, PxF32*>(&bounds.maximum.x);
min[0] = decodeFloat(mMinMax[MIN_X]);
min[1] = decodeFloat(mMinMax[MIN_Y]);
min[2] = decodeFloat(mMinMax[MIN_Z]);
max[0] = decodeFloat(mMinMax[MAX_X]);
max[1] = decodeFloat(mMinMax[MAX_Y]);
max[2] = decodeFloat(mMinMax[MAX_Z]);
}
/*
\brief Encode a single minimum value from float bits to a snapped integer
\note Snaps the encoded value DOWN to the previous multiple of 1<<eGRID_SNAP_VAL
\see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMin(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) - 1) << eGRID_SNAP_VAL;
}
/*
\brief Encode a single maximum value from float bits to a snapped integer
\note Snaps the encoded value UP to the next multiple of 1<<eGRID_SNAP_VAL
\see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMax(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) + 1) << eGRID_SNAP_VAL;
}
/*
\brief Shift the encoded bounds by a specified vector
\param[in] shift is the vector used to shift the bounds
\note Decodes to float space, subtracts the shift, and re-encodes.
*/
PX_FORCE_INLINE void shift(const PxVec3& shift)
{
::physx::PxBounds3 elemBounds;
decode(elemBounds);
elemBounds.minimum -= shift;
elemBounds.maximum -= shift;
encode(elemBounds);
}
/*
\brief Test if this aabb lies entirely inside another aabb
\param[in] box is the other box
\return True if this aabb lies entirely inside box
*/
PX_INLINE bool isInside(const IntegerAABB& box) const
{
if(box.mMinMax[MIN_X]>mMinMax[MIN_X]) return false;
if(box.mMinMax[MIN_Y]>mMinMax[MIN_Y]) return false;
if(box.mMinMax[MIN_Z]>mMinMax[MIN_Z]) return false;
if(box.mMinMax[MAX_X]<mMinMax[MAX_X]) return false;
if(box.mMinMax[MAX_Y]<mMinMax[MAX_Y]) return false;
if(box.mMinMax[MAX_Z]<mMinMax[MAX_Z]) return false;
return true;
}
/*
\brief Test if this aabb and another intersect
\param[in] b is the other box
\return True if this aabb and b intersect (touching counts as intersecting)
*/
PX_FORCE_INLINE bool intersects(const IntegerAABB& b) const
{
return !(b.mMinMax[MIN_X] > mMinMax[MAX_X] || mMinMax[MIN_X] > b.mMinMax[MAX_X] ||
b.mMinMax[MIN_Y] > mMinMax[MAX_Y] || mMinMax[MIN_Y] > b.mMinMax[MAX_Y] ||
b.mMinMax[MIN_Z] > mMinMax[MAX_Z] || mMinMax[MIN_Z] > b.mMinMax[MAX_Z]);
}
// Overlap test restricted to one axis; 'axis' indexes a minimum (0..2),
// the matching maximum is 3 slots later.
PX_FORCE_INLINE bool intersects1D(const IntegerAABB& b, const PxU32 axis) const
{
const PxU32 maxAxis = axis + 3;
return !(b.mMinMax[axis] > mMinMax[maxAxis] || mMinMax[axis] > b.mMinMax[maxAxis]);
}
/*
\brief Expand bounds to include another
\note This is used to compute the aggregate bounds of multiple shape bounds
\param[in] b is the bounds to be included
*/
PX_FORCE_INLINE void include(const IntegerAABB& b)
{
mMinMax[MIN_X] = PxMin(mMinMax[MIN_X], b.mMinMax[MIN_X]);
mMinMax[MIN_Y] = PxMin(mMinMax[MIN_Y], b.mMinMax[MIN_Y]);
mMinMax[MIN_Z] = PxMin(mMinMax[MIN_Z], b.mMinMax[MIN_Z]);
mMinMax[MAX_X] = PxMax(mMinMax[MAX_X], b.mMinMax[MAX_X]);
mMinMax[MAX_Y] = PxMax(mMinMax[MAX_Y], b.mMinMax[MAX_Y]);
mMinMax[MAX_Z] = PxMax(mMinMax[MAX_Z], b.mMinMax[MAX_Z]);
}
/*
\brief Set the bounds to (max, max, max), (min, min, min)
\note Uses the encoded representations of +PX_MAX_F32 / 0.0f so any include() overwrites them.
*/
PX_INLINE void setEmpty()
{
mMinMax[MIN_X] = mMinMax[MIN_Y] = mMinMax[MIN_Z] = 0xff7fffff; //PX_IR(PX_MAX_F32);
mMinMax[MAX_X] = mMinMax[MAX_Y] = mMinMax[MAX_Z] = 0x00800000; ///PX_IR(0.0f);
}
// Storage layout: [minX, minY, minZ, maxX, maxY, maxZ] (see the enum above).
ValType mMinMax[6];
private:
enum
{
eGRID_SNAP_VAL = 4 // number of low bits snapped away by encodeFloatMin/Max
};
};
// Encodes one inflated minimum bound value (float bits -> snapped sortable integer).
// Matches what IntegerAABB::encode() produces for minima.
PX_FORCE_INLINE ValType encodeMin(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.minimum[axis] - contactDistance;
	return IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(inflated));
}
// Encodes one inflated maximum bound value (float bits -> snapped sortable integer),
// setting the same (1<<2) marker bit that IntegerAABB::encode() applies to maxima.
PX_FORCE_INLINE ValType encodeMax(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.maximum[axis] + contactDistance;
	return IntegerAABB::encodeFloatMax(PxUnionCast<PxU32, PxF32>(inflated)) | (1<<2);
}
} //namespace Bp
} //namespace physx
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,112 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_H
#define BP_BROADPHASE_MBP_H
#include "BpBroadPhase.h"
#include "BpBroadPhaseMBPCommon.h"
#include "foundation/PxArray.h"
namespace internalMBP
{
class MBP;
}
namespace physx
{
namespace Bp
{
// MBP ("Multi Box Pruning") broadphase front-end. Wraps the internal MBP
// implementation and adapts it to the Bp::BroadPhase interface, including the
// region-management API (PxBroadPhaseRegions).
class BroadPhaseMBP : public BroadPhase
{
PX_NOCOPY(BroadPhaseMBP)
public:
BroadPhaseMBP( PxU32 maxNbRegions,
PxU32 maxNbBroadPhaseOverlaps,
PxU32 maxNbStaticShapes,
PxU32 maxNbDynamicShapes,
PxU64 contextID);
virtual ~BroadPhaseMBP();
// BroadPhaseBase
virtual void getCaps(PxBroadPhaseCaps& caps) const PX_OVERRIDE PX_FINAL;
//~BroadPhaseBase
// PxBroadPhaseRegions
virtual PxU32 getNbRegions() const PX_OVERRIDE PX_FINAL;
virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const PX_OVERRIDE PX_FINAL;
virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance) PX_OVERRIDE PX_FINAL;
virtual bool removeRegion(PxU32 handle) PX_OVERRIDE PX_FINAL;
virtual PxU32 getNbOutOfBoundsObjects() const PX_OVERRIDE PX_FINAL;
virtual const PxU32* getOutOfBoundsObjects() const PX_OVERRIDE PX_FINAL;
//~PxBroadPhaseRegions
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return PxBroadPhaseType::eMBP; }
virtual void release() PX_OVERRIDE PX_FINAL { PX_DELETE_THIS; }
virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE;
// preBroadPhase/fetchBroadPhaseResults are intentionally no-ops for MBP.
virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
virtual const BroadPhasePair* getCreatedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual const BroadPhasePair* getDeletedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
//~BroadPhase
internalMBP::MBP* mMBP; // PT: TODO: aggregate
// Mapping table of size mCapacity; grown via allocateMappingArray().
// NOTE(review): presumably maps Bp handles to internal MBP_Handles — confirm in the .cpp.
MBP_Handle* mMapping;
PxU32 mCapacity;
// Pair buffers exposed via getCreatedPairs()/getDeletedPairs().
PxArray<BroadPhasePair> mCreated;
PxArray<BroadPhasePair> mDeleted;
const Bp::FilterGroup::Enum*mGroups;
const BpFilter* mFilter;
const PxU64 mContextID;
// Internal update pipeline helpers.
void setUpdateData(const BroadPhaseUpdateData& updateData);
void addObjects(const BroadPhaseUpdateData& updateData);
void removeObjects(const BroadPhaseUpdateData& updateData);
void updateObjects(const BroadPhaseUpdateData& updateData);
void update();
void postUpdate();
void allocateMappingArray(PxU32 newCapacity);
PxU32 getCurrentNbPairs() const;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_MBP_H

View File

@@ -0,0 +1,198 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_COMMON_H
#define BP_BROADPHASE_MBP_COMMON_H
#include "PxPhysXConfig.h"
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Bp
{
#define MBP_USE_WORDS
#define MBP_USE_NO_CMP_OVERLAP
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
#define MBP_SIMD_OVERLAP
#endif
#ifdef MBP_USE_WORDS
typedef PxU16 MBP_Index;
#else
typedef PxU32 MBP_Index;
#endif
typedef PxU32 MBP_ObjectIndex; // PT: index in mMBP_Objects
typedef PxU32 MBP_Handle; // PT: returned to MBP users, combination of index/flip-flop/static-bit
// Integer-encoded AABB used by MBP. Min/max values come from encodeFloat(),
// so plain unsigned comparisons preserve float ordering. The low bit of each
// encoded value is dropped (>>1) to make room for a flag bit.
struct IAABB : public PxUserAllocated
{
	// True if this box lies entirely inside 'box'.
	PX_FORCE_INLINE bool isInside(const IAABB& box) const
	{
		return	box.mMinX<=mMinX && box.mMinY<=mMinY && box.mMinZ<=mMinZ
			&&	box.mMaxX>=mMaxX && box.mMaxY>=mMaxY && box.mMaxZ>=mMaxZ;
	}
	// Overlap test; boxes that merely touch (shared face) count as intersecting.
	PX_FORCE_INLINE PxIntBool intersects(const IAABB& a) const
	{
		const bool separated =	mMaxX < a.mMinX || a.mMaxX < mMinX
							||	mMaxY < a.mMinY || a.mMaxY < mMinY
							||	mMaxZ < a.mMinZ || a.mMaxZ < mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}
	// Overlap test; touching boxes are NOT considered intersecting.
	PX_FORCE_INLINE PxIntBool intersectNoTouch(const IAABB& a) const
	{
		const bool separated =	mMaxX <= a.mMinX || a.mMaxX <= mMinX
							||	mMaxY <= a.mMinY || a.mMaxY <= mMinY
							||	mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}
	// Builds the encoded box from float bounds: reinterpret the six floats as
	// raw bits, encode to sortable integers, then drop the lowest bit.
	PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT bits = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(bits[0])>>1;
		mMinY = encodeFloat(bits[1])>>1;
		mMinZ = encodeFloat(bits[2])>>1;
		mMaxX = encodeFloat(bits[3])>>1;
		mMaxY = encodeFloat(bits[4])>>1;
		mMaxZ = encodeFloat(bits[5])>>1;
	}
	// Inverse of initFrom2 (up to the precision lost by the >>1 in encoding).
	PX_FORCE_INLINE void decode(PxBounds3& box) const
	{
		PxU32* PX_RESTRICT bits = reinterpret_cast<PxU32*>(&box.minimum.x);
		bits[0] = decodeFloat(mMinX<<1);
		bits[1] = decodeFloat(mMinY<<1);
		bits[2] = decodeFloat(mMinZ<<1);
		bits[3] = decodeFloat(mMaxX<<1);
		bits[4] = decodeFloat(mMaxY<<1);
		bits[5] = decodeFloat(mMaxZ<<1);
	}
	// Axis accessors: i in [0,2] maps onto the X/Y/Z members declared below.
	PX_FORCE_INLINE PxU32 getMin(PxU32 i) const { return (&mMinX)[i]; }
	PX_FORCE_INLINE PxU32 getMax(PxU32 i) const { return (&mMaxX)[i]; }
	// Member order is load-bearing: getMin/getMax index from mMinX/mMaxX.
	PxU32 mMinX;
	PxU32 mMinY;
	PxU32 mMinZ;
	PxU32 mMaxX;
	PxU32 mMaxY;
	PxU32 mMaxZ;
};
// Integer-encoded AABB variant for the SIMD overlap path.
// NOTE(review): member order (mMinX, mMaxX, mMinY, mMinZ, mMaxY, mMaxZ) differs
// from IAABB — presumably chosen to suit SIMD loads in the overlap kernels;
// confirm before reordering.
struct SIMD_AABB : public PxUserAllocated
{
// Full-precision encoding: float bits -> sortable integers (no bit dropped).
PX_FORCE_INLINE void initFrom(const PxBounds3& box)
{
const PxU32* PX_RESTRICT binary = reinterpret_cast<const PxU32*>(&box.minimum.x);
mMinX = encodeFloat(binary[0]);
mMinY = encodeFloat(binary[1]);
mMinZ = encodeFloat(binary[2]);
mMaxX = encodeFloat(binary[3]);
mMaxY = encodeFloat(binary[4]);
mMaxZ = encodeFloat(binary[5]);
}
// Encoding with the lowest bit dropped (>>1), matching IAABB::initFrom2.
PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
{
const PxU32* PX_RESTRICT binary = reinterpret_cast<const PxU32*>(&box.minimum.x);
mMinX = encodeFloat(binary[0])>>1;
mMinY = encodeFloat(binary[1])>>1;
mMinZ = encodeFloat(binary[2])>>1;
mMaxX = encodeFloat(binary[3])>>1;
mMaxY = encodeFloat(binary[4])>>1;
mMaxZ = encodeFloat(binary[5])>>1;
}
// Inverse of initFrom2 (only valid for boxes built with initFrom2, since it
// re-applies the <<1 shift before decoding).
PX_FORCE_INLINE void decode(PxBounds3& box) const
{
PxU32* PX_RESTRICT binary = reinterpret_cast<PxU32*>(&box.minimum.x);
binary[0] = decodeFloat(mMinX<<1);
binary[1] = decodeFloat(mMinY<<1);
binary[2] = decodeFloat(mMinZ<<1);
binary[3] = decodeFloat(mMaxX<<1);
binary[4] = decodeFloat(mMaxY<<1);
binary[5] = decodeFloat(mMaxZ<<1);
}
// True if this box lies entirely inside 'box'.
PX_FORCE_INLINE bool isInside(const SIMD_AABB& box) const
{
if(box.mMinX>mMinX) return false;
if(box.mMinY>mMinY) return false;
if(box.mMinZ>mMinZ) return false;
if(box.mMaxX<mMaxX) return false;
if(box.mMaxY<mMaxY) return false;
if(box.mMaxZ<mMaxZ) return false;
return true;
}
// Overlap test; touching boxes count as intersecting.
PX_FORCE_INLINE PxIntBool intersects(const SIMD_AABB& a) const
{
if(mMaxX < a.mMinX || a.mMaxX < mMinX
|| mMaxY < a.mMinY || a.mMaxY < mMinY
|| mMaxZ < a.mMinZ || a.mMaxZ < mMinZ
)
return PxIntFalse;
return PxIntTrue;
}
// Overlap test; touching boxes are NOT considered intersecting.
PX_FORCE_INLINE PxIntBool intersectNoTouch(const SIMD_AABB& a) const
{
if(mMaxX <= a.mMinX || a.mMaxX <= mMinX
|| mMaxY <= a.mMinY || a.mMaxY <= mMinY
|| mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ
)
return PxIntFalse;
return PxIntTrue;
}
PxU32 mMinX;
PxU32 mMaxX;
PxU32 mMinY;
PxU32 mMinZ;
PxU32 mMaxY;
PxU32 mMaxZ;
};
}
} // namespace physx
#endif // BP_BROADPHASE_MBP_COMMON_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,211 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_H
#define BP_BROADPHASE_SAP_H
#include "BpBroadPhase.h"
#include "BpBroadPhaseSapAux.h"
#include "CmPool.h"
#include "CmTask.h"
namespace physx
{
class PxcScratchAllocator;
namespace Gu
{
class Axes;
}
namespace Bp
{
class SapEndPoint;
class IntegerAABB;
// Task used by the SAP broad phase to update one axis (see BroadPhaseSap::mBatchUpdateTasks[3]).
// Each task carries its own scratch buffer of pairs produced for its axis.
class BroadPhaseBatchUpdateWorkTask: public Cm::Task
{
public:
	BroadPhaseBatchUpdateWorkTask(PxU64 contextId=0) :
		Cm::Task		(contextId),
		mSap			(NULL),
		mAxis			(0xffffffff),	// 0xffffffff == not yet bound via set()
		mPairs			(NULL),
		mPairsSize		(0),
		mPairsCapacity	(0)
	{
	}

	virtual void runInternal();
	virtual const char* getName() const { return "BpBroadphaseSap.batchUpdate"; }

	// Bind the task to its owning broad phase and the axis index it will update.
	void set(class BroadPhaseSap* sap, const PxU32 axis) {mSap = sap; mAxis = axis;}

	BroadPhasePair* getPairs() const {return mPairs;}
	PxU32 getPairsSize() const {return mPairsSize;}
	PxU32 getPairsCapacity() const {return mPairsCapacity;}

	// Takes over an externally allocated pair buffer; size is tracked separately via setNumPairs().
	void setPairs(BroadPhasePair* pairs, const PxU32 pairsCapacity) {mPairs = pairs; mPairsCapacity = pairsCapacity;}
	void setNumPairs(const PxU32 pairsSize) {mPairsSize=pairsSize;}

private:
	class BroadPhaseSap* mSap;		// Owning broad phase that performs the actual axis update.
	PxU32 mAxis;					// Axis index passed to BroadPhaseSap::batchUpdate().
	BroadPhasePair* mPairs;			// Scratch pair buffer for this axis.
	PxU32 mPairsSize;
	PxU32 mPairsCapacity;
};
//KS - TODO, this could be reduced to U16 in smaller scenes
// Contiguous [start, end] range of updated elements in the sorted endpoint
// arrays; used to limit the update pass to regions that actually changed.
struct BroadPhaseActivityPocket
{
	PxU32 mStartIndex;
	PxU32 mEndIndex;
};
// Sweep-and-prune (SAP) broad phase: maintains, per axis, sorted arrays of box
// endpoints and incrementally reports created/deleted overlap pairs each update.
// Reports PxBroadPhaseType::eSAP from getType().
class BroadPhaseSap : public BroadPhase
{
	PX_NOCOPY(BroadPhaseSap)
public:
	friend class BroadPhaseBatchUpdateWorkTask;
	friend class SapUpdateWorkTask;
	friend class SapPostUpdateWorkTask;

	BroadPhaseSap(const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID);
	virtual ~BroadPhaseSap();

	// BroadPhase
	virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return PxBroadPhaseType::eSAP; }
	virtual void release() PX_OVERRIDE PX_FINAL;
	virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	// No pre-pass / deferred results needed by this implementation.
	virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
	virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
	// Pairs created/deleted by the most recent update(); buffers are owned by this object.
	virtual const BroadPhasePair* getCreatedPairs(PxU32& nbCreatedPairs) const PX_OVERRIDE PX_FINAL { nbCreatedPairs = mCreatedPairsSize; return mCreatedPairsArray; }
	virtual const BroadPhasePair* getDeletedPairs(PxU32& nbDeletedPairs) const PX_OVERRIDE PX_FINAL { nbDeletedPairs = mDeletedPairsSize; return mDeletedPairsArray; }
	virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
	virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
	virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
	//~BroadPhase

private:
	void resizeBuffers();

	PxcScratchAllocator* mScratchAllocator;

	//Data passed in from updateV.
	const BpHandle* mCreated;
	PxU32 mCreatedSize;
	const BpHandle* mRemoved;
	PxU32 mRemovedSize;
	const BpHandle* mUpdated;
	PxU32 mUpdatedSize;
	const PxBounds3* mBoxBoundsMinMax;
	const Bp::FilterGroup::Enum*mBoxGroups;
	const BpFilter* mFilter;
	const PxReal* mContactDistance;
	PxU32 mBoxesCapacity;

	//Boxes.
	SapBox1D* mBoxEndPts[3]; //Position of box min/max in sorted arrays of end pts (needs to have mBoxesCapacity).

	//End pts (endpts of boxes sorted along each axis).
	ValType* mEndPointValues[3]; //Sorted arrays of min and max box coords
	BpHandle* mEndPointDatas[3]; //Corresponding owner id and isMin/isMax for each entry in the sorted arrays of min and max box coords.

	// Per-update working data for the incremental pass.
	PxU8* mBoxesUpdated;						// Per-box flag: was the box updated this frame?
	BpHandle* mSortedUpdateElements;
	BroadPhaseActivityPocket* mActivityPockets;	// Changed regions of the sorted arrays.
	BpHandle* mListNext;
	BpHandle* mListPrev;

	PxU32 mBoxesSize;		//Number of sorted boxes + number of unsorted (new) boxes
	PxU32 mBoxesSizePrev;	//Number of sorted boxes
	PxU32 mEndPointsCapacity; //Capacity of sorted arrays.

	//Default maximum number of overlap pairs
	PxU32 mDefaultPairsCapacity;

	//Box-box overlap pairs created or removed each update.
	BpHandle* mData;
	PxU32 mDataSize;
	PxU32 mDataCapacity;

	//All current box-box overlap pairs.
	SapPairManager mPairs;

	//Created and deleted overlap pairs reported back through api.
	BroadPhasePair* mCreatedPairsArray;
	PxU32 mCreatedPairsSize;
	PxU32 mCreatedPairsCapacity;
	BroadPhasePair* mDeletedPairsArray;
	PxU32 mDeletedPairsSize;
	PxU32 mDeletedPairsCapacity;
	PxU32 mActualDeletedPairSize;	// Includes pairs simultaneously created+removed in one update.

	bool setUpdateData(const BroadPhaseUpdateData& updateData);
	void update();
	void postUpdate();

	//Batch create/remove/update.
	void batchCreate();
	void batchRemove();
	void batchUpdate();

	void batchUpdate(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);
	void batchUpdateFewUpdates(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);

	void ComputeSortedLists(	//const PxVec4& globalMin, const PxVec4& globalMax,
		BpHandle* PX_RESTRICT newBoxIndicesSorted, PxU32& newBoxIndicesCount, BpHandle* PX_RESTRICT oldBoxIndicesSorted, PxU32& oldBoxIndicesCount,
		bool& allNewBoxesStatics, bool& allOldBoxesStatics);

	BroadPhaseBatchUpdateWorkTask mBatchUpdateTasks[3];	// One task per axis.

	const PxU64 mContextID;

#if PX_DEBUG
	bool isSelfOrdered() const;
	bool isSelfConsistent() const;
#endif
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_H

View File

@@ -0,0 +1,911 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhaseSapAux.h"
#include "PxcScratchAllocator.h"
namespace physx
{
namespace Bp
{
// Exchange the contents of two broad-phase handles.
PX_FORCE_INLINE void PxBpHandleSwap(BpHandle& a, BpHandle& b)
{
	const BpHandle tmp = b;
	b = a;
	a = tmp;
}
// Put a handle pair into canonical order: id0 <= id1 on return.
PX_FORCE_INLINE void Sort(BpHandle& id0, BpHandle& id1)
{
	if(id1 < id0)
		PxBpHandleSwap(id0, id1);
}
// True when (id0, id1) is not the pair stored in p. The ids are compared in
// stored order, so callers pass them pre-sorted (see Sort above).
PX_FORCE_INLINE bool DifferentPair(const BroadPhasePair& p, BpHandle id0, BpHandle id1)
{
	return (p.mVolA != id0) || (p.mVolB != id1);
}
// 32-bit integer bit-mixing hash (shift/add/xor avalanche steps).
// NOTE(review): operates on signed int, so left shifts of negative
// intermediates and the arithmetic right shifts are implementation-dependent;
// presumably relied upon (two's complement targets) — confirm before porting
// to a new toolchain.
PX_FORCE_INLINE int Hash32Bits_1(int key)
{
	key += ~(key << 15);
	key ^= (key >> 10);
	key += (key << 3);
	key ^= (key >> 6);
	key += ~(key << 11);
	key ^= (key >> 16);
	return key;
}
// Hash a handle pair: pack the ids as (id0 | id1<<16), then mix the bits.
PX_FORCE_INLINE PxU32 Hash(BpHandle id0, BpHandle id1)
{
	const PxU32 packed = PxU32(id0) | (PxU32(id1) << 16);
	return PxU32(Hash32Bits_1(int(packed)));
}
///////////////////////////////////////////////////////////////////////////////
// All members start null/zero; storage is only acquired later via init().
SapPairManager::SapPairManager() :
	mHashTable				(NULL),
	mNext					(NULL),
	mHashSize				(0),
	mHashCapacity			(0),
	mMinAllowedHashCapacity	(0),
	mActivePairs			(NULL),
	mActivePairStates		(NULL),
	mNbActivePairs			(0),
	mActivePairsCapacity	(0),
	mMask					(0)
{
}
///////////////////////////////////////////////////////////////////////////////
// release() must have been called before destruction: the asserts verify that
// all arrays were already freed (the destructor itself frees nothing).
SapPairManager::~SapPairManager()
{
	PX_ASSERT(NULL==mHashTable);
	PX_ASSERT(NULL==mNext);
	PX_ASSERT(NULL==mActivePairs);
	PX_ASSERT(NULL==mActivePairStates);
}
///////////////////////////////////////////////////////////////////////////////
// Allocate hash table, bucket chain links, pair array and per-pair state array,
// all sized to 'size' entries. 'size' also becomes the minimum hash capacity
// that shrinkMemory() will never go below. Note that mHashSize/mMask stay 0
// here; they grow on demand in AddPair().
void SapPairManager::init(const PxU32 size)
{
	mHashTable=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
	mNext=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
	mActivePairs=reinterpret_cast<BroadPhasePair*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BroadPhasePair)*size), "BroadPhasePair"));
	mActivePairStates=reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(PxU8)*size), "BroadPhaseContextSap ActivePairStates"));
	mHashCapacity=size;
	mMinAllowedHashCapacity = size;
	mActivePairsCapacity=size;
}
///////////////////////////////////////////////////////////////////////////////
// Free all storage and return the manager to its freshly-constructed state.
// Must be called before destruction (the destructor asserts on it).
void SapPairManager::release()
{
	PX_FREE(mHashTable);
	PX_FREE(mNext);
	PX_FREE(mActivePairs);
	PX_FREE(mActivePairStates);
	mHashSize = 0;
	mHashCapacity = 0;
	mMinAllowedHashCapacity = 0;
	mNbActivePairs = 0;
	mActivePairsCapacity = 0;
	mMask = 0;
}
///////////////////////////////////////////////////////////////////////////////
// Look up the pair (id0, id1) in the hash table. The ids may be passed in any
// order. Returns NULL if the pair is not present (or nothing was allocated yet).
const BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1) const
{
	if(0==mHashSize) return NULL;	// Nothing has been allocated yet

	// Order the ids
	Sort(id0, id1);

	// Compute hash value for this pair
	PxU32 HashValue = Hash(id0, id1) & mMask;
	PX_ASSERT(HashValue<mHashCapacity);

	// Look for it in the table: walk the bucket's chain through mNext until a
	// matching pair or the end-of-chain sentinel is found.
	PX_ASSERT(HashValue<mHashCapacity);
	PxU32 Offset = mHashTable[HashValue];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];		// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE) return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////
// Internal version saving hash computation
// Same lookup as the public FindPair, but takes a precomputed (already masked)
// hash value and expects id0 <= id1 — callers must have sorted the ids.
PX_FORCE_INLINE BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1, PxU32 hash_value) const
{
	if(0==mHashSize) return NULL;	// Nothing has been allocated yet

	// Look for it in the table: walk the bucket chain through mNext.
	PX_ASSERT(hash_value<mHashCapacity);
	PxU32 Offset = mHashTable[hash_value];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];		// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE) return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////
// Add the pair (id0, id1) with the given initial state byte. If the pair is
// already present the existing entry is returned unchanged (state is NOT
// updated). Grows the hash table (power-of-two sizes) when full.
const BroadPhasePair* SapPairManager::AddPair(BpHandle id0, BpHandle id1, const PxU8 state)
{
	// Order the ids
	Sort(id0, id1);

	PxU32 HashValue = Hash(id0, id1) & mMask;

	BroadPhasePair* P = FindPair(id0, id1, HashValue);
	if(P)
	{
		return P;	// Persistent pair
	}

	// This is a new pair
	if(mNbActivePairs >= mHashSize)
	{
		// Get more entries
		mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
		mMask = mHashSize-1;

		reallocPairs(mHashSize>mHashCapacity);

		// Recompute hash value with new hash size
		HashValue = Hash(id0, id1) & mMask;
	}

	// Append the new pair at the end of the dense pair array and link it at the
	// head of its hash bucket's chain.
	PX_ASSERT(mNbActivePairs<mActivePairsCapacity);
	BroadPhasePair* p = &mActivePairs[mNbActivePairs];
	p->mVolA = id0;	// ### CMOVs would be nice here
	p->mVolB = id1;
	mActivePairStates[mNbActivePairs]=state;

	PX_ASSERT(mNbActivePairs<mHashSize);
	PX_ASSERT(mNbActivePairs<mHashCapacity);
	PX_ASSERT(HashValue<mHashCapacity);
	mNext[mNbActivePairs] = mHashTable[HashValue];
	mHashTable[HashValue] = BpHandle(mNbActivePairs++);
	return p;
}
///////////////////////////////////////////////////////////////////////////////
// Remove the pair stored at pair_index, whose (masked) hash is hash_value.
// Two steps: (1) unlink the entry from its hash bucket chain, (2) keep the
// pair array dense by moving the last pair into the vacated slot and re-linking
// it in its own bucket chain.
void SapPairManager::RemovePair(BpHandle /*id0*/, BpHandle /*id1*/, PxU32 hash_value, PxU32 pair_index)
{
	// Walk the hash table to fix mNext
	{
		PX_ASSERT(hash_value<mHashCapacity);
		PxU32 Offset = mHashTable[hash_value];
		PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

		PxU32 Previous=BP_INVALID_BP_HANDLE;
		while(Offset!=pair_index)
		{
			Previous = Offset;
			PX_ASSERT(Offset<mHashCapacity);
			Offset = mNext[Offset];
		}

		// Let us go/jump us
		if(Previous!=BP_INVALID_BP_HANDLE)
		{
			PX_ASSERT(Previous<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[Previous]==pair_index);
			mNext[Previous] = mNext[pair_index];
		}
		// else we were the first
		else
		{
			PX_ASSERT(hash_value<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			mHashTable[hash_value] = mNext[pair_index];
		}
	}
	// we're now free to reuse mNext[PairIndex] without breaking the list
#if PX_DEBUG
	PX_ASSERT(pair_index<mHashCapacity);
	mNext[pair_index]=BP_INVALID_BP_HANDLE;
#endif
	// Invalidate entry

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 LastPairIndex = mNbActivePairs-1;
		if(LastPairIndex==pair_index)
		{
			// Removed pair was already last: just shrink the dense array.
			mNbActivePairs--;
		}
		else
		{
			// Unlink the last pair from its own bucket chain before moving it.
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			const BroadPhasePair* Last = &mActivePairs[LastPairIndex];
			const PxU32 LastHashValue = Hash(Last->mVolA, Last->mVolB) & mMask;

			// Walk the hash table to fix mNext
			PX_ASSERT(LastHashValue<mHashCapacity);
			PxU32 Offset = mHashTable[LastHashValue];
			PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

			PxU32 Previous=BP_INVALID_BP_HANDLE;
			while(Offset!=LastPairIndex)
			{
				Previous = Offset;
				PX_ASSERT(Offset<mHashCapacity);
				Offset = mNext[Offset];
			}

			// Let us go/jump us
			if(Previous!=BP_INVALID_BP_HANDLE)
			{
				PX_ASSERT(Previous<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				PX_ASSERT(mNext[Previous]==LastPairIndex);
				mNext[Previous] = mNext[LastPairIndex];
			}
			// else we were the first
			else
			{
				PX_ASSERT(LastHashValue<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				mHashTable[LastHashValue] = mNext[LastPairIndex];
			}
			// we're now free to reuse mNext[LastPairIndex] without breaking the list
#if PX_DEBUG
			PX_ASSERT(LastPairIndex<mHashCapacity);
			mNext[LastPairIndex]=BP_INVALID_BP_HANDLE;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot: move the last pair (and its state) into
			// the vacated index and link it at the head of its bucket chain.
			PX_ASSERT(pair_index<mActivePairsCapacity);
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			mActivePairs[pair_index] = mActivePairs[LastPairIndex];
			mActivePairStates[pair_index] = mActivePairStates[LastPairIndex];
#if PX_DEBUG
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[pair_index]==BP_INVALID_BP_HANDLE);
#endif
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(LastHashValue<mHashCapacity);
			mNext[pair_index] = mHashTable[LastHashValue];
			mHashTable[LastHashValue] = BpHandle(pair_index);

			mNbActivePairs--;
		}
	}
}
// Public removal: locate the pair, unlink it, then shrink storage if possible.
// Returns false when (id0, id1) is not currently stored.
bool SapPairManager::RemovePair(BpHandle id0, BpHandle id1)
{
	Sort(id0, id1);	// Canonical order, matching AddPair/FindPair.

	const PxU32 hashValue = Hash(id0, id1) & mMask;
	const BroadPhasePair* pair = FindPair(id0, id1, hashValue);
	if(NULL == pair)
		return false;
	PX_ASSERT(pair->mVolA == id0);
	PX_ASSERT(pair->mVolB == id1);

	RemovePair(id0, id1, hashValue, GetPairIndex(pair));
	shrinkMemory();
	return true;
}
// Remove every pair that references a removed AABB. Note the loop only
// advances i when nothing is removed: RemovePair() moves the last pair into
// slot i, so the slot must be re-examined. Always returns true.
bool SapPairManager::RemovePairs(const PxBitMap& removedAABBs)
{
	PxU32 i=0;
	while(i<mNbActivePairs)
	{
		const BpHandle id0 = mActivePairs[i].mVolA;
		const BpHandle id1 = mActivePairs[i].mVolB;
		if(removedAABBs.test(id0) || removedAABBs.test(id1))
		{
			const PxU32 HashValue = Hash(id0, id1) & mMask;
			RemovePair(id0, id1, HashValue, i);
		}
		else i++;
	}
	return true;
}
// Shrink the hash table after removals, keeping a power-of-two size and never
// going below mMinAllowedHashCapacity (the size passed to init()).
void SapPairManager::shrinkMemory()
{
	//Compute the hash size given the current number of active pairs.
	const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);

	//If we have the correct hash size then no action required.
	if(correctHashSize==mHashSize || (correctHashSize < mMinAllowedHashCapacity && mHashSize == mMinAllowedHashCapacity))
		return;

	//The hash size can be reduced so take action.
	//Don't let the hash size fall below a threshold value.
	PxU32 newHashSize = correctHashSize;
	if(newHashSize < mMinAllowedHashCapacity)
	{
		newHashSize = mMinAllowedHashCapacity;
	}
	mHashSize = newHashSize;
	mMask = newHashSize-1;

	// Only reallocate when meaningfully smaller than current capacities;
	// otherwise just rebuild the bucket chains in place.
	reallocPairs( (newHashSize > mMinAllowedHashCapacity) || (mHashSize <= (mHashCapacity >> 2)) || (mHashSize <= (mActivePairsCapacity >> 2)));
}
// Rebuild the hash table for the current mHashSize/mMask. When allocRequired
// is true, all arrays are reallocated to mHashSize entries and existing pairs
// copied over; otherwise only the bucket chains are rebuilt in place. In both
// cases every pair is re-hashed because the mask changed.
void SapPairManager::reallocPairs(const bool allocRequired)
{
	if(allocRequired)
	{
		PX_FREE(mHashTable);
		mHashCapacity=mHashSize;
		mActivePairsCapacity=mHashSize;
		mHashTable = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize*sizeof(BpHandle), "BpHandle"));

		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// Get some bytes for new entries
		BroadPhasePair* NewPairs = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(mHashSize * sizeof(BroadPhasePair), "BroadPhasePair"));	PX_ASSERT(NewPairs);
		BpHandle* NewNext = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize * sizeof(BpHandle), "BpHandle"));	PX_ASSERT(NewNext);
		PxU8* NewPairStates = reinterpret_cast<PxU8*>(PX_ALLOC(mHashSize * sizeof(PxU8), "SapPairStates"));	PX_ASSERT(NewPairStates);

		// Copy old data if needed
		if(mNbActivePairs)
		{
			PxMemCopy(NewPairs, mActivePairs, mNbActivePairs*sizeof(BroadPhasePair));
			PxMemCopy(NewPairStates, mActivePairStates, mNbActivePairs*sizeof(PxU8));
		}

		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			NewNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}

		// Delete old data
		PX_FREE(mNext);
		PX_FREE(mActivePairs);
		PX_FREE(mActivePairStates);

		// Assign new pointer
		mActivePairs = NewPairs;
		mActivePairStates = NewPairStates;
		mNext = NewNext;
	}
	else
	{
		// No reallocation: clear the table and re-link every pair with the new mask.
		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			mNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}
	}
}
// Double the capacity of a created/deleted pairs buffer, preserving contents.
// Both the pointer and the capacity are updated in place.
void resizeCreatedDeleted(BroadPhasePair*& pairs, PxU32& maxNumPairs)
{
	PX_ASSERT(pairs);
	PX_ASSERT(maxNumPairs > 0);
	const PxU32 doubledCapacity = maxNumPairs * 2;
	BroadPhasePair* grown = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(sizeof(BroadPhasePair) * doubledCapacity, "BroadPhasePair"));
	PxMemCopy(grown, pairs, sizeof(BroadPhasePair) * maxNumPairs);
	PX_FREE(pairs);
	pairs = grown;
	maxNumPairs = doubledCapacity;
}
// Walk the per-update list of touched pair indices (dataArray) and split them
// into the created-pairs and deleted-pairs arrays reported to the caller.
// Pass 1: pairs flagged removed-but-not-new go to the deleted list; surviving
// pairs flagged new go to the created list. Pass 2: pairs that were both
// created and removed within the same update are appended after numDeletedPairs
// (counted in numActualDeletedPairs) so the caller can still purge them.
// The output arrays come from (and are grown via) the scratch allocator.
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
 const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
 PxcScratchAllocator* scratchAllocator,
 BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
 BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
 PxU32& numActualDeletedPairs,
 SapPairManager& pairManager)
{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	PX_UNUSED(boxGroups);
#endif

	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);

		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		PX_ASSERT(pairManager.IsInArray(UP));

		if(pairManager.IsRemoved(UP))
		{
			if(!pairManager.IsNew(UP))
			{
				// No need to call "ClearInArray" in this case, since the pair will get removed anyway

				// Grow the deleted list when full (doubling strategy).
				if(numDeletedPairs==maxNumDeletedPairs)
				{
					BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
					PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
					scratchAllocator->free(deletedPairsList);
					deletedPairsList = newDeletedPairsList;
					maxNumDeletedPairs = 2*maxNumDeletedPairs;
				}

				PX_ASSERT(numDeletedPairs<maxNumDeletedPairs);
				//PX_ASSERT((uintptr_t)UP->mUserData != 0xcdcdcdcd);
				deletedPairsList[numDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
			}
		}
		else
		{
			pairManager.ClearInArray(UP);

			// Add => already there... Might want to create user data, though
			if(pairManager.IsNew(UP))
			{
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				if(groupFiltering(boxGroups[UP->mVolA], boxGroups[UP->mVolB]))
#endif
				{
					// Grow the created list when full (doubling strategy).
					if(numCreatedPairs==maxNumCreatedPairs)
					{
						BroadPhasePair* newCreatedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumCreatedPairs, true));
						PxMemCopy(newCreatedPairsList, createdPairsList, sizeof(BroadPhasePair)*maxNumCreatedPairs);
						scratchAllocator->free(createdPairsList);
						createdPairsList = newCreatedPairsList;
						maxNumCreatedPairs = 2*maxNumCreatedPairs;
					}

					PX_ASSERT(numCreatedPairs<maxNumCreatedPairs);
					createdPairsList[numCreatedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
				}
				pairManager.ClearNew(UP);
			}
		}
	}

	//Record pairs that are to be deleted because they were simultaneously created and removed
	//from different axis sorts.
	numActualDeletedPairs=numDeletedPairs;
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);

		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		if(pairManager.IsRemoved(UP) && pairManager.IsNew(UP))
		{
			PX_ASSERT(pairManager.IsInArray(UP));

			if(numActualDeletedPairs==maxNumDeletedPairs)
			{
				BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
				PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
				scratchAllocator->free(deletedPairsList);
				deletedPairsList = newDeletedPairsList;
				maxNumDeletedPairs = 2*maxNumDeletedPairs;
			}

			PX_ASSERT(numActualDeletedPairs<=maxNumDeletedPairs);
			deletedPairsList[numActualDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/); //KS - should we even get here????
		}
	}

//	// #### try batch removal here
//	for(PxU32 i=0;i<numActualDeletedPairs;i++)
//	{
//		const BpHandle id0 = deletedPairsList[i].mVolA;
//		const BpHandle id1 = deletedPairsList[i].mVolB;
//#if PX_DEBUG
//		const bool Status = pairManager.RemovePair(id0, id1);
//		PX_ASSERT(Status);
//#else
//		pairManager.RemovePair(id0, id1);
//#endif
//	}

	//Only report deleted pairs from different groups.
	// NOTE(review): this swap-with-last compaction compares the tail entry's two
	// groups against each other (same-group test), not via groupFiltering as the
	// main test does — presumably intentional for the legacy non-group-ID path;
	// confirm if this code path is ever re-enabled.
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	for(PxU32 i=0;i<numDeletedPairs;i++)
	{
		const PxU32 id0 = deletedPairsList[i].mVolA;
		const PxU32 id1 = deletedPairsList[i].mVolB;
		if(!groupFiltering(boxGroups[id0], boxGroups[id1]))
		{
			while((numDeletedPairs-1) > i && boxGroups[deletedPairsList[numDeletedPairs-1].mVolA] == boxGroups[deletedPairsList[numDeletedPairs-1].mVolB])
			{
				numDeletedPairs--;
			}
			deletedPairsList[i]=deletedPairsList[numDeletedPairs-1];
			numDeletedPairs--;
		}
	}
#endif
}
//#define PRINT_STATS
// Optional profiling instrumentation for the pruning loops below: when
// PRINT_STATS is defined, count sweep iterations, overlap tests and emitted
// pairs, and dump the totals once per sweep; otherwise all macros compile away.
#ifdef PRINT_STATS
	#include <stdio.h>
	static PxU32 gNbIter = 0;
	static PxU32 gNbTests = 0;
	static PxU32 gNbPairs = 0;
	#define START_STATS				gNbIter = gNbTests = gNbPairs = 0;
	#define INCREASE_STATS_NB_ITER	gNbIter++;
	#define INCREASE_STATS_NB_TESTS	gNbTests++;
	#define INCREASE_STATS_NB_PAIRS	gNbPairs++;
	#define DUMP_STATS				printf("%d %d %d\n", gNbIter, gNbTests, gNbPairs);
#else
	#define START_STATS
	#define INCREASE_STATS_NB_ITER
	#define INCREASE_STATS_NB_TESTS
	#define INCREASE_STATS_NB_PAIRS
	#define DUMP_STATS
#endif
// Double mCapacity, moving the existing contents into a fresh scratch allocation.
void DataArray::Resize(PxcScratchAllocator* scratchAllocator)
{
	const PxU32 newCapacity = mCapacity * 2;
	BpHandle* grown = reinterpret_cast<BpHandle*>(scratchAllocator->alloc(sizeof(BpHandle) * newCapacity, true));
	PxMemCopy(grown, mData, mCapacity * sizeof(BpHandle));
	scratchAllocator->free(mData);
	mData = grown;
	mCapacity = newCapacity;
}
// 2D overlap test on the YZ fields. Touching counts as overlapping (strict <).
// Bitwise '|' instead of '||' evaluates all four comparisons unconditionally —
// presumably to avoid branches, matching the original's commented-out '||' form.
static PX_FORCE_INLINE int intersect2D(const BoxYZ& a, const BoxYZ& b)
{
	const bool sepY0 = b.mMaxY < a.mMinY;
	const bool sepY1 = a.mMaxY < b.mMinY;
	const bool sepZ0 = b.mMaxZ < a.mMinZ;
	const bool sepZ1 = a.mMaxZ < b.mMinZ;
	const bool separated = sepY0 | sepY1 | sepZ0 | sepZ1;
	return !separated;
}
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(id0, id1, SapPairManager::PAIR_UNKNOWN));
//If the hash table has reached its limit then we're unable to add a new pair.
if(NULL==UP)
return;
PX_ASSERT(UP);
if(pairManager.IsUnknown(UP))
{
pairManager.ClearState(UP);
pairManager.SetInArray(UP);
dataArray.AddData(pairManager.GetPairIndex(UP), scratchAllocator);
pairManager.SetNew(UP);
}
pairManager.ClearRemoved(UP);
}
// Mark the overlap (id0, id1) as removed. If the pair is not currently stored,
// this is a no-op; otherwise it is tracked in dataArray (once) and flagged removed.
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	const BroadPhasePair* pair = reinterpret_cast<const BroadPhasePair*>(pairManager.FindPair(id0, id1));
	if(NULL == pair)
		return;

	if(!pairManager.IsInArray(pair))
	{
		// First touch this update: remember its index for the report pass.
		pairManager.SetInArray(pair);
		dataArray.AddData(pairManager.GetPairIndex(pair), scratchAllocator);
	}
	pairManager.SetRemoved(pair);
}
// Parameter bundle for the static addPair() below: remap tables translating
// sorted sweep indices back to box ids, plus the allocator, pair manager and
// touched-pair index array shared by a pruning pass.
struct AddPairParams
{
	AddPairParams(const PxU32* remap0, const PxU32* remap1, PxcScratchAllocator* alloc, SapPairManager* pm, DataArray* da) :
		mRemap0				(remap0),
		mRemap1				(remap1),
		mScratchAllocator	(alloc),
		mPairManager		(pm),
		mDataArray			(da)
	{
	}

	const PxU32* mRemap0;	// Maps a first-operand sweep index to its box id.
	const PxU32* mRemap1;	// Maps a second-operand sweep index to its box id.
	PxcScratchAllocator* mScratchAllocator;
	SapPairManager* mPairManager;
	DataArray* mDataArray;
};
// Same logic as the free addPair() above, but takes sweep-local indices and
// remaps them to box ids through params->mRemap0/mRemap1 before insertion.
static void addPair(const AddPairParams* PX_RESTRICT params, const BpHandle id0_, const BpHandle id1_)
{
	SapPairManager& pairManager = *params->mPairManager;

	const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(params->mRemap0[id0_], params->mRemap1[id1_], SapPairManager::PAIR_UNKNOWN));

	//If the hash table has reached its limit then we're unable to add a new pair.
	if(NULL==UP)
		return;

	PX_ASSERT(UP);

	if(pairManager.IsUnknown(UP))
	{
		// Brand-new pair this update: track it and flag it as new.
		pairManager.ClearState(UP);
		pairManager.SetInArray(UP);
		params->mDataArray->AddData(pairManager.GetPairIndex(UP), params->mScratchAllocator);
		pairManager.SetNew(UP);
	}
	pairManager.ClearRemoved(UP);
}
// PT: TODO: use SIMD
// Build the flattened per-sweep arrays from the per-axis SapBox1D data, in
// sorted-index order: X endpoints (BoxX), the other two axes packed in BoxYZ,
// the filter group of each box, and a remap from sweep index back to box id.
// Note the axis permutation: boxes[2] fills the "Y" fields and boxes[1] the
// "Z" fields of BoxYZ — presumably a deliberate layout choice; the intersect2D
// test is symmetric in Y/Z so results are unaffected.
AuxData::AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds)
{
	// PT: TODO: use scratch allocator / etc
	BoxX* PX_RESTRICT boxX = reinterpret_cast<BoxX*>(PX_ALLOC(sizeof(BoxX)*(nb+1), "mBoxX"));	// +1 for the sentinel below
	BoxYZ* PX_RESTRICT boxYZ = reinterpret_cast<BoxYZ*>(PX_ALLOC(sizeof(BoxYZ)*nb, "mBoxYZ"));
	Bp::FilterGroup::Enum* PX_RESTRICT groups = reinterpret_cast<Bp::FilterGroup::Enum*>(PX_ALLOC(sizeof(Bp::FilterGroup::Enum)*nb, "mGroups"));
	PxU32* PX_RESTRICT remap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "mRemap"));

	mBoxX = boxX;
	mBoxYZ = boxYZ;
	mGroups = groups;
	mRemap = remap;
	mNb = nb;

	const PxU32 axis0 = 0;
	const PxU32 axis1 = 2;
	const PxU32 axis2 = 1;

	const SapBox1D* PX_RESTRICT boxes0 = boxes[axis0];
	const SapBox1D* PX_RESTRICT boxes1 = boxes[axis1];
	const SapBox1D* PX_RESTRICT boxes2 = boxes[axis2];

	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 boxID = indicesSorted[i];

		groups[i] = groupIds[boxID];
		remap[i] = boxID;

		const SapBox1D& currentBoxX = boxes0[boxID];
		boxX[i].mMinX = currentBoxX.mMinMax[0];
		boxX[i].mMaxX = currentBoxX.mMinMax[1];

		const SapBox1D& currentBoxY = boxes1[boxID];
		boxYZ[i].mMinY = currentBoxY.mMinMax[0];
		boxYZ[i].mMaxY = currentBoxY.mMinMax[1];

		const SapBox1D& currentBoxZ = boxes2[boxID];
		boxYZ[i].mMinZ = currentBoxZ.mMinMax[0];
		boxYZ[i].mMaxZ = currentBoxZ.mMinMax[1];
	}
	// Sentinel entry: terminates the inner sweep loops in the pruning functions
	// without an explicit bounds check.
	boxX[nb].mMinX = 0xffffffff;
}
// Free the flattened sweep arrays (reverse order of allocation in the ctor).
AuxData::~AuxData()
{
	PX_FREE(mRemap);
	PX_FREE(mGroups);
	PX_FREE(mBoxYZ);
	PX_FREE(mBoxX);
}
// Complete box pruning of one set against itself: boxes are swept along X
// (already sorted in auxData), and candidates overlapping on X are tested in
// YZ via intersect2D(). Found overlaps are registered through addPair() into
// the pair manager, and the touched-pair index array (dataArray and friends)
// is updated in place. The inner loops rely on the 0xffffffff sentinel written
// by the AuxData ctor to terminate without bounds checks.
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
								const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb = auxData->mNb;
	if(!nb)
		return;

	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		BoxX* boxX = auxData->mBoxX;
		BoxYZ* boxYZ = auxData->mBoxYZ;
		Bp::FilterGroup::Enum* groups = auxData->mGroups;
		PxU32* remap = auxData->mRemap;

		AddPairParams params(remap, remap, scratchAllocator, &pairManager, &da);

		PxU32 runningIndex = 0;
		PxU32 index0 = 0;

		while(runningIndex<nb && index0<nb)
		{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			const Bp::FilterGroup::Enum group0 = groups[index0];
#endif
			const BoxX& boxX0 = boxX[index0];

			// Advance past boxes that start strictly before box0's min.
			const BpHandle minLimit = boxX0.mMinX;
			while(boxX[runningIndex++].mMinX<minLimit);

			// Scan every box whose X-min lies within box0's X extent.
			const BpHandle maxLimit = boxX0.mMaxX;
			PxU32 index1 = runningIndex;
			while(boxX[index1].mMinX <= maxLimit)
			{
				INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				if(groupFiltering(group0, groups[index1], lut))
#endif
				{
					INCREASE_STATS_NB_TESTS
					if(intersect2D(boxYZ[index0], boxYZ[index1]))
/*					__m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index0].mMinY));
					b = _mm_shuffle_epi32(b, 78);
					const __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index1].mMinY));
					const __m128i d = _mm_cmpgt_epi32(a, b);
					const int mask = _mm_movemask_epi8(d);
					if(mask==0x0000ff00)*/
					{
						INCREASE_STATS_NB_PAIRS
						addPair(&params, index0, index1);
					}
				}
				index1++;
			}
			index0++;
		}
	}
	DUMP_STATS

	// Hand the (possibly reallocated) touched-pair array back to the caller.
	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}
// One direction of a bipartite sweep: each box of set 0 is tested against the
// X-overlapping boxes of set 1, with YZ confirmed via intersect2D(). The
// codepath template parameter selects '<' vs '<=' when skipping set-1 boxes:
// presumably so that, when called in both directions (see
// performBoxPruningNewOld), boxes with equal X-min are not reported twice —
// confirm against the caller. Relies on the 0xffffffff sentinel in boxX1.
template<int codepath>
static void bipartitePruning(
	const PxU32 nb0, const BoxX* PX_RESTRICT boxX0, const BoxYZ* PX_RESTRICT boxYZ0, const PxU32* PX_RESTRICT remap0, const Bp::FilterGroup::Enum* PX_RESTRICT groups0,
	const PxU32 nb1, const BoxX* PX_RESTRICT boxX1, const BoxYZ* PX_RESTRICT boxYZ1, const PxU32* PX_RESTRICT remap1, const Bp::FilterGroup::Enum* PX_RESTRICT groups1,
	const bool* lut, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	AddPairParams params(remap0, remap1, scratchAllocator, &pairManager, &dataArray);

	PxU32 runningIndex = 0;
	PxU32 index0 = 0;

	while(runningIndex<nb1 && index0<nb0)
	{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
		const Bp::FilterGroup::Enum group0 = groups0[index0];
#endif
		// Advance past set-1 boxes that cannot overlap box0 on X.
		const BpHandle minLimit = boxX0[index0].mMinX;
		if(!codepath)
		{
			while(boxX1[runningIndex].mMinX<minLimit)
				runningIndex++;
		}
		else
		{
			while(boxX1[runningIndex].mMinX<=minLimit)
				runningIndex++;
		}

		// Scan every set-1 box whose X-min lies within box0's X extent.
		const BpHandle maxLimit = boxX0[index0].mMaxX;
		PxU32 index1 = runningIndex;
		while(boxX1[index1].mMinX <= maxLimit)
		{
			INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			if(groupFiltering(group0, groups1[index1], lut))
#endif
			{
				INCREASE_STATS_NB_TESTS
				if(intersect2D(boxYZ0[index0], boxYZ1[index1]))
				{
					INCREASE_STATS_NB_PAIRS
					addPair(&params, index0, index1);
				}
			}
			index1++;
		}
		index0++;
	}
}
// Bipartite box pruning between two sets (e.g. new boxes vs existing boxes):
// runs bipartitePruning in both directions — <0> for set0-vs-set1 and <1> for
// set1-vs-set0 — so every cross-set overlap is found exactly once. Results go
// through the pair manager; the touched-pair array is updated in place.
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
								const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb0 = auxData0->mNb;
	const PxU32 nb1 = auxData1->mNb;
	if(!nb0 || !nb1)
		return;

	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		const BoxX* boxX0 = auxData0->mBoxX;
		const BoxYZ* boxYZ0 = auxData0->mBoxYZ;
		const Bp::FilterGroup::Enum* groups0 = auxData0->mGroups;
		const PxU32* remap0 = auxData0->mRemap;

		const BoxX* boxX1 = auxData1->mBoxX;
		const BoxYZ* boxYZ1 = auxData1->mBoxYZ;
		const Bp::FilterGroup::Enum* groups1 = auxData1->mGroups;
		const PxU32* remap1 = auxData1->mRemap;

		bipartitePruning<0>(nb0, boxX0, boxYZ0, remap0, groups0, nb1, boxX1, boxYZ1, remap1, groups1, lut, scratchAllocator, pairManager, da);
		bipartitePruning<1>(nb1, boxX1, boxYZ1, remap1, groups1, nb0, boxX0, boxYZ0, remap0, groups0, lut, scratchAllocator, pairManager, da);
	}
	DUMP_STATS

	// Hand the (possibly reallocated) touched-pair array back to the caller.
	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}
} //namespace Bp
} //namespace physx

View File

@@ -0,0 +1,275 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_AUX_H
#define BP_BROADPHASE_SAP_AUX_H
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxBitMap.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
#define ALIGN_SIZE_16(size) ((unsigned(size)+15)&(unsigned(~15)))
#define NUM_SENTINELS 2
#define BP_SAP_USE_PREFETCH 1//prefetch in batchUpdate
#define BP_SAP_USE_OVERLAP_TEST_ON_REMOVES 1// "Useless" but faster overall because seriously reduces number of calls (from ~10000 to ~3 sometimes!)
//Set 1 to test for group ids in batchCreate/batchUpdate so we can avoid group id test in ComputeCreatedDeletedPairsLists
//Set 0 to neglect group id test in batchCreate/batchUpdate and delay test until ComputeCreatedDeletedPairsLists
#define BP_SAP_TEST_GROUP_ID_CREATEUPDATE 1
#define MAX_BP_HANDLE 0x3fffffff
#define PX_REMOVED_BP_HANDLE 0x3ffffffd
// --- Endpoint handle encoding -----------------------------------------------
// Each SAP axis endpoint stores a handle whose low bit distinguishes min (0)
// from max (1); the remaining bits are the owning box id (see setData/getOwner).
// Two sentinel endpoints bracket each axis array (see setMin/MaxSentinel).

// Writes the sentinel placed before the first real endpoint of an axis.
PX_FORCE_INLINE void setMinSentinel(ValType& v, BpHandle& d)
{
	v = 0x00000000;//0x00800000; //0x00800000 is -FLT_MAX but setting it to 0 means we don't crash when we get a value outside the float range.
	d = (BP_INVALID_BP_HANDLE & ~1);
}

// Writes the sentinel placed after the last real endpoint of an axis.
PX_FORCE_INLINE void setMaxSentinel(ValType& v, BpHandle& d)
{
	v = 0xffffffff;//0xff7fffff; //0xff7fffff is +FLT_MAX but setting it to 0xffffffff means we don't crash when we get a value outside the float range.
	d = BP_INVALID_BP_HANDLE;
}

// Packs a box id and a min/max flag into a single endpoint handle.
PX_FORCE_INLINE BpHandle setData(PxU32 owner_box_id, const bool is_max)
{
	BpHandle d = BpHandle(owner_box_id<<1);
	if(is_max)	d |= 1;
	return d;
}

// True for both sentinel handles (they only differ in the low min/max bit).
PX_FORCE_INLINE bool isSentinel(const BpHandle& d)
{
	return (d&~1)==(BP_INVALID_BP_HANDLE & ~1);
}

// Non-zero when the endpoint is a "max" endpoint (low bit of the packed handle).
PX_FORCE_INLINE BpHandle isMax(const BpHandle& d)
{
	return BpHandle(d & 1);
}

// Recovers the owning box id from a packed endpoint handle.
PX_FORCE_INLINE BpHandle getOwner(const BpHandle& d)
{
	return BpHandle(d>>1);
}
// One axis of a SAP box: the handles of its two endpoints in that axis' endpoint array.
class SapBox1D
{
public:
	PX_FORCE_INLINE	SapBox1D()	{}
	PX_FORCE_INLINE	~SapBox1D()	{}

	BpHandle	mMinMax[2];//mMinMax[0]=min, mMinMax[1]=max
};
// Hash-based set of active (overlapping) broadphase pairs for the SAP broadphase.
// Pairs live densely in mActivePairs; mHashTable/mNext implement chained hashing
// over (id0, id1); mActivePairStates carries one PAIR_* flag byte per pair used for
// created/removed bookkeeping between updates.
class SapPairManager
{
public:
					SapPairManager();
					~SapPairManager();

	// Allocates initial storage sized for 'size' pairs.
	void			init(const PxU32 size);
	// Frees all storage.
	void			release();

	// Shrinks the hash structures down to what the active pairs actually need.
	void			shrinkMemory();

	// Adds (or re-finds) the pair (id0, id1), tagged with the given state byte.
	const BroadPhasePair*	AddPair		(BpHandle id0, BpHandle id1, const PxU8 state);
	bool					RemovePair	(BpHandle id0, BpHandle id1);
	// Removes every pair that references a handle set in removedAABBs.
	bool					RemovePairs	(const PxBitMap& removedAABBs);
	const BroadPhasePair*	FindPair	(BpHandle id0, BpHandle id1)	const;

	// Dense index of a pair, derived from its address inside mActivePairs.
	PX_FORCE_INLINE	PxU32	GetPairIndex(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BroadPhasePair));
	}

	BpHandle*		mHashTable;			// bucket heads, indexed by hash(id0, id1) masked with mMask
	BpHandle*		mNext;				// per-pair intra-bucket chaining
	PxU32			mHashSize;
	PxU32			mHashCapacity;
	PxU32			mMinAllowedHashCapacity;
	BroadPhasePair*	mActivePairs;		// dense array of active pairs
	PxU8*			mActivePairStates;	// one PAIR_* flag byte per active pair
	PxU32			mNbActivePairs;
	PxU32			mActivePairsCapacity;
	PxU32			mMask;				// hash mask (presumably mHashSize-1 — confirm in .cpp)

	// Internal variants that reuse an already-computed hash/bucket value.
	BroadPhasePair*	FindPair	(BpHandle id0, BpHandle id1, PxU32 hash_value) const;
	void			RemovePair	(BpHandle id0, BpHandle id1, PxU32 hash_value, PxU32 pair_index);
	void			reallocPairs(const bool allocRequired);

	// Per-pair state bits stored in mActivePairStates.
	enum
	{
		PAIR_INARRAY=1,
		PAIR_REMOVED=2,
		PAIR_NEW=4,
		PAIR_UNKNOWN=8
	};

	// Flag accessors: query / set / clear the PAIR_* bits of a given pair.
	PX_FORCE_INLINE	bool	IsInArray(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_INARRAY ? true : false;
	}
	PX_FORCE_INLINE	bool	IsRemoved(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_REMOVED ? true : false;
	}
	PX_FORCE_INLINE	bool	IsNew(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_NEW ? true : false;
	}
	PX_FORCE_INLINE	bool	IsUnknown(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_UNKNOWN ? true : false;
	}
	PX_FORCE_INLINE	void	ClearState(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs]=0;
	}
	PX_FORCE_INLINE	void	SetInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void	SetRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void	SetNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_NEW;
	}
	PX_FORCE_INLINE	void	ClearInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void	ClearRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void	ClearNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_NEW;
	}
};
// Growable array of BpHandle entries backed by the scratch allocator; used as the
// created/deleted pair event stream (see ComputeCreatedDeletedPairsLists).
struct DataArray
{
	DataArray(BpHandle* data, PxU32 size, PxU32 capacity) : mData(data), mSize(size), mCapacity(capacity)	{}

	BpHandle*	mData;		// presumably allocated via the scratch allocator — see Resize
	PxU32		mSize;
	PxU32		mCapacity;

	PX_NOINLINE	void	Resize(PxcScratchAllocator* scratchAllocator);

	// Appends one value, growing the buffer through the scratch allocator when full.
	PX_FORCE_INLINE void AddData(const PxU32 data, PxcScratchAllocator* scratchAllocator)
	{
		if(mSize==mCapacity)
			Resize(scratchAllocator);
		PX_ASSERT(mSize<mCapacity);
		mData[mSize++] = BpHandle(data);
	}
};
// Registers / unregisters the overlap (id0, id1) in the pair manager and appends
// the corresponding event to dataArray.
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);

// Converts the raw event stream in dataArray into separate created / deleted
// pair lists, allocating the output lists from the scratch allocator.
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
 const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
 PxcScratchAllocator* scratchAllocator,
 BroadPhasePair* & createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatdPairs,
 BroadPhasePair* & deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
 PxU32&numActualDeletedPairs,
 SapPairManager& pairManager);
// X bounds of a box, stored separately from YZ for a cache-friendly X sweep.
struct BoxX
{
	PxU32	mMinX;
	PxU32	mMaxX;
};

// YZ bounds of a box, only tested once the X sweep finds a candidate.
struct BoxYZ
{
	PxU32	mMinY;
	PxU32	mMinZ;
	PxU32	mMaxY;
	PxU32	mMaxZ;
};

// SoA snapshot of a box set consumed by the box-pruning entry points below.
struct AuxData
{
	AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds);
	~AuxData();

	BoxX*					mBoxX;		// presumably sorted by increasing mMinX (indicesSorted) — confirm in ctor
	BoxYZ*					mBoxYZ;
	Bp::FilterGroup::Enum*	mGroups;
	PxU32*					mRemap;		// local index -> original box handle
	PxU32					mNb;
};

// Complete box pruning: overlaps within a single set.
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
								const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
// Bipartite box pruning: overlaps between two sets (e.g. new vs old boxes).
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
								const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
// Strict 2D interval-overlap test on encoded bounds: true only when box b and
// box c overlap (not merely touch) on both tested axes. Early-outs on the first
// axis; otherwise identical to the single-expression form.
PX_FORCE_INLINE bool Intersect2D_Handle
(const BpHandle bDir1Min, const BpHandle bDir1Max, const BpHandle bDir2Min, const BpHandle bDir2Max,
 const BpHandle cDir1Min, const BpHandle cDir1Max, const BpHandle cDir2Min, const BpHandle cDir2Max)
{
	// Disjoint (or just touching) on axis 1 => no overlap.
	if(bDir1Max <= cDir1Min || cDir1Max <= bDir1Min)
		return false;
	// Axis 1 overlaps; the result is decided by axis 2.
	return bDir2Max > cDir2Min && cDir2Max > bDir2Min;
}
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_AUX_H

View File

@@ -0,0 +1,245 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhaseShared.h"
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
using namespace physx;
using namespace Bp;
#define MBP_ALLOC(x) PX_ALLOC(x, "MBP")
#define MBP_FREE(x) PX_FREE(x)
// Fills 'nb' consecutive dwords starting at 'dest' with 'value'.
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
	for(PxU32 i=0; i<nb; i++)
		dest[i] = value;
}
///////////////////////////////////////////////////////////////////////////////
// Starts with an empty, unallocated pair manager; hash storage is created lazily
// by reallocPairs/reserveMemory on first use.
PairManagerData::PairManagerData() :
	mHashSize		(0),
	mMask			(0),
	mNbActivePairs	(0),
	mHashTable		(NULL),
	mNext			(NULL),
	mActivePairs	(NULL),
	mReservedMemory	(0)
{
}

///////////////////////////////////////////////////////////////////////////////

// Releases all hash storage (see purge).
PairManagerData::~PairManagerData()
{
	purge();
}
///////////////////////////////////////////////////////////////////////////////
// Frees every hash structure and resets the manager to its empty initial state.
void PairManagerData::purge()
{
	// The three frees are independent of each other; release everything.
	MBP_FREE(mHashTable);
	MBP_FREE(mActivePairs);
	MBP_FREE(mNext);

	mNbActivePairs	= 0;
	mMask			= 0;
	mHashSize		= 0;
}
///////////////////////////////////////////////////////////////////////////////
// Rebuilds the hash table and the pair/next arrays at the current mHashSize/mMask.
// Active pairs are copied across and every pair is re-linked into its (possibly
// different) bucket, because stored bucket positions were truncated by the old mask.
// NOTE: the old mNext/mActivePairs are only freed after the new arrays are built.
void PairManagerData::reallocPairs()
{
	MBP_FREE(mHashTable);
	mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
	storeDwords(mHashTable, mHashSize, INVALID_ID);	// mark all buckets empty

	// Get some bytes for new entries
	InternalPair* newPairs = reinterpret_cast<InternalPair*>(MBP_ALLOC(mHashSize * sizeof(InternalPair)));	PX_ASSERT(newPairs);
	PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));	PX_ASSERT(newNext);

	// Copy old data if needed
	if(mNbActivePairs)
		PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(InternalPair));

	// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
	// yeah, since hash(id0, id1) is a constant
	// However it might not be needed to recompute them => only less efficient but still ok
	for(PxU32 i=0;i<mNbActivePairs;i++)
	{
		const PxU32 hashValue = hash(mActivePairs[i].getId0(), mActivePairs[i].getId1()) & mMask;	// New hash value with new mask
		newNext[i] = mHashTable[hashValue];
		mHashTable[hashValue] = i;
	}

	// Delete old data
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);

	// Assign new pointer
	mActivePairs = newPairs;
	mNext = newNext;
}
///////////////////////////////////////////////////////////////////////////////
void PairManagerData::shrinkMemory()
{
// Check correct memory against actually used memory
const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);
if(mHashSize==correctHashSize)
return;
if(mReservedMemory && correctHashSize < mReservedMemory)
return;
// Reduce memory used
mHashSize = correctHashSize;
mMask = mHashSize-1;
reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Pre-sizes the hash structures for roughly 'memSize' pairs (rounded up to a
// power of two) and remembers that floor so shrinkMemory() won't go below it.
void PairManagerData::reserveMemory(PxU32 memSize)
{
	if(memSize == 0)
		return;

	const PxU32 pow2Size = PxIsPowerOfTwo(memSize) ? memSize : PxNextPowerOfTwo(memSize);

	mHashSize		= pow2Size;
	mMask			= pow2Size - 1;
	mReservedMemory	= pow2Size;

	reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Grows the hash structures to fit one more pair and returns the caller's full
// hash value re-masked for the enlarged table.
PX_NOINLINE PxU32 PairManagerData::growPairs(PxU32 fullHashValue)
{
	const PxU32 newSize = PxNextPowerOfTwo(mNbActivePairs+1);
	mMask		= newSize - 1;
	mHashSize	= newSize;
	reallocPairs();

	// Recompute hash value with new hash size
	return fullHashValue & mMask;
}
///////////////////////////////////////////////////////////////////////////////
// Removes pair #pairIndex: first unlinks it from its hash bucket, then fills the
// hole in the dense mActivePairs array by moving the LAST active pair into the
// freed slot and re-linking that moved pair in its own bucket. O(bucket length),
// no allocation. id0/id1 are unused: the caller already resolved pairIndex/hashValue.
void PairManagerData::removePair(PxU32 /*id0*/, PxU32 /*id1*/, PxU32 hashValue, PxU32 pairIndex)
{
	// Walk the hash table to fix mNext
	{
		PxU32 offset = mHashTable[hashValue];
		PX_ASSERT(offset!=INVALID_ID);

		PxU32 previous=INVALID_ID;
		while(offset!=pairIndex)
		{
			previous = offset;
			offset = mNext[offset];
		}

		// Let us go/jump us
		if(previous!=INVALID_ID)
		{
			PX_ASSERT(mNext[previous]==pairIndex);
			mNext[previous] = mNext[pairIndex];
		}
		// else we were the first
		else mHashTable[hashValue] = mNext[pairIndex];
		// we're now free to reuse mNext[pairIndex] without breaking the list
	}
#if PX_DEBUG
	mNext[pairIndex]=INVALID_ID;
#endif
	// Invalidate entry

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 lastPairIndex = mNbActivePairs-1;
		if(lastPairIndex==pairIndex)
		{
			// Removed pair was already the last: just shrink.
			mNbActivePairs--;
		}
		else
		{
			// Unlink the last pair from ITS bucket before moving it.
			const InternalPair* last = &mActivePairs[lastPairIndex];
			const PxU32 lastHashValue = hash(last->getId0(), last->getId1()) & mMask;

			// Walk the hash table to fix mNext
			PxU32 offset = mHashTable[lastHashValue];
			PX_ASSERT(offset!=INVALID_ID);

			PxU32 previous=INVALID_ID;
			while(offset!=lastPairIndex)
			{
				previous = offset;
				offset = mNext[offset];
			}

			// Let us go/jump us
			if(previous!=INVALID_ID)
			{
				PX_ASSERT(mNext[previous]==lastPairIndex);
				mNext[previous] = mNext[lastPairIndex];
			}
			// else we were the first
			else mHashTable[lastHashValue] = mNext[lastPairIndex];
			// we're now free to reuse mNext[lastPairIndex] without breaking the list

#if PX_DEBUG
			mNext[lastPairIndex]=INVALID_ID;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot
			mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
			PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
			mNext[pairIndex] = mHashTable[lastHashValue];
			mHashTable[lastHashValue] = pairIndex;

			mNbActivePairs--;
		}
	}
}

View File

@@ -0,0 +1,252 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SHARED_H
#define BP_BROADPHASE_SHARED_H
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHash.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Bp
{
#define INVALID_ID 0xffffffff
#define INVALID_USER_ID 0xffffffff
struct InternalPair : public PxUserAllocated
{
PX_FORCE_INLINE PxU32 getId0() const { return id0_isNew & ~PX_SIGN_BITMASK; }
PX_FORCE_INLINE PxU32 getId1() const { return id1_isUpdated & ~PX_SIGN_BITMASK; }
PX_FORCE_INLINE PxU32 isNew() const { return id0_isNew & PX_SIGN_BITMASK; }
PX_FORCE_INLINE PxU32 isUpdated() const { return id1_isUpdated & PX_SIGN_BITMASK; }
PX_FORCE_INLINE void setNewPair(PxU32 id0, PxU32 id1)
{
PX_ASSERT(!(id0 & PX_SIGN_BITMASK));
PX_ASSERT(!(id1 & PX_SIGN_BITMASK));
id0_isNew = id0 | PX_SIGN_BITMASK;
id1_isUpdated = id1;
}
PX_FORCE_INLINE void setNewPair2(PxU32 id0, PxU32 id1)
{
PX_ASSERT(!(id0 & PX_SIGN_BITMASK));
PX_ASSERT(!(id1 & PX_SIGN_BITMASK));
id0_isNew = id0;
id1_isUpdated = id1;
}
PX_FORCE_INLINE void setUpdated() { id1_isUpdated |= PX_SIGN_BITMASK; }
PX_FORCE_INLINE void clearUpdated() { id1_isUpdated &= ~PX_SIGN_BITMASK; }
PX_FORCE_INLINE void clearNew() { id0_isNew &= ~PX_SIGN_BITMASK; }
protected:
PxU32 id0_isNew;
PxU32 id1_isUpdated;
};
PX_FORCE_INLINE bool differentPair(const InternalPair& p, PxU32 id0, PxU32 id1) { return (id0!=p.getId0()) || (id1!=p.getId1()); }
PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxComputeHash( (id0&0xffff)|(id1<<16)); }
//PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxComputeHash(PxU64(id0)|(PxU64(id1)<<32)) ; }
PX_FORCE_INLINE void sort(PxU32& id0, PxU32& id1) { if(id0>id1) PxSwap(id0, id1); }
class PairManagerData
{
public:
PairManagerData();
~PairManagerData();
PX_FORCE_INLINE PxU32 getPairIndex(const InternalPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(InternalPair));
}
// Internal version saving hash computation
PX_FORCE_INLINE InternalPair* findPair(PxU32 id0, PxU32 id1, PxU32 hashValue) const
{
if(!mHashTable)
return NULL; // Nothing has been allocated yet
InternalPair* PX_RESTRICT activePairs = mActivePairs;
const PxU32* PX_RESTRICT next = mNext;
// Look for it in the table
PxU32 offset = mHashTable[hashValue];
while(offset!=INVALID_ID && differentPair(activePairs[offset], id0, id1))
{
PX_ASSERT(activePairs[offset].getId0()!=INVALID_USER_ID);
offset = next[offset]; // Better to have a separate array for this
}
if(offset==INVALID_ID)
return NULL;
PX_ASSERT(offset<mNbActivePairs);
// Match mActivePairs[offset] => the pair is persistent
return &activePairs[offset];
}
PX_FORCE_INLINE InternalPair* addPairInternal(PxU32 id0, PxU32 id1)
{
// Order the ids
sort(id0, id1);
const PxU32 fullHashValue = hash(id0, id1);
PxU32 hashValue = fullHashValue & mMask;
{
InternalPair* PX_RESTRICT p = findPair(id0, id1, hashValue);
if(p)
{
p->setUpdated();
return p; // Persistent pair
}
}
// This is a new pair
if(mNbActivePairs >= mHashSize)
hashValue = growPairs(fullHashValue);
const PxU32 pairIndex = mNbActivePairs++;
InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex];
p->setNewPair(id0, id1);
mNext[pairIndex] = mHashTable[hashValue];
mHashTable[hashValue] = pairIndex;
return p;
}
PxU32 mHashSize;
PxU32 mMask;
PxU32 mNbActivePairs;
PxU32* mHashTable;
PxU32* mNext;
InternalPair* mActivePairs;
PxU32 mReservedMemory;
void purge();
void reallocPairs();
void shrinkMemory();
void reserveMemory(PxU32 memSize);
PX_NOINLINE PxU32 growPairs(PxU32 fullHashValue);
void removePair(PxU32 id0, PxU32 id1, PxU32 hashValue, PxU32 pairIndex);
};
struct AABB_Xi
{
PX_FORCE_INLINE AABB_Xi() {}
PX_FORCE_INLINE ~AABB_Xi() {}
PX_FORCE_INLINE void initFromFloats(const void* PX_RESTRICT minX, const void* PX_RESTRICT maxX)
{
mMinX = encodeFloat(*reinterpret_cast<const PxU32*>(minX));
mMaxX = encodeFloat(*reinterpret_cast<const PxU32*>(maxX));
}
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
initFromFloats(&min.x, &max.x);
}
PX_FORCE_INLINE void operator = (const AABB_Xi& box)
{
mMinX = box.mMinX;
mMaxX = box.mMaxX;
}
PX_FORCE_INLINE void initSentinel()
{
mMinX = 0xffffffff;
}
PX_FORCE_INLINE bool isSentinel() const
{
return mMinX == 0xffffffff;
}
PxU32 mMinX;
PxU32 mMaxX;
};
struct AABB_YZn
{
PX_FORCE_INLINE AABB_YZn() {}
PX_FORCE_INLINE ~AABB_YZn() {}
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
mMinY = -min.y;
mMinZ = -min.z;
mMaxY = max.y;
mMaxZ = max.z;
}
PX_FORCE_INLINE void operator = (const AABB_YZn& box)
{
using namespace physx::aos;
V4StoreA(V4LoadA(&box.mMinY), &mMinY);
}
float mMinY;
float mMinZ;
float mMaxY;
float mMaxZ;
};
struct AABB_YZr
{
PX_FORCE_INLINE AABB_YZr() {}
PX_FORCE_INLINE ~AABB_YZr() {}
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
mMinY = min.y;
mMinZ = min.z;
mMaxY = max.y;
mMaxZ = max.z;
}
PX_FORCE_INLINE void operator = (const AABB_YZr& box)
{
using namespace physx::aos;
V4StoreA(V4LoadA(&box.mMinY), &mMinY);
}
float mMinY;
float mMinZ;
float mMaxY;
float mMaxZ;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_SHARED_H

View File

@@ -0,0 +1,143 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhase.h"
#include "common/PxProfileZone.h"
#include "foundation/PxBitMap.h"
using namespace physx;
using namespace Bp;
#if PX_CHECKED
// Checked-build validation entry point: verifies both the update data itself and
// its consistency against the broadphase it will be fed to. Short-circuits on the
// first failure.
bool BroadPhaseUpdateData::isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp, const bool skipBoundValidation, PxU64 contextID)
{
	PX_PROFILE_ZONE("BroadPhaseUpdateData::isValid", contextID);
	return (updateData.isValid(skipBoundValidation) && bp.isValid(updateData));
}
// Validates one handle list from a BroadPhaseUpdateData (created/updated/removed):
//  - a non-empty list must have a non-NULL pointer
//  - every handle is < capacity and the list is sorted in ascending order
//  - when 'groups' is provided, no handle maps to FilterGroup::eINVALID
//  - when 'bounds' is provided, each handle's bounds are finite with min <= max
// Every visited handle is also set in 'bitmap' so the caller can afterwards check
// the three lists for mutual disjointness (see testBitmap).
// NOTE(review): consecutive duplicate handles are not rejected (only strict
// descents are) — confirm whether duplicates are intended to be legal here.
static bool testHandles(PxU32 size, const BpHandle* handles, const PxU32 capacity, const Bp::FilterGroup::Enum* groups, const PxBounds3* bounds, PxBitMap& bitmap)
{
	if(!handles && size)
		return false;

/*	ValType minVal=0;
	ValType maxVal=0xffffffff;*/

	for(PxU32 i=0;i<size;i++)
	{
		const BpHandle h = handles[i];
		if(h>=capacity)
			return false;

		// Array in ascending order of id.
		if(i>0 && (h < handles[i-1]))
			return false;

		if(groups && groups[h]==FilterGroup::eINVALID)
			return false;

		bitmap.set(h);

		if(bounds)
		{
			if(!bounds[h].isFinite())
				return false;

			for(PxU32 j=0;j<3;j++)
			{
				//Max must be greater than min.
				if(bounds[h].minimum[j]>bounds[h].maximum[j])
					return false;

#if 0
				//Bounds have an upper limit.
				if(bounds[created[i]].getMax(j)>=maxVal)
					return false;

				//Bounds have a lower limit.
				if(bounds[created[i]].getMin(j)<=minVal)
					return false;

				//Max must be odd.
				if(4 != (bounds[created[i]].getMax(j) & 4))
					return false;

				//Min must be even.
				if(0 != (bounds[created[i]].getMin(j) & 4))
					return false;
#endif
			}
		}
	}
	return true;
}
static bool testBitmap(const PxBitMap& bitmap, PxU32 size, const BpHandle* handles)
{
while(size--)
{
const BpHandle h = *handles++;
if(bitmap.test(h))
return false;
}
return true;
}
// Checked-build self-validation: each of the created/updated/removed handle lists
// must be individually well-formed (testHandles), and no handle may appear in more
// than one list (testBitmap over the bitmaps filled during testHandles).
// Bound validation can be skipped by the caller via skipBoundValidation.
bool BroadPhaseUpdateData::isValid(const bool skipBoundValidation) const
{
	const PxBounds3* bounds = skipBoundValidation ? NULL : getAABBs();
	const PxU32 boxesCapacity = getCapacity();
	const Bp::FilterGroup::Enum* groups = getGroups();

	PxBitMap createdBitmap;	createdBitmap.resizeAndClear(boxesCapacity);
	PxBitMap updatedBitmap;	updatedBitmap.resizeAndClear(boxesCapacity);
	PxBitMap removedBitmap;	removedBitmap.resizeAndClear(boxesCapacity);

	if(!testHandles(getNumCreatedHandles(), getCreatedHandles(), boxesCapacity, groups, bounds, createdBitmap))
		return false;
	if(!testHandles(getNumUpdatedHandles(), getUpdatedHandles(), boxesCapacity, groups, bounds, updatedBitmap))
		return false;
	// Removed handles get no group/bounds checks: they are leaving the broadphase.
	if(!testHandles(getNumRemovedHandles(), getRemovedHandles(), boxesCapacity, NULL, NULL, removedBitmap))
		return false;

	// Pairwise disjointness of the three lists.
	if(1)
	{
		// Created/updated
		if(!testBitmap(createdBitmap, getNumUpdatedHandles(), getUpdatedHandles()))
			return false;
		// Created/removed
		if(!testBitmap(createdBitmap, getNumRemovedHandles(), getRemovedHandles()))
			return false;
		// Updated/removed
		if(!testBitmap(updatedBitmap, getNumRemovedHandles(), getRemovedHandles()))
			return false;
	}
	return true;
}
#endif

View File

@@ -0,0 +1,74 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpFiltering.h"
using namespace physx;
using namespace Bp;
// Builds the symmetric type-vs-type interaction lookup table.
// discardKineKine / discardStaticKine suppress kinematic-kinematic and
// static-kinematic pairs respectively; everything else follows fixed rules.
BpFilter::BpFilter(bool discardKineKine, bool discardStaticKine)
{
	// Start from "no interaction", then enable the supported combinations.
	for(int j = 0; j < Bp::FilterType::COUNT; j++)
		for(int i = 0; i < Bp::FilterType::COUNT; i++)
			mLUT[j][i] = false;

	// The table is symmetric: enabling (a, b) always sets (b, a) identically.
	auto enable = [this](int a, int b, bool value)
	{
		mLUT[a][b] = value;
		mLUT[b][a] = value;
	};

	// Rigid-body interactions.
	enable(Bp::FilterType::STATIC,		Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::STATIC,		Bp::FilterType::KINEMATIC,	!discardStaticKine);
	enable(Bp::FilterType::DYNAMIC,		Bp::FilterType::KINEMATIC,	true);
	enable(Bp::FilterType::DYNAMIC,		Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::KINEMATIC,	Bp::FilterType::KINEMATIC,	!discardKineKine);

	// Aggregates interact with all rigid types and each other.
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::STATIC,		true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::KINEMATIC,	true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::AGGREGATE,	true);

	// Enable deformable surface interactions.
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::DYNAMIC,			true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::STATIC,				true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::KINEMATIC,			true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::DEFORMABLE_SURFACE,	true);

	// Enable deformable volume interactions.
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::DYNAMIC,			true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::STATIC,				true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::KINEMATIC,			true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::DEFORMABLE_VOLUME,	true);

	// Enable particle system interactions.
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::DYNAMIC,		true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::STATIC,			true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::KINEMATIC,		true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::PARTICLESYSTEM,	true);
}
// Nothing to release: the filter only owns the inline interaction LUT.
BpFilter::~BpFilter()
{
}