feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletion

View File

@@ -0,0 +1,217 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABBMANAGER_H
#define BP_AABBMANAGER_H
#include "foundation/PxHashSet.h"
#include "foundation/PxHashMap.h"
#include "BpAABBManagerTasks.h"
#include "BpAABBManagerBase.h"
namespace physx
{
namespace Cm
{
class FlushPool;
}
namespace Bp
{
struct BroadPhasePair;
class Aggregate;
class PersistentPairs;
class PersistentActorAggregatePair;
class PersistentAggregateAggregatePair;
class PersistentSelfCollisionPairs;
// A pair of shape handles identifying an actor/aggregate (or aggregate/aggregate) overlap.
// Used as the key type of AggPairMap below. Default construction deliberately leaves the
// members uninitialized (hot-path type, initialized by the two-argument constructor).
struct AggPair
{
	PX_FORCE_INLINE AggPair() {}
	PX_FORCE_INLINE AggPair(ShapeHandle index0, ShapeHandle index1) : mIndex0(index0), mIndex1(index1) {}

	// Member-wise equality: both handles must match.
	PX_FORCE_INLINE bool operator==(const AggPair& p) const
	{
		return (mIndex0 == p.mIndex0) && (mIndex1 == p.mIndex1);
	}

	ShapeHandle mIndex0;
	ShapeHandle mIndex1;
};
typedef PxCoalescedHashMap<AggPair, PersistentPairs*> AggPairMap;
// PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway
// A pair of 32-bit IDs compared as a single 64-bit value. Default construction deliberately
// leaves the members uninitialized (hot-path type); use the two-argument constructor.
struct Pair
{
	PX_FORCE_INLINE Pair(PxU32 id0, PxU32 id1) : mID0(id0), mID1(id1) {}
	PX_FORCE_INLINE Pair(){}

	// Packs both IDs into one 64-bit key for cheap comparisons.
	// This replaces the previous '*reinterpret_cast<const PxU64*>(this)', which was
	// undefined behavior: a strict-aliasing violation and a potentially misaligned
	// 8-byte load (Pair only guarantees 4-byte alignment). The bit layout below
	// (mID0 in the low word) matches the little-endian memory layout the old cast
	// relied on, so ordering and equality results are unchanged on supported targets.
	PX_FORCE_INLINE PxU64 getKey() const
	{
		return (PxU64(mID1) << 32) | PxU64(mID0);
	}

	PX_FORCE_INLINE bool operator<(const Pair& p) const
	{
		return getKey() < p.getKey();
	}

	PX_FORCE_INLINE bool operator==(const Pair& p) const
	{
		return getKey() == p.getKey();
	}

	PX_FORCE_INLINE bool operator!=(const Pair& p) const
	{
		return getKey() != p.getKey();
	}

	PxU32 mID0;
	PxU32 mID1;
};
class AABBManager;
// Worker task for the second stage of broadphase post-processing.
// NOTE(review): runInternal() is defined in the .cpp; given the friend declaration in
// AABBManager below it presumably forwards to AABBManager::postBpStage2 — confirm there.
class PostBroadPhaseStage2Task : public Cm::Task
{
Cm::FlushPool* mFlushPool; // not owned; NULL after construction, must be set via setFlushPool() before the task runs
AABBManager& mManager; // the manager this task operates on (task is a member of AABBManager, so lifetime is safe)
PX_NOCOPY(PostBroadPhaseStage2Task)
public:
PostBroadPhaseStage2Task(PxU64 contextID, AABBManager& manager) : Cm::Task(contextID), mFlushPool(NULL), mManager(manager)
{
}
virtual const char* getName() const { return "PostBroadPhaseStage2Task"; }
// Supplies the per-frame flush pool; mFlushPool starts out NULL so this must be called before scheduling.
void setFlushPool(Cm::FlushPool* pool) { mFlushPool = pool; }
virtual void runInternal();
};
class ProcessAggPairsBase;
/**
\brief A structure responsible for:
* storing an aabb representation for each active shape in the related scene
* managing the creation/removal of aabb representations when their related shapes are created/removed
* updating all aabbs that require an update due to modification of shape geometry or transform
* updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate
* computing and reporting the incremental changes to the set of overlapping aabb pairs
*/
// CPU implementation of AABBManagerBase (see the base class for the overall contract).
class AABBManager : public AABBManagerBase
{
PX_NOCOPY(AABBManager)
public:
AABBManager(BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode);
virtual ~AABBManager() {}
// AABBManagerBase
virtual void destroy() PX_OVERRIDE PX_FINAL;
virtual AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, PxU32 maxNumShapes, PxAggregateFilterHint filterHint, PxU32 envID) PX_OVERRIDE PX_FINAL;
virtual bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle) PX_OVERRIDE PX_FINAL;
virtual bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType, PxU32 envID) PX_OVERRIDE PX_FINAL;
virtual bool removeBounds(BoundsIndex index) PX_OVERRIDE PX_FINAL;
virtual void updateBPFirstPass(PxU32 numCpuTasks, Cm::FlushPool& flushPool, bool hasContactDistanceUpdated, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
virtual void updateBPSecondPass(PxcScratchAllocator* scratchAllocator, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
virtual void postBroadPhase(PxBaseTask*, Cm::FlushPool& flushPool) PX_OVERRIDE PX_FINAL;
virtual void reallocateChangedAABBMgActorHandleMap(const PxU32 size) PX_OVERRIDE PX_FINAL;
virtual bool getOutOfBoundsObjects(OutOfBoundsData& data) PX_OVERRIDE PX_FINAL;
virtual void clearOutOfBoundsObjects() PX_OVERRIDE PX_FINAL;
virtual void visualize(PxRenderOutput& out) PX_OVERRIDE PX_FINAL;
virtual void releaseDeferredAggregateIds() PX_OVERRIDE PX_FINAL {}
//~AABBManagerBase
// CPU-specific pre-update step, invoked before the broadphase update proper.
void preBpUpdate_CPU(PxU32 numCpuTasks);
// PT: TODO: what is that BpCacheData for?
// Pool accessors for per-thread broadphase cache data (mBpThreadContextPool below).
BpCacheData* getBpCacheData();
void putBpCacheData(BpCacheData*);
void resetBpCacheData();
PxMutex mMapLock;
private:
//void reserveShapeSpace(PxU32 nbShapes);
// Post-broadphase stages 2 and 3, driven by the tasks declared below.
void postBpStage2(PxBaseTask*, Cm::FlushPool&);
void postBpStage3(PxBaseTask*);
PostBroadPhaseStage2Task mPostBroadPhase2;
Cm::DelegateTask<AABBManager, &AABBManager::postBpStage3> mPostBroadPhase3;
PreBpUpdateTask mPreBpUpdateTask;
PxU32 mTimestamp;
PxU32 mFirstFreeAggregate; // head of the free-list threaded through mAggregates slots
PxArray<Aggregate*> mAggregates; // PT: indexed by AggregateHandle
PxArray<Aggregate*> mDirtyAggregates; // aggregates whose bounds need recomputing this frame
AggPairMap mActorAggregatePairs;
AggPairMap mAggregateAggregatePairs;
PxArray<ProcessAggPairsBase*> mAggPairTasks;
PxHashSet<Pair> mCreatedPairsTmp; // PT: temp hashset for dubious post filtering, persistent to minimize allocs
PxSList mBpThreadContextPool; // free-list of BpCacheData, see getBpCacheData/putBpCacheData
PxArray<void*> mOutOfBoundsObjects;
PxArray<void*> mOutOfBoundsAggregates;
PX_FORCE_INLINE Aggregate* getAggregateFromHandle(AggregateHandle handle)
{
PX_ASSERT(handle<mAggregates.size());
return mAggregates[handle];
}
void startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool);
PersistentActorAggregatePair* createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB);
PersistentAggregateAggregatePair* createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB);
void updatePairs(PersistentPairs& p, BpCacheData* data = NULL);
void handleOriginShift();
public:
void processBPCreatedPair(const BroadPhasePair& pair);
void processBPDeletedPair(const BroadPhasePair& pair);
friend class PersistentActorAggregatePair;
friend class PersistentAggregateAggregatePair;
friend class ProcessSelfCollisionPairsParallel;
friend class PostBroadPhaseStage2Task;
};
} //namespace Bp
} //namespace physx
#endif //BP_AABBMANAGER_H

View File

@@ -0,0 +1,396 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABBMANAGER_BASE_H
#define BP_AABBMANAGER_BASE_H
#include "foundation/PxAllocator.h"
#include "foundation/PxPinnedArray.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxSList.h"
#include "foundation/PxBitUtils.h"
#include "BpVolumeData.h"
#include "BpBroadPhaseUpdate.h"
#include "GuBounds.h"
#include "PxFiltering.h"
#include "PxAggregate.h"
#include "foundation/PxSimpleTypes.h"
namespace physx
{
class PxcScratchAllocator;
class PxRenderOutput;
class PxBaseTask;
namespace Cm
{
class FlushPool;
}
namespace Bp
{
typedef PxU32 BoundsIndex;
//typedef PxU32 ActorHandle;
/**
\brief Changes to the configuration of overlap pairs are reported as void* pairs.
\note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume.
\see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps
*/
// A created or deleted overlap between two volumes, reported as the user data pointers
// that were passed to addBounds(). Default construction leaves members uninitialized.
struct AABBOverlap
{
PX_FORCE_INLINE AABBOverlap() {}
PX_FORCE_INLINE AABBOverlap(void* userData0, void* userData1/*, ActorHandle pairHandle*/) : mUserData0(userData0), mUserData1(userData1)/*, mPairHandle(pairHandle)*/
{
// PT: TODO: why is this forbidden?
PX_ASSERT(userData0 != userData1);
}
// PT: these will eventually be the userData pointers passed to addBounds(), i.e. Sc::ElementSim pointers in PhysX. This may not be
// necessary at all, since in the current design the bounds indices themselves come from BP clients (they're not handles managed by the BP).
// So there's a 1:1 mapping between bounds-indices (which are effectively element IDs in PhysX) and the userData pointers (Sc::ElementSim).
// Thus we could just return bounds indices directly to users - at least in the context of PhysX, maybe the standalone BP is different.
void* mUserData0;
void* mUserData1;
// PT: TODO: not sure what happened there but mPairUserData is not used inside the BP itself so we need to revisit this.
/* union
{
ActorHandle mPairHandle; //For created pairs, this is the index into the pair in the pair manager
void* mUserData; //For deleted pairs, this is the user data written by the application to the pair
};*/
void* mPairUserData; //For deleted pairs, this is the user data written by the application to the pair
};
struct BpCacheData : public PxSListEntry
{
PxArray<AABBOverlap> mCreatedPairs[2];
PxArray<AABBOverlap> mDeletedPairs[2];
void reset()
{
mCreatedPairs[0].resizeUninitialized(0);
mCreatedPairs[1].resizeUninitialized(0);
mDeletedPairs[0].resizeUninitialized(0);
mDeletedPairs[1].resizeUninitialized(0);
}
};
typedef PxPinnedArray<Bp::FilterGroup::Enum> GroupsArrayPinned;
typedef PxPinnedArray<VolumeData> VolumeDataArrayPinned;
typedef PxPinnedArray<ShapeHandle> ShapeHandleArrayPinned;
// Shared array of shape AABBs, indexed by BoundsIndex, plus a dirty flag that tracks
// whether anything changed since the last resetChangedState().
class BoundsArray : public PxUserAllocated
{
PX_NOCOPY(BoundsArray)
public:
BoundsArray(PxVirtualAllocator& allocator) : mBounds(allocator), mHasAnythingChanged(true) {} //needs to be set explicitly for PxgBounds first copy
virtual ~BoundsArray(){}
// Grows the array (to a power-of-two capacity) so that 'index' is a valid slot.
PX_FORCE_INLINE void initEntry(PxU32 index)
{
index++; // PT: always pretend we need one more entry, to make sure reading the last used entry will be SIMD-safe.
const PxU32 oldCapacity = mBounds.capacity();
if (index >= oldCapacity)
{
const PxU32 newCapacity = PxNextPowerOfTwo(index);
mBounds.reserve(newCapacity);
mBounds.forceSize_Unsafe(newCapacity); // size == capacity: all slots addressable without further bookkeeping
}
}
// Recomputes the AABB at 'index' from the given geometry and pose, and marks the array dirty.
virtual void updateBounds(const PxTransform& transform, const PxGeometry& geom, PxU32 index, PxU32 /*indexFrom*/)
{
Gu::computeBounds(mBounds[index], geom, transform, 0.0f, 1.0f);
mHasAnythingChanged = true;
}
// Stores externally-computed bounds at 'index' and marks the array dirty.
virtual void setBounds(const PxBounds3& bounds, PxU32 index)
{
// PX_CHECK_AND_RETURN(bounds.isValid() && !bounds.isEmpty(), "BoundsArray::setBounds - illegal bounds\n");
mBounds[index] = bounds;
mHasAnythingChanged = true;
}
PX_FORCE_INLINE const PxBounds3* begin() const { return mBounds.begin(); }
PX_FORCE_INLINE PxBounds3* begin() { return mBounds.begin(); }
PX_FORCE_INLINE PxBoundsArrayPinned& getBounds() { return mBounds; }
PX_FORCE_INLINE const PxBounds3& getBounds(PxU32 index) const { return mBounds[index]; }
PX_FORCE_INLINE PxU32 size() const { return mBounds.size(); }
PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; }
PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; }
PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; }
// Translates every stored AABB by -shift (used when the scene origin is moved).
void shiftOrigin(const PxVec3& shift)
{
// we shift some potential NaNs here because we don't know what's active, but should be harmless
const PxU32 nbBounds = mBounds.size();
for(PxU32 i=0; i<nbBounds; i++)
{
mBounds[i].minimum -= shift;
mBounds[i].maximum -= shift;
}
mHasAnythingChanged = true;
}
protected:
PxBoundsArrayPinned mBounds;
bool mHasAnythingChanged;
};
/**
\brief A structure responsible for:
* storing an aabb representation for each active shape in the related scene
* managing the creation/removal of aabb representations when their related shapes are created/removed
* updating all aabbs that require an update due to modification of shape geometry or transform
* updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate
* computing and reporting the incremental changes to the set of overlapping aabb pairs
*/
class AABBManagerBase : public PxUserAllocated
{
PX_NOCOPY(AABBManagerBase)
public:
AABBManagerBase(BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode);
virtual ~AABBManagerBase() {}
virtual void destroy() = 0;
virtual AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, PxU32 maxNumShapes, PxAggregateFilterHint filterHint, PxU32 envID) = 0;
virtual bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle) = 0;
virtual bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType, PxU32 envID) = 0;
virtual bool removeBounds(BoundsIndex index) = 0;
// Grows the internal per-shape arrays so that 'index' is addressable.
void reserveSpaceForBounds(BoundsIndex index);
PX_FORCE_INLINE PxIntBool isMarkedForRemove(BoundsIndex index) const { return mRemovedHandleMap.boundedTest(index); }
// PX_FORCE_INLINE PxIntBool isMarkedForAdd(BoundsIndex index) const { return mAddedHandleMap.boundedTest(index); }
PX_FORCE_INLINE BroadPhase* getBroadPhase() const { return &mBroadPhase; }
PX_FORCE_INLINE BoundsArray& getBoundsArray() { return mBoundsArray; }
PX_FORCE_INLINE PxU32 getNbActiveAggregates() const { return mNbAggregates; }
PX_FORCE_INLINE const float* getContactDistances() const { return mContactDistance.begin(); }
PX_FORCE_INLINE PxBitMapPinned& getChangedAABBMgActorHandleMap() { return mChangedHandleMap; }
PX_FORCE_INLINE void* getUserData(const BoundsIndex index) const { return (index<mVolumeData.size()) ? mVolumeData[index].getUserData() : NULL; }
// Updates the contact offset for one entry and flags it as changed.
void setContactDistance(BoundsIndex handle, PxReal offset)
{
// PT: this works even for aggregated shapes, since the corresponding bit will also be set in the 'updated' map.
mContactDistance.begin()[handle] = offset;
setPersistentStateChanged();
mChangedHandleMap.growAndSet(handle);
}
void setBPGroup(BoundsIndex index, Bp::FilterGroup::Enum group)
{
PX_ASSERT((index + 1) < mVolumeData.size());
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
}
virtual void updateBPFirstPass(PxU32 numCpuTasks, Cm::FlushPool& flushPool, bool hasContactDistanceUpdated, PxBaseTask* continuation) = 0;
virtual void updateBPSecondPass(PxcScratchAllocator* scratchAllocator, PxBaseTask* continuation) = 0;
virtual void postBroadPhase(PxBaseTask*, Cm::FlushPool& flushPool) = 0;
virtual void reallocateChangedAABBMgActorHandleMap(const PxU32 size) = 0;
// Returns the created overlaps of the given element type computed by the last update; 'count' receives the array size.
AABBOverlap* getCreatedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mCreatedOverlaps[type].size();
return mCreatedOverlaps[type].begin();
}
// Returns the destroyed overlaps of the given element type computed by the last update; 'count' receives the array size.
AABBOverlap* getDestroyedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mDestroyedOverlaps[type].size();
return mDestroyedOverlaps[type].begin();
}
void freeBuffers();
// Raw views on the objects/aggregates that fell out of the broadphase bounds last update.
struct OutOfBoundsData
{
PxU32 mNbOutOfBoundsObjects;
PxU32 mNbOutOfBoundsAggregates;
void** mOutOfBoundsObjects;
void** mOutOfBoundsAggregates;
};
virtual bool getOutOfBoundsObjects(OutOfBoundsData&) { return false; }
virtual void clearOutOfBoundsObjects() {}
void shiftOrigin(const PxVec3& shift);
virtual void visualize(PxRenderOutput& out) = 0;
virtual void releaseDeferredAggregateIds() = 0;
virtual void setGPUStateChanged() {}
virtual void setPersistentStateChanged() {}
#if PX_ENABLE_SIM_STATS
PxU32 getGpuDynamicsLostFoundPairsStats() { return mGpuDynamicsLostFoundPairsStats; }
PxU32 getGpuDynamicsTotalAggregatePairsStats() { return mGpuDynamicsTotalAggregatePairsStats; }
PxU32 getGpuDynamicsLostFoundAggregatePairsStats() { return mGpuDynamicsLostFoundAggregatePairsStats; }
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
protected:
void reserveShapeSpace(PxU32 nbShapes);
// PT: we have bitmaps here probably to quickly handle added/removed objects during same frame.
// PT: TODO: consider replacing with plain arrays (easier to parse, already existing below, etc)
PxBitMapPinned mAddedHandleMap; // PT: indexed by BoundsIndex
PxBitMapPinned mRemovedHandleMap; // PT: indexed by BoundsIndex
PxBitMapPinned mChangedHandleMap;
//Returns true if the bounds was pending insert, false otherwise
PX_FORCE_INLINE bool removeBPEntry(BoundsIndex index) // PT: only for objects passed to the BP
{
if (mAddedHandleMap.test(index)) // PT: if object had been added this frame...
{
mAddedHandleMap.reset(index); // PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet).
return true;
}
else
mRemovedHandleMap.set(index); // PT: else we need to remove it from the BP
return false;
}
// Mirror of removeBPEntry: cancels a same-frame removal, otherwise marks the entry as added.
PX_FORCE_INLINE void addBPEntry(BoundsIndex index)
{
if (mRemovedHandleMap.test(index))
mRemovedHandleMap.reset(index);
else
mAddedHandleMap.set(index);
}
//ML: we create mGroups and mContactDistance in the AABBManager constructor. PxArray takes a PxVirtualAllocator as a parameter. Therefore, if the GPU BP is used,
//we pass a pinned host memory allocator; otherwise we pass a normal allocator.
GroupsArrayPinned mGroups; // NOTE: we stick Bp::FilterGroup::eINVALID in this slot to indicate that the entry is invalid (removed or never inserted.)
PxInt32ArrayPinned mEnvIDs; // PT: should ideally be in the GPU class
PxFloatArrayPinned& mContactDistance;
VolumeDataArrayPinned mVolumeData;
BpFilter mFilters;
// Initializes the per-shape data for 'index', growing the arrays first if needed.
PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData)
{
if ((index + 1) >= mVolumeData.size())
reserveShapeSpace(index + 1);
// PT: TODO: why is this needed at all? Why aren't size() and capacity() enough?
mUsedSize = PxMax(index + 1, mUsedSize);
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
mContactDistance.begin()[index] = contactDistance;
mVolumeData[index].setUserData(userData);
}
PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData, ElementType::Enum volumeType)
{
initEntry(index, contactDistance, group, userData);
mVolumeData[index].setVolumeType(volumeType); // PT: must be done after setUserData
}
// Marks the entry at 'index' as invalid and clears its per-shape data.
PX_FORCE_INLINE void resetEntry(BoundsIndex index)
{
mGroups[index] = Bp::FilterGroup::eINVALID;
mContactDistance.begin()[index] = 0.0f;
mVolumeData[index].reset();
if(index<mEnvIDs.size())
mEnvIDs[index] = PX_INVALID_U32;
}
// PT: TODO: remove confusion between BoundsIndex and ShapeHandle here!
ShapeHandleArrayPinned mAddedHandles;
ShapeHandleArrayPinned mUpdatedHandles; // PT: TODO: only on CPU
ShapeHandleArrayPinned mRemovedHandles;
BroadPhase& mBroadPhase;
BoundsArray& mBoundsArray;
PxArray<AABBOverlap> mCreatedOverlaps[ElementType::eCOUNT];
PxArray<AABBOverlap> mDestroyedOverlaps[ElementType::eCOUNT];
PxU32 mUsedSize; // highest used value + 1
PxU32 mNbAggregates;
#if PX_ENABLE_SIM_STATS
PxU32 mGpuDynamicsLostFoundPairsStats;
PxU32 mGpuDynamicsTotalAggregatePairsStats;
PxU32 mGpuDynamicsLostFoundAggregatePairsStats;
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
#if BP_USE_AGGREGATE_GROUP_TAIL
// PT: TODO: even in the 3.4 trunk this stuff is a clumsy mess: groups are "BpHandle" suddenly passed
// to BroadPhaseUpdateData as "ShapeHandle".
//Free aggregate group ids.
PxU32 mAggregateGroupTide; // counts downwards as new aggregate groups are handed out
PxArray<Bp::FilterGroup::Enum> mFreeAggregateGroups; // PT: TODO: remove this useless array
#endif
PxU64 mContextID;
bool mOriginShifted;
#if BP_USE_AGGREGATE_GROUP_TAIL
// Returns an aggregate group id to the free list for reuse.
PX_FORCE_INLINE void releaseAggregateGroup(const Bp::FilterGroup::Enum group)
{
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
mFreeAggregateGroups.pushBack(group);
}
// Pops a recycled aggregate group id, or mints a new one from the (descending) tide.
PX_FORCE_INLINE Bp::FilterGroup::Enum getAggregateGroup()
{
PxU32 id;
if (mFreeAggregateGroups.size())
id = mFreeAggregateGroups.popBack();
else
{
id = mAggregateGroupTide--;
id <<= BP_FILTERING_TYPE_SHIFT_BIT;
id |= FilterType::AGGREGATE; // tag the id with the aggregate filter type bits
}
const Bp::FilterGroup::Enum group = Bp::FilterGroup::Enum(id);
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
return group;
}
#endif
};
} //namespace Bp
} //namespace physx
#endif //BP_AABBMANAGER_BASE_H

View File

@@ -0,0 +1,95 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABB_MANAGER_TASKS_H
#define BP_AABB_MANAGER_TASKS_H
#include "foundation/PxUserAllocated.h"
#include "CmTask.h"
namespace physx
{
namespace Bp
{
class AABBManager;
class Aggregate;
// Worker task that computes the bounds of a contiguous slice [mStart, mStart+mNbToGo)
// of an aggregates array. Configure with Init() before scheduling; runInternal() is in the .cpp.
class AggregateBoundsComputationTask : public Cm::Task, public PxUserAllocated
{
PX_NOCOPY(AggregateBoundsComputationTask)
public:
AggregateBoundsComputationTask(PxU64 contextId) :
Cm::Task (contextId),
mManager (NULL),
mStart (0),
mNbToGo (0),
mAggregates (NULL)
{}
~AggregateBoundsComputationTask() {}
virtual const char* getName() const { return "AggregateBoundsComputationTask"; }
virtual void runInternal();
// Sets the manager and the slice of aggregates this task instance will process.
void Init(AABBManager* manager, PxU32 start, PxU32 nb, Aggregate** aggregates)
{
mManager = manager;
mStart = start;
mNbToGo = nb;
mAggregates = aggregates;
}
private:
AABBManager* mManager; // not owned
PxU32 mStart; // first index into mAggregates to process
PxU32 mNbToGo; // number of aggregates to process
Aggregate** mAggregates; // not owned; shared array, each task works on its own slice
};
// Worker task running the pre-broadphase update step of an AABBManager.
// Configure with Init() before scheduling; runInternal() is defined in the .cpp.
class PreBpUpdateTask : public Cm::Task, public PxUserAllocated
{
PX_NOCOPY(PreBpUpdateTask)
public:
PreBpUpdateTask(PxU64 contextId) : Cm::Task(contextId), mManager(NULL), mNumCpuTasks(0) {}
~PreBpUpdateTask() {}
virtual const char* getName() const { return "PreBpUpdateTask"; }
virtual void runInternal();
// Sets the manager and the CPU task budget used when the task runs.
void Init(AABBManager* manager, PxU32 numCpuTasks)
{
mManager = manager;
mNumCpuTasks = numCpuTasks;
}
private:
AABBManager* mManager; // not owned
PxU32 mNumCpuTasks;
};
}
} //namespace physx
#endif // BP_AABB_MANAGER_TASKS_H

View File

@@ -0,0 +1,214 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_H
#define BP_BROADPHASE_H
#include "foundation/PxUserAllocated.h"
#include "PxBroadPhase.h"
#include "BpBroadPhaseUpdate.h"
namespace physx
{
class PxcScratchAllocator;
class PxBaseTask;
namespace Bp
{
class BroadPhaseUpdateData;
/**
\brief Base broad phase class. Functions only relevant to MBP.
*/
class BroadPhaseBase : public PxBroadPhaseRegions, public PxUserAllocated
{
public:
BroadPhaseBase() {}
virtual ~BroadPhaseBase() {}
/**
\brief Gets broad-phase caps.
\param caps [out] Broad-phase caps
*/
virtual void getCaps(PxBroadPhaseCaps& caps) const
{
// Default: no region support. MBP overrides this with a non-zero region count.
caps.mMaxNbRegions = 0;
}
// PxBroadPhaseRegions
// Default no-op region interface: only MBP-style broadphases implement regions,
// so the base class reports zero regions and rejects add/remove.
virtual PxU32 getNbRegions() const PX_OVERRIDE { return 0; }
virtual PxU32 getRegions(PxBroadPhaseRegionInfo*, PxU32, PxU32) const PX_OVERRIDE { return 0; }
virtual PxU32 addRegion(const PxBroadPhaseRegion&, bool, const PxBounds3*, const PxReal*) PX_OVERRIDE { return 0xffffffff;}
virtual bool removeRegion(PxU32) PX_OVERRIDE { return false; }
virtual PxU32 getNbOutOfBoundsObjects() const PX_OVERRIDE { return 0; }
virtual const PxU32* getOutOfBoundsObjects() const PX_OVERRIDE { return NULL; }
//~PxBroadPhaseRegions
};
/*
\brief Structure used to report created and deleted broadphase pairs
\note The indices mVolA and mVolB correspond to the bounds indices
BroadPhaseUpdateData::mCreated used by BroadPhase::update
\see BroadPhase::getCreatedPairs, BroadPhase::getDeletedPairs
*/
// A broadphase pair, stored in canonical order so that (a,b) and (b,a) compare equal.
struct BroadPhasePair
{
	// Canonicalizes the pair on construction: the smaller handle always goes into mVolA.
	BroadPhasePair(ShapeHandle volA, ShapeHandle volB)
	{
		if(volA < volB)
		{
			mVolA = volA;
			mVolB = volB;
		}
		else
		{
			mVolA = volB;
			mVolB = volA;
		}
	}

	BroadPhasePair() :
		mVolA (BP_INVALID_BP_HANDLE),
		mVolB (BP_INVALID_BP_HANDLE)
	{
	}

	ShapeHandle mVolA; // NB: mVolA <= mVolB always holds after construction
	ShapeHandle mVolB;
};
class BroadPhase : public BroadPhaseBase
{
public:
/**
\brief Instantiate a BroadPhase instance.
\param[in] bpType - the bp type (either mbp or sap). This is typically specified in PxSceneDesc.
\param[in] maxNbRegions is the expected maximum number of broad-phase regions.
\param[in] maxNbBroadPhaseOverlaps is the expected maximum number of broad-phase overlaps.
\param[in] maxNbStaticShapes is the expected maximum number of static shapes.
\param[in] maxNbDynamicShapes is the expected maximum number of dynamic shapes.
\param[in] contextID is the context ID parameter sent to the profiler
\return The instantiated BroadPhase.
\note maxNbRegions is only used if mbp is the chosen broadphase (PxBroadPhaseType::eMBP)
\note maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes and maxNbDynamicShapes are typically specified in PxSceneLimits
*/
static BroadPhase* create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID);
/**
\brief Gets the concrete broadphase type (e.g. SAP, MBP, ABP) implemented by this instance.
*/
virtual PxBroadPhaseType::Enum getType() const = 0;
/**
\brief Shutdown of the broadphase.
*/
virtual void release() = 0;
/**
\brief Updates the broadphase and computes the lists of created/deleted pairs.
\param[in] scratchAllocator - a PxcScratchAllocator instance used for temporary memory allocations.
This must be non-null.
\param[in] updateData a description of changes to the collection of aabbs since the last broadphase update.
The changes detail the indices of the bounds that have been added/updated/removed as well as an array of all
bound coordinates and an array of group ids used to filter pairs with the same id.
\see BroadPhaseUpdateData
\param[in] continuation the task that is in the queue to be executed immediately after the broadphase has completed its update. NULL is not supported.
\note In PX_CHECKED and PX_DEBUG build configurations illegal input data (that does not conform to the BroadPhaseUpdateData specifications) triggers
a special code-path that entirely bypasses the broadphase and issues a warning message to the error stream. No guarantees can be made about the
correctness/consistency of broadphase behavior with illegal input data in PX_RELEASE and PX_PROFILE configs because validity checks are not active
in these builds.
*/
virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) = 0;
/**
\brief prepare broad phase data.
*/
virtual void preBroadPhase(const Bp::BroadPhaseUpdateData& updateData) = 0;
/**
\brief Fetch the results of any asynchronous broad phase work.
*/
virtual void fetchBroadPhaseResults() = 0;
/**
\brief Get created pairs.
Note that each overlap pair is reported only on the frame when the overlap first occurs. The overlap persists
until the pair appears in the list of deleted pairs or either of the bounds in the pair is removed from the broadphase.
A created overlap must involve at least one of the bounds of the overlap pair appearing in either the created or updated list.
It is impossible for the same pair to appear simultaneously in the list of created and deleted overlap pairs.
An overlap is defined as a pair of bounds that overlap on all three axes; that is when maxA > minB and maxB > minA for all three axes.
\param nbCreatedPairs [out] The number of created aabb overlap pairs computed in the execution of update() that has just completed.
\return The array of created aabb overlap pairs computed in the execution of update() that has just completed.
*/
virtual const BroadPhasePair* getCreatedPairs(PxU32& nbCreatedPairs) const = 0;
/**
\brief Get deleted pairs.
Note that a deleted pair can only be reported if that pair has already appeared in the list of created pairs in an earlier update.
A lost overlap occurs when a pair of bounds previously overlapped on all three axes but have now separated on at least one axis.
A lost overlap must involve at least one of the bounds of the lost overlap pair appearing in the updated list.
Lost overlaps arising from removal of bounds from the broadphase do not appear in the list of deleted pairs.
It is impossible for the same pair to appear simultaneously in the list of created and deleted pairs.
\param nbDeletedPairs [out] The number of deleted overlap pairs computed in the execution of update() that has just completed.
\return The array of deleted overlap pairs computed in the execution of update() that has just completed.
*/
virtual const BroadPhasePair* getDeletedPairs(PxU32& nbDeletedPairs) const = 0;
/**
\brief After the broadphase has completed its update() function and the created/deleted pairs have been queried
with getCreatedPairs/getDeletedPairs it is desirable to free any memory that was temporarily acquired for the update but is
is no longer required post-update. This can be achieved with the function freeBuffers().
*/
virtual void freeBuffers() = 0;
/**
\brief Adjust internal structures after all bounds have been adjusted due to a scene origin shift.
*/
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) = 0;
#if PX_CHECKED
/**
\brief Test that the created/updated/removed lists obey the rules that
1. object ids can only feature in the created list if they have never been previously added or if they were previously removed.
2. object ids can only be added to the updated list if they have been previously added without being removed.
3. objects ids can only be added to the removed list if they have been previously added without being removed.
*/
virtual bool isValid(const BroadPhaseUpdateData& updateData) const = 0;
#endif
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_H

View File

@@ -0,0 +1,214 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_UPDATE_H
#define BP_BROADPHASE_UPDATE_H
#include "BpFiltering.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUnionCast.h"
namespace physx
{
namespace Bp
{
typedef PxU32 ShapeHandle;
typedef PxU32 BpHandle;
#define BP_INVALID_BP_HANDLE 0x3fffffff
class BroadPhase;
class BroadPhaseUpdateData
{
public:
	/**
	\brief A structure detailing the changes to the collection of aabbs, whose overlaps are computed in the broadphase.
	The structure consists of per-object arrays of object bounds and object groups, and three arrays that index
	into the per-object arrays, denoting the bounds which are to be created, updated and removed in the broad phase.

	* each entry in the object arrays represents the same shape or aggregate from frame to frame.
	* each entry in an index array must be less than the capacity of the per-object arrays.
	* no index value may appear in more than one index array, and may not occur more than once in that array.

	An index value is said to be "in use" if it has appeared in a created list in a previous update, and has not
	since occurred in a removed list.

	\param[in] created an array of indices describing the bounds that must be inserted into the broadphase.
	Each index in the array must not be in use.

	\param[in] updated an array of indices (referencing the boxBounds and boxGroups arrays) describing the bounds
	that have moved since the last broadphase update. Each index in the array must be in use, and each object
	whose index is in use and whose AABB has changed must appear in the update list.

	\param[in] removed an array of indices describing the bounds that must be removed from the broad phase. Each index in
	the array must be in use.

	\param[in] boxBounds an array of bounds coordinates for the AABBs to be processed by the broadphase.
	An entry is valid if its values are integer bitwise representations of floating point numbers that satisfy max>min in each dimension,
	along with a further rule that minima(maxima) must have even(odd) values.
	Each entry whose index is either in use or appears in the created array must be valid. An entry whose index is either not in use or
	appears in the removed array need not be valid.

	\param[in] boxGroups an array of group ids, one for each bound, used for pair filtering. Bounds with the same group id will not be
	reported as overlap pairs by the broad phase. Zero is reserved for static bounds.
	Entries in this array are immutable: the only way to change the group of an object is to remove it from the broad phase and reinsert
	it at a different index (recall that each index must appear at most once in the created/updated/removed lists).

	\param[in] boxContactDistances an array of contact distances, one for each bound.
	\param[in] boxEnvIDs an optional array of environment IDs, one for each bound. May be NULL (see the overload below).
	\param[in] boxesCapacity the length of the boxBounds and boxGroups arrays.
	\param[in] filter the per-object-type filtering rules (see BpFilter) shared by all pairs.
	\param[in] stateChanged flag forwarded to the broadphase implementation (exposed via getStateChanged()).
	\param[in] gpuStateChanged flag forwarded to the GPU broadphase implementation (exposed via getGpuStateChanged()).

	\see BroadPhase::update
	*/
	BroadPhaseUpdateData(
		const ShapeHandle* created, PxU32 createdSize,
		const ShapeHandle* updated, PxU32 updatedSize,
		const ShapeHandle* removed, PxU32 removedSize,
		const PxBounds3* boxBounds, const Bp::FilterGroup::Enum* boxGroups, const PxReal* boxContactDistances, const PxU32* boxEnvIDs, PxU32 boxesCapacity,
		const BpFilter& filter,
		bool stateChanged,
		bool gpuStateChanged
	) :
		mCreated		(created),
		mCreatedSize	(createdSize),
		mUpdated		(updated),
		mUpdatedSize	(updatedSize),
		mRemoved		(removed),
		mRemovedSize	(removedSize),
		mBoxBounds		(boxBounds),
		mBoxGroups		(boxGroups),
		mBoxDistances	(boxContactDistances),
		mBoxEnvIDs		(boxEnvIDs),
		mBoxesCapacity	(boxesCapacity),
		mFilter			(filter),
		mStateChanged	(stateChanged),
		mGpuStateChanged(gpuStateChanged)
	{
	}

	/**
	\brief Convenience overload for callers that have no per-bounds environment IDs.
	Delegates to the main constructor with a NULL boxEnvIDs array, so the two
	constructors cannot silently drift apart (previously the full member-init
	list was duplicated here).
	*/
	BroadPhaseUpdateData(
		const ShapeHandle* created, PxU32 createdSize,
		const ShapeHandle* updated, PxU32 updatedSize,
		const ShapeHandle* removed, PxU32 removedSize,
		const PxBounds3* boxBounds, const Bp::FilterGroup::Enum* boxGroups, const PxReal* boxContactDistances, PxU32 boxesCapacity,
		const BpFilter& filter,
		bool stateChanged,
		bool gpuStateChanged
	) :
		BroadPhaseUpdateData(	created, createdSize,
								updated, updatedSize,
								removed, removedSize,
								boxBounds, boxGroups, boxContactDistances, NULL, boxesCapacity,
								filter, stateChanged, gpuStateChanged)
	{
	}

	BroadPhaseUpdateData(const BroadPhaseUpdateData& other) :
		mCreated		(other.mCreated),
		mCreatedSize	(other.mCreatedSize),
		mUpdated		(other.mUpdated),
		mUpdatedSize	(other.mUpdatedSize),
		mRemoved		(other.mRemoved),
		mRemovedSize	(other.mRemovedSize),
		mBoxBounds		(other.mBoxBounds),
		mBoxGroups		(other.mBoxGroups),
		mBoxDistances	(other.mBoxDistances),
		mBoxEnvIDs		(other.mBoxEnvIDs),
		mBoxesCapacity	(other.mBoxesCapacity),
		mFilter			(other.mFilter),
		mStateChanged	(other.mStateChanged),
		mGpuStateChanged(other.mGpuStateChanged)
	{
	}

	// Intentionally declared but never defined: const members and a reference
	// member make assignment meaningless, so any use fails to link.
	BroadPhaseUpdateData& operator=(const BroadPhaseUpdateData& other);

	PX_FORCE_INLINE const ShapeHandle* getCreatedHandles() const { return mCreated; }
	PX_FORCE_INLINE PxU32 getNumCreatedHandles() const { return mCreatedSize; }
	PX_FORCE_INLINE const ShapeHandle* getUpdatedHandles() const { return mUpdated; }
	PX_FORCE_INLINE PxU32 getNumUpdatedHandles() const { return mUpdatedSize; }
	PX_FORCE_INLINE const ShapeHandle* getRemovedHandles() const { return mRemoved; }
	PX_FORCE_INLINE PxU32 getNumRemovedHandles() const { return mRemovedSize; }
	PX_FORCE_INLINE const PxBounds3* getAABBs() const { return mBoxBounds; }
	PX_FORCE_INLINE const Bp::FilterGroup::Enum* getGroups() const { return mBoxGroups; }
	PX_FORCE_INLINE const PxReal* getContactDistance() const { return mBoxDistances; }
	PX_FORCE_INLINE const PxU32* getEnvIDs() const { return mBoxEnvIDs; }
	PX_FORCE_INLINE PxU32 getCapacity() const { return mBoxesCapacity; }
	PX_FORCE_INLINE const BpFilter& getFilter() const { return mFilter; }
	PX_FORCE_INLINE bool getStateChanged() const { return mStateChanged; }
	PX_FORCE_INLINE bool getGpuStateChanged() const { return mGpuStateChanged; }

#if PX_CHECKED
	static bool isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp, const bool skipBoundValidation, PxU64 contextID);
	bool isValid(const bool skipBoundValidation) const;
#endif

private:
	const ShapeHandle* mCreated;
	const PxU32 mCreatedSize;
	const ShapeHandle* mUpdated;
	const PxU32 mUpdatedSize;
	const ShapeHandle* mRemoved;
	const PxU32 mRemovedSize;
	const PxBounds3* mBoxBounds;
	const Bp::FilterGroup::Enum* mBoxGroups;
	const PxReal* mBoxDistances;
	const PxU32* mBoxEnvIDs;		// optional, NULL when callers use the no-envID constructor
	const PxU32 mBoxesCapacity;
	const BpFilter& mFilter;
	const bool mStateChanged;
	const bool mGpuStateChanged;
};
} //namespace Bp
} //namespace physx
#endif

View File

@@ -0,0 +1,130 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_FILTERING_H
#define BP_FILTERING_H
#include "PxvConfig.h"
#include "foundation/PxAssert.h"
namespace physx
{
namespace Bp
{
#define BP_USE_AGGREGATE_GROUP_TAIL 1
#define BP_FILTERING_TYPE_SHIFT_BIT 3
#define BP_FILTERING_TYPE_MASK 7
/*
\brief AABBManager volumes with the same filter group value are guaranteed never to generate an overlap pair.
\note To ensure that static pairs never overlap, add static shapes with eSTATICS.
The value eDYNAMICS_BASE provides a minimum recommended group value for dynamic shapes.
If dynamics shapes are assigned group values greater than or equal to eDYNAMICS_BASE then
they are allowed to generate broadphase overlaps with statics, and other dynamic shapes provided
they have different group values.
\see AABBManager::createVolume
*/
struct FilterGroup
{
enum Enum
{
eSTATICS = 0,		// shared group for all static shapes: statics never pair with each other
eDYNAMICS_BASE = 1,	// first group id available to dynamic shapes (see getFilterGroup_Dynamics)
#if BP_USE_AGGREGATE_GROUP_TAIL
eAGGREGATE_BASE = 0xfffffffe,	// aggregate group ids are allocated downwards from here (see mAggregateGroupTide)
#endif
eINVALID = 0xffffffff	// sentinel for uninitialized/unused entries
};
};
// Object type encoded in the low BP_FILTERING_TYPE_SHIFT_BIT bits of a filter
// group (see getFilterGroup_Dynamics / groupFiltering). Values index the
// BpFilter LUT, so COUNT must stay in sync with BP_FILTERING_TYPE_MASK.
struct FilterType
{
enum Enum
{
STATIC = 0,
KINEMATIC = 1,
DYNAMIC = 2,
AGGREGATE = 3,
DEFORMABLE_SURFACE = 4,
DEFORMABLE_VOLUME = 5,
PARTICLESYSTEM = 6,
COUNT = 7
};
};
// Returns the single group shared by all static shapes. Because equal groups
// never generate an overlap pair, this guarantees no static-vs-static pairs.
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Statics()
{
return Bp::FilterGroup::eSTATICS;
}
// Builds the filter group for a dynamic shape: a per-rigid unique group id in
// the high bits, with the object type (kinematic vs dynamic) packed into the
// low BP_FILTERING_TYPE_SHIFT_BIT bits for the type-vs-type LUT.
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Dynamics(PxU32 rigidId, bool isKinematic)
{
	PxU32 typeBits;
	if(isKinematic)
		typeBits = FilterType::KINEMATIC;
	else
		typeBits = FilterType::DYNAMIC;
	const PxU32 groupId = Bp::FilterGroup::eDYNAMICS_BASE + rigidId;
	return Bp::FilterGroup::Enum((groupId << BP_FILTERING_TYPE_SHIFT_BIT) | typeBits);
}
// Convenience dispatcher: statics all share one group, dynamics get a unique
// group derived from their rigid id and kinematic flag.
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup(bool isStatic, PxU32 rigidId, bool isKinematic)
{
	if(isStatic)
		return getFilterGroup_Statics();
	return getFilterGroup_Dynamics(rigidId, isKinematic);
}
// Pair-level filtering: identical group ids never produce a pair (e.g. shapes
// belonging to the same actor); otherwise the decision depends only on the two
// object types encoded in the low bits, looked up in the BpFilter LUT.
PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1, const bool* PX_RESTRICT lut)
{
	if(group0 == group1)
	{
		PX_ASSERT((group0 & ~BP_FILTERING_TYPE_MASK) == (group1 & ~BP_FILTERING_TYPE_MASK));
		return false;
	}
	const PxU32 type0 = PxU32(group0) & BP_FILTERING_TYPE_MASK;
	const PxU32 type1 = PxU32(group1) & BP_FILTERING_TYPE_MASK;
	return lut[type0 * Bp::FilterType::COUNT + type1];
}
// Type-vs-type pair filtering table used by groupFiltering(). The constructor
// fills the COUNT x COUNT LUT from the scene's kine-kine / static-kine
// filtering modes (an entry's boolean is returned verbatim by groupFiltering;
// presumably true means "generate the pair" — confirm against the ctor in BpFiltering.cpp).
class BpFilter
{
public:
BpFilter(bool discardKineKine, bool discardStaticKine);
~BpFilter();
// Row-major LUT, indexed as lut[type0 * FilterType::COUNT + type1].
PX_FORCE_INLINE const bool* getLUT() const { return &mLUT[0][0]; }
bool mLUT[Bp::FilterType::COUNT][Bp::FilterType::COUNT];
};
}
}
#endif

View File

@@ -0,0 +1,123 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_VOLUME_DATA_H
#define BP_VOLUME_DATA_H
#include "PxvConfig.h"
#include "foundation/PxAssert.h"
namespace physx
{
namespace Bp
{
typedef PxU32 AggregateHandle; // PT: currently an index in mAggregates array
// Kind of volume stored in a VolumeData entry; encoded in the low 2 bits of
// VolumeData::mUserData (hence the eCOUNT <= 4 compile-time assert below).
struct ElementType
{
enum Enum
{
eSHAPE = 0,
eTRIGGER,
eCOUNT
};
};
PX_COMPILE_TIME_ASSERT(ElementType::eCOUNT <= 4); // 2 bits reserved for type
#define PX_CUDA_INLINE PX_CUDA_CALLABLE PX_FORCE_INLINE
// Per-bounds bookkeeping for the AABB manager. Two packed fields:
// - mUserData: an opaque pointer (Sc::ElementSim in PhysX) with the
//   ElementType stored in its low 2 bits — the pointer is assumed to be at
//   least 4-byte aligned (see the commented-out assert in setUserData).
// - mAggregate: PX_INVALID_U32 for a standalone actor, otherwise
//   (handle<<1)|LSB with LSB==1 for aggregates and LSB==0 for aggregated actors.
struct VolumeData
{
// Marks the entry as a standalone actor with no user data.
PX_CUDA_INLINE void reset()
{
mAggregate = PX_INVALID_U32;
mUserData = NULL;
}
PX_CUDA_INLINE void setSingleActor() { mAggregate = PX_INVALID_U32; }
PX_CUDA_INLINE bool isSingleActor() const { return mAggregate == PX_INVALID_U32; }
// Stores the pointer; NOTE this overwrites any type bits previously set via
// setVolumeType, so setVolumeType must be called after setUserData.
PX_CUDA_INLINE void setUserData(void* userData)
{
// PX_ASSERT(!(size_t(userData) & 3));
mUserData = userData;
}
// Returns the pointer with the low 2 type bits masked off.
PX_CUDA_INLINE void* getUserData() const
{
return reinterpret_cast<void*>(size_t(mUserData)& (~size_t(3)));
}
// Packs the element type into the low 2 bits of the stored pointer.
PX_CUDA_INLINE void setVolumeType(ElementType::Enum volumeType)
{
PX_ASSERT(volumeType < 2);
mUserData = reinterpret_cast<void*>(size_t(getUserData()) | size_t(volumeType));
}
PX_CUDA_INLINE ElementType::Enum getVolumeType() const
{
return ElementType::Enum(size_t(mUserData) & 3);
}
// Marks the entry as an aggregate (LSB==1) owning the given handle.
PX_CUDA_INLINE void setAggregate(AggregateHandle handle)
{
PX_ASSERT(handle != PX_INVALID_U32);
mAggregate = (handle << 1) | 1;
}
PX_CUDA_INLINE bool isAggregate() const { return !isSingleActor() && ((mAggregate & 1) != 0); }
// Marks the entry as an actor contained in the given aggregate (LSB==0).
PX_CUDA_INLINE void setAggregated(AggregateHandle handle)
{
PX_ASSERT(handle != PX_INVALID_U32);
mAggregate = (handle << 1) | 0;
}
PX_CUDA_INLINE bool isAggregated() const
{
return !isSingleActor() && ((mAggregate & 1) == 0);
}
// Both accessors strip the LSB tag; only meaningful when !isSingleActor().
PX_CUDA_INLINE AggregateHandle getAggregateOwner() const { return mAggregate >> 1; }
PX_CUDA_INLINE AggregateHandle getAggregate() const { return mAggregate >> 1; }
private:
void* mUserData; // PT: in PhysX this is an Sc::ElementSim ptr
// PT: TODO: consider moving this to a separate array, which wouldn't be allocated at all for people not using aggregates.
// PT: current encoding:
// aggregate == PX_INVALID_U32 => single actor
// aggregate != PX_INVALID_U32 => aggregate index<<1|LSB. LSB==1 for aggregates, LSB==0 for aggregated actors.
AggregateHandle mAggregate;
};
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,101 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpAABBManagerBase.h"
#include "BpBroadPhase.h"
using namespace physx;
using namespace Bp;
// Shared base-class setup for the AABB managers: wires in the broadphase,
// bounds array and contact distances, builds the type-vs-type pair-filtering
// LUT from the scene's kine-kine / static-kine modes, and pre-reserves the
// per-shape arrays for maxNbShapes entries.
AABBManagerBase::AABBManagerBase( BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode) :
mAddedHandleMap (allocator),
mRemovedHandleMap (allocator),
mChangedHandleMap (allocator),
mGroups (allocator),
mEnvIDs (allocator),
mContactDistance (contactDistance),
mVolumeData (allocator),
mFilters (kineKineFilteringMode == PxPairFilteringMode::eKILL, staticKineFilteringMode == PxPairFilteringMode::eKILL),
mAddedHandles (allocator),
mUpdatedHandles (allocator),
mRemovedHandles (allocator),
mBroadPhase (bp),
mBoundsArray (boundsArray),
mUsedSize (0),
mNbAggregates (0),
#if PX_ENABLE_SIM_STATS
mGpuDynamicsLostFoundPairsStats(0),
mGpuDynamicsTotalAggregatePairsStats(0),
mGpuDynamicsLostFoundAggregatePairsStats(0),
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
#if BP_USE_AGGREGATE_GROUP_TAIL
// Aggregate groups are allocated downwards from eAGGREGATE_BASE.
mAggregateGroupTide (PxU32(Bp::FilterGroup::eAGGREGATE_BASE)),
#endif
mContextID (contextID),
mOriginShifted (false)
{
PX_UNUSED(maxNbAggregates); // PT: TODO: use it or remove it
// Always reserve at least one slot so the arrays are never empty.
reserveShapeSpace(PxMax(maxNbShapes, 1u));
// mCreatedOverlaps.reserve(16000);
}
// Grows all per-shape arrays in lockstep. The requested size is rounded up to
// the next power of two to amortize future growth.
void AABBManagerBase::reserveShapeSpace(PxU32 nbTotalBounds)
{
	const PxU32 capacity = PxNextPowerOfTwo(nbTotalBounds);
	mGroups.resize(capacity, Bp::FilterGroup::eINVALID);
	mVolumeData.resize(capacity);	// KS - must be initialized so that userData is NULL for SQ-only shapes
	mContactDistance.resizeUninitialized(capacity);
	mAddedHandleMap.resize(capacity);
	mRemovedHandleMap.resize(capacity);
}
// Ensures the per-shape arrays can hold an entry at 'index', growing them when
// necessary, then invalidates that entry.
void AABBManagerBase::reserveSpaceForBounds(BoundsIndex index)
{
	const BoundsIndex required = index + 1;
	if(required >= mVolumeData.size())
		reserveShapeSpace(required);
	resetEntry(index);	// KS - make sure this entry is flagged as invalid
}
// Releases memory the broadphase only needed transiently during the last
// update (see BroadPhase::freeBuffers).
void AABBManagerBase::freeBuffers()
{
// PT: TODO: investigate if we need more stuff here
mBroadPhase.freeBuffers();
}
// Forwards a scene origin shift to the broadphase (bounds themselves were
// already adjusted by the caller) and records that a shift happened.
void AABBManagerBase::shiftOrigin(const PxVec3& shift)
{
mBroadPhase.shiftOrigin(shift, mBoundsArray.begin(), mContactDistance.begin());
mOriginShifted = true;
}

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhase.h"
#include "BpBroadPhaseSap.h"
#include "BpBroadPhaseMBP.h"
#include "BpBroadPhaseABP.h"
using namespace physx;
using namespace Bp;
// Factory for the concrete broadphase implementation requested in PxSceneDesc.
// Returns NULL (after asserting) for unsupported types.
BroadPhase* BroadPhase::create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID)
{
	switch(bpType)
	{
		case PxBroadPhaseType::eABP:
			return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, false);
		case PxBroadPhaseType::ePABP:
			return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, true);
		case PxBroadPhaseType::eMBP:
			return PX_NEW(BroadPhaseMBP)(maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
		case PxBroadPhaseType::eSAP:
			return PX_NEW(BroadPhaseSap)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
		default:
		{
			PX_ASSERT(0);
			return NULL;
		}
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,98 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_ABP_H
#define BP_BROADPHASE_ABP_H
#include "foundation/PxArray.h"
#include "BpBroadPhase.h"
#include "PxPhysXConfig.h"
#include "BpBroadPhaseUpdate.h"
#define ABP_MT2
namespace internalABP{
class ABP;
}
namespace physx
{
namespace Bp
{
// CPU broadphase wrapping the internal ABP implementation. The same class
// serves both eABP (single-threaded) and ePABP (multi-threaded) depending on
// the enableMT constructor flag.
class BroadPhaseABP : public BroadPhase
{
PX_NOCOPY(BroadPhaseABP)
public:
BroadPhaseABP( PxU32 maxNbBroadPhaseOverlaps,
PxU32 maxNbStaticShapes,
PxU32 maxNbDynamicShapes,
PxU64 contextID,
bool enableMT);
virtual ~BroadPhaseABP();
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return mEnableMT ? PxBroadPhaseType::ePABP : PxBroadPhaseType::eABP; }
virtual void release() PX_OVERRIDE PX_FINAL { PX_DELETE_THIS; }
virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE;
// ABP does its work synchronously in update(), so these two are no-ops.
virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
virtual const BroadPhasePair* getCreatedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual const BroadPhasePair* getDeletedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
//~BroadPhase
internalABP::ABP* mABP; // PT: TODO: aggregate
// Cached view of the most recent update's input lists.
PxU32 mNbAdded;
PxU32 mNbUpdated;
PxU32 mNbRemoved;
const BpHandle* mCreatedHandles;
const BpHandle* mUpdatedHandles;
const BpHandle* mRemovedHandles;
// Pair lists reported via getCreatedPairs/getDeletedPairs.
PxArray<BroadPhasePair> mCreated;
PxArray<BroadPhasePair> mDeleted;
const Bp::FilterGroup::Enum*mGroups;
const BpFilter* mFilter;
const PxU64 mContextID;
const bool mEnableMT;	// true => ePABP (multi-threaded) mode
void addObjects();
void removeObjects();
void updateObjects();
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_ABP_H

View File

@@ -0,0 +1,310 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_INTEGER_AABB_H
#define BP_BROADPHASE_INTEGER_AABB_H
#include "BpFiltering.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUnionCast.h"
namespace physx
{
namespace Bp
{
/*
\brief Losslessly encode a float's bit pattern into a PxU32 whose unsigned
ordering matches the numeric ordering of the original floats.
\param[in] ir the raw IEEE-754 bit pattern of the float to encode
\return the order-preserving integer encoding
\see decodeFloat
*/
PX_FORCE_INLINE PxU32 encodeFloat(PxU32 ir)
{
	// Sign-flip trick: negative floats are bitwise-complemented (reversing
	// their sequence), positive floats get the sign bit set. -0 and +0 map to
	// adjacent codes, which makes no practical difference here.
	return (ir & PX_SIGN_BITMASK) ? ~ir : (ir | PX_SIGN_BITMASK);
}
/*
\brief Decode an integer produced by encodeFloat back to the original float bit pattern
\param[in] ir the encoded value
\return the raw IEEE-754 bit pattern of the original float
\see encodeFloat
*/
PX_FORCE_INLINE PxU32 decodeFloat(PxU32 ir)
{
	if(ir & PX_SIGN_BITMASK) //was positive?
		return ir & ~PX_SIGN_BITMASK; //remove the sign bit encodeFloat set
	else
		return ~ir; //undo reversal of negative numbers
}
/**
\brief Integer representation of PxBounds3 used by BroadPhase
\see BroadPhaseUpdateData
*/
typedef PxU32 ValType;
// Integer-encoded AABB: the six float extents are stored as order-preserving
// integer encodings (see encodeFloat) with grid snapping, so broadphase
// comparisons can be done with cheap unsigned integer compares.
class IntegerAABB
{
public:
	// Indices of the six encoded extents stored in mMinMax.
	enum
	{
		MIN_X = 0,
		MIN_Y,
		MIN_Z,
		MAX_X,
		MAX_Y,
		MAX_Z
	};

	// Construct from float bounds, inflated by contactDistance on every side,
	// then encoded to integers (with conservative grid snapping, see encode()).
	IntegerAABB(const PxBounds3& b, PxReal contactDistance)
	{
		const PxVec3 dist(contactDistance);
		encode(PxBounds3(b.minimum - dist, b.maximum + dist));
	}

	/*
	\brief Return the minimum along a specified axis
	\param[in] i is the axis
	*/
	PX_FORCE_INLINE ValType getMin(PxU32 i) const { return (mMinMax)[MIN_X+i]; }

	/*
	\brief Return the maximum along a specified axis
	\param[in] i is the axis
	*/
	PX_FORCE_INLINE ValType getMax(PxU32 i) const { return (mMinMax)[MAX_X+i]; }

	/*
	\brief Return one of the six min/max values of the bound
	\param[in] isMax determines whether a min (0) or max (1) value is returned
	\param[in] index is the axis
	*/
	PX_FORCE_INLINE ValType getExtent(PxU32 isMax, PxU32 index) const
	{
		PX_ASSERT(isMax<=1);
		// MIN_* and MAX_* indices are 3 apart, so 3*isMax selects the group.
		return (mMinMax)[3*isMax+index];
	}

	/*
	\brief Return the minimum on the x axis
	*/
	PX_FORCE_INLINE ValType getMinX() const { return mMinMax[MIN_X]; }

	/*
	\brief Return the minimum on the y axis
	*/
	PX_FORCE_INLINE ValType getMinY() const { return mMinMax[MIN_Y]; }

	/*
	\brief Return the minimum on the z axis
	*/
	PX_FORCE_INLINE ValType getMinZ() const { return mMinMax[MIN_Z]; }

	/*
	\brief Return the maximum on the x axis
	*/
	PX_FORCE_INLINE ValType getMaxX() const { return mMinMax[MAX_X]; }

	/*
	\brief Return the maximum on the y axis
	*/
	PX_FORCE_INLINE ValType getMaxY() const { return mMinMax[MAX_Y]; }

	/*
	\brief Return the maximum on the z axis
	*/
	PX_FORCE_INLINE ValType getMaxZ() const { return mMinMax[MAX_Z]; }

	/*
	\brief Encode float bounds so they are stored as integer bounds
	\param[in] bounds is the bounds to be encoded
	\note The encoding snaps minima down and maxima up to the grid defined by
	eGRID_SNAP_VAL (see encodeFloatMin/encodeFloatMax). This keeps the bounds
	constant when its shape is subjected to small global pose perturbations.
	In turn, this helps reduce computational effort in the broadphase update by
	reducing the amount of sorting required on near-stationary bodies that are
	aligned along one or more axis.
	\note NOTE(review): the original comment claimed "minima are even, maxima
	are odd", but the code sets bit 2 (1<<2) on maxima rather than bit 0; since
	the snapped values have their low eGRID_SNAP_VAL bits clear, this still
	guarantees max > min for degenerate boxes — confirm intent before changing.
	\see decode
	*/
	PX_FORCE_INLINE void encode(const PxBounds3& bounds)
	{
		const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&bounds.minimum.x);
		const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&bounds.maximum.x);
		//Avoid min=max by enforcing the rule that mins are even and maxs are odd.
		mMinMax[MIN_X] = encodeFloatMin(min[0]);
		mMinMax[MIN_Y] = encodeFloatMin(min[1]);
		mMinMax[MIN_Z] = encodeFloatMin(min[2]);
		mMinMax[MAX_X] = encodeFloatMax(max[0]) | (1<<2);
		mMinMax[MAX_Y] = encodeFloatMax(max[1]) | (1<<2);
		mMinMax[MAX_Z] = encodeFloatMax(max[2]) | (1<<2);
	}

	/*
	\brief Decode from integer bounds to float bounds
	\param[out] bounds is the decoded float bounds
	\note Encode followed by decode will produce a float bound larger than the original
	due to the snapping in encode.
	\see encode
	*/
	PX_FORCE_INLINE void decode(PxBounds3& bounds) const
	{
		PxU32* PX_RESTRICT min = PxUnionCast<PxU32*, PxF32*>(&bounds.minimum.x);
		PxU32* PX_RESTRICT max = PxUnionCast<PxU32*, PxF32*>(&bounds.maximum.x);
		min[0] = decodeFloat(mMinMax[MIN_X]);
		min[1] = decodeFloat(mMinMax[MIN_Y]);
		min[2] = decodeFloat(mMinMax[MIN_Z]);
		max[0] = decodeFloat(mMinMax[MAX_X]);
		max[1] = decodeFloat(mMinMax[MAX_Y]);
		max[2] = decodeFloat(mMinMax[MAX_Z]);
	}

	/*
	\brief Encode a single minimum value from float bounds to integer bounds
	\note Snaps the encoded value DOWN to the previous multiple of
	(1<<eGRID_SNAP_VAL), i.e. conservatively shrinks the minimum.
	\see encode
	*/
	static PX_FORCE_INLINE ValType encodeFloatMin(PxU32 source)
	{
		return ((encodeFloat(source) >> eGRID_SNAP_VAL) - 1) << eGRID_SNAP_VAL;
	}

	/*
	\brief Encode a single maximum value from float bounds to integer bounds
	\note Snaps the encoded value UP to the next multiple of
	(1<<eGRID_SNAP_VAL), i.e. conservatively grows the maximum.
	\see encode
	*/
	static PX_FORCE_INLINE ValType encodeFloatMax(PxU32 source)
	{
		return ((encodeFloat(source) >> eGRID_SNAP_VAL) + 1) << eGRID_SNAP_VAL;
	}

	/*
	\brief Shift the encoded bounds by a specified vector
	\param[in] shift is the vector used to shift the bounds
	\note Implemented by decoding to floats, translating, and re-encoding, so
	the result picks up the usual snapping of encode().
	*/
	PX_FORCE_INLINE void shift(const PxVec3& shift)
	{
		::physx::PxBounds3 elemBounds;
		decode(elemBounds);
		elemBounds.minimum -= shift;
		elemBounds.maximum -= shift;
		encode(elemBounds);
	}

	/*
	\brief Test if this aabb lies entirely inside another aabb
	\param[in] box is the other box
	\return True if this aabb lies entirely inside box
	*/
	PX_INLINE bool isInside(const IntegerAABB& box) const
	{
		if(box.mMinMax[MIN_X]>mMinMax[MIN_X]) return false;
		if(box.mMinMax[MIN_Y]>mMinMax[MIN_Y]) return false;
		if(box.mMinMax[MIN_Z]>mMinMax[MIN_Z]) return false;
		if(box.mMinMax[MAX_X]<mMinMax[MAX_X]) return false;
		if(box.mMinMax[MAX_Y]<mMinMax[MAX_Y]) return false;
		if(box.mMinMax[MAX_Z]<mMinMax[MAX_Z]) return false;
		return true;
	}

	/*
	\brief Test if this aabb and another intersect (touching counts as intersecting)
	\param[in] b is the other box
	\return True if this aabb and b intersect
	*/
	PX_FORCE_INLINE bool intersects(const IntegerAABB& b) const
	{
		return !(b.mMinMax[MIN_X] > mMinMax[MAX_X] || mMinMax[MIN_X] > b.mMinMax[MAX_X] ||
				b.mMinMax[MIN_Y] > mMinMax[MAX_Y] || mMinMax[MIN_Y] > b.mMinMax[MAX_Y] ||
				b.mMinMax[MIN_Z] > mMinMax[MAX_Z] || mMinMax[MIN_Z] > b.mMinMax[MAX_Z]);
	}

	// 1D overlap test along one axis (0..2); touching counts as intersecting.
	PX_FORCE_INLINE bool intersects1D(const IntegerAABB& b, const PxU32 axis) const
	{
		const PxU32 maxAxis = axis + 3;	// MAX_* indices are 3 after MIN_*
		return !(b.mMinMax[axis] > mMinMax[maxAxis] || mMinMax[axis] > b.mMinMax[maxAxis]);
	}

	/*
	\brief Expand bounds to include another
	\note This is used to compute the aggregate bounds of multiple shape bounds
	\param[in] b is the bounds to be included
	*/
	PX_FORCE_INLINE void include(const IntegerAABB& b)
	{
		mMinMax[MIN_X] = PxMin(mMinMax[MIN_X], b.mMinMax[MIN_X]);
		mMinMax[MIN_Y] = PxMin(mMinMax[MIN_Y], b.mMinMax[MIN_Y]);
		mMinMax[MIN_Z] = PxMin(mMinMax[MIN_Z], b.mMinMax[MIN_Z]);
		mMinMax[MAX_X] = PxMax(mMinMax[MAX_X], b.mMinMax[MAX_X]);
		mMinMax[MAX_Y] = PxMax(mMinMax[MAX_Y], b.mMinMax[MAX_Y]);
		mMinMax[MAX_Z] = PxMax(mMinMax[MAX_Z], b.mMinMax[MAX_Z]);
	}

	/*
	\brief Set the bounds to (max, max, max), (min, min, min), i.e. an empty
	(inverted) box that any include() will overwrite.
	*/
	PX_INLINE void setEmpty()
	{
		mMinMax[MIN_X] = mMinMax[MIN_Y] = mMinMax[MIN_Z] = 0xff7fffff; //PX_IR(PX_MAX_F32);
		mMinMax[MAX_X] = mMinMax[MAX_Y] = mMinMax[MAX_Z] = 0x00800000; ///PX_IR(0.0f);
	}

	// The six encoded extents, indexed by the MIN_*/MAX_* enum above.
	ValType mMinMax[6];

private:
	enum
	{
		// log2 of the snapping grid used by encodeFloatMin/encodeFloatMax.
		eGRID_SNAP_VAL = 4
	};
};
// Encode the contact-distance-inflated minimum of float bounds along one axis,
// using the same conservative snapping as IntegerAABB::encode().
PX_FORCE_INLINE ValType encodeMin(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.minimum[axis] - contactDistance;
	const PxU32 bits = PxUnionCast<PxU32, PxF32>(inflated);
	return IntegerAABB::encodeFloatMin(bits);
}
// Encode the contact-distance-inflated maximum of float bounds along one axis.
// The extra (1<<2) bit mirrors IntegerAABB::encode() so an encoded max never
// equals an encoded min for degenerate boxes.
PX_FORCE_INLINE ValType encodeMax(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.maximum[axis] + contactDistance;
	const PxU32 bits = PxUnionCast<PxU32, PxF32>(inflated);
	return IntegerAABB::encodeFloatMax(bits) | (1<<2);
}
} //namespace Bp
} //namespace physx
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,112 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_H
#define BP_BROADPHASE_MBP_H
#include "BpBroadPhase.h"
#include "BpBroadPhaseMBPCommon.h"
#include "foundation/PxArray.h"
namespace internalMBP
{
class MBP;
}
namespace physx
{
namespace Bp
{
// Multi-Box-Pruning broadphase (PxBroadPhaseType::eMBP): adapter that wraps the
// internal MBP implementation and exposes it through the Bp::BroadPhase
// interface, including user-defined broadphase regions.
class BroadPhaseMBP : public BroadPhase
{
	PX_NOCOPY(BroadPhaseMBP)
public:
	BroadPhaseMBP(	PxU32 maxNbRegions,
					PxU32 maxNbBroadPhaseOverlaps,
					PxU32 maxNbStaticShapes,
					PxU32 maxNbDynamicShapes,
					PxU64 contextID);
	virtual ~BroadPhaseMBP();

	// BroadPhaseBase
	virtual void getCaps(PxBroadPhaseCaps& caps) const PX_OVERRIDE PX_FINAL;
	//~BroadPhaseBase

	// PxBroadPhaseRegions
	virtual PxU32 getNbRegions() const PX_OVERRIDE PX_FINAL;
	virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const PX_OVERRIDE PX_FINAL;
	virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance) PX_OVERRIDE PX_FINAL;
	virtual bool removeRegion(PxU32 handle) PX_OVERRIDE PX_FINAL;
	virtual PxU32 getNbOutOfBoundsObjects() const PX_OVERRIDE PX_FINAL;
	virtual const PxU32* getOutOfBoundsObjects() const PX_OVERRIDE PX_FINAL;
	//~PxBroadPhaseRegions

	// BroadPhase
	virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return PxBroadPhaseType::eMBP; }
	virtual void release() PX_OVERRIDE PX_FINAL { PX_DELETE_THIS; }
	virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE;
	virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
	virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
	virtual const BroadPhasePair* getCreatedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
	virtual const BroadPhasePair* getDeletedPairs(PxU32&) const PX_OVERRIDE PX_FINAL;
	virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
	virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
	virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
	//~BroadPhase

	internalMBP::MBP* mMBP; // PT: TODO: aggregate
	MBP_Handle* mMapping;	// Per-object MBP_Handle array, resized via allocateMappingArray.
							// NOTE(review): presumably maps user handles to internal MBP handles — confirm against BpBroadPhaseMBP.cpp.
	PxU32 mCapacity;		// Capacity of mMapping.
	PxArray<BroadPhasePair> mCreated;	// Pairs created during the last update, reported via getCreatedPairs.
	PxArray<BroadPhasePair> mDeleted;	// Pairs deleted during the last update, reported via getDeletedPairs.
	const Bp::FilterGroup::Enum* mGroups;
	const BpFilter* mFilter;
	const PxU64 mContextID;	// Profiling/debug context id, fixed at construction.

	void setUpdateData(const BroadPhaseUpdateData& updateData);
	void addObjects(const BroadPhaseUpdateData& updateData);
	void removeObjects(const BroadPhaseUpdateData& updateData);
	void updateObjects(const BroadPhaseUpdateData& updateData);

	void update();
	void postUpdate();
	void allocateMappingArray(PxU32 newCapacity);

	PxU32 getCurrentNbPairs() const;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_MBP_H

View File

@@ -0,0 +1,198 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_COMMON_H
#define BP_BROADPHASE_MBP_COMMON_H
#include "PxPhysXConfig.h"
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Bp
{
#define MBP_USE_WORDS
#define MBP_USE_NO_CMP_OVERLAP
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
#define MBP_SIMD_OVERLAP
#endif
#ifdef MBP_USE_WORDS
typedef PxU16 MBP_Index;
#else
typedef PxU32 MBP_Index;
#endif
typedef PxU32 MBP_ObjectIndex; // PT: index in mMBP_Objects
typedef PxU32 MBP_Handle; // PT: returned to MBP users, combination of index/flip-flop/static-bit
// Integer AABB used by MBP: the six extents are order-preserving encoded
// floats with the lowest bit dropped (see initFrom2/decode).
struct IAABB : public PxUserAllocated
{
	// True if this box lies entirely inside "box" (boundaries inclusive).
	PX_FORCE_INLINE bool isInside(const IAABB& box) const
	{
		return	box.mMinX <= mMinX && box.mMinY <= mMinY && box.mMinZ <= mMinZ
			&&	box.mMaxX >= mMaxX && box.mMaxY >= mMaxY && box.mMaxZ >= mMaxZ;
	}

	// Overlap test where touching boxes (shared boundary) count as intersecting.
	PX_FORCE_INLINE PxIntBool intersects(const IAABB& a) const
	{
		const bool separated =	mMaxX < a.mMinX || a.mMaxX < mMinX
							||	mMaxY < a.mMinY || a.mMaxY < mMinY
							||	mMaxZ < a.mMinZ || a.mMaxZ < mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}

	// Overlap test where merely touching boxes do NOT count as intersecting.
	PX_FORCE_INLINE PxIntBool intersectNoTouch(const IAABB& a) const
	{
		const bool separated =	mMaxX <= a.mMinX || a.mMaxX <= mMinX
							||	mMaxY <= a.mMinY || a.mMaxY <= mMinY
							||	mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}

	// Encode float bounds: order-preserving float encoding, dropping the lowest bit.
	PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT src = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(src[0])>>1;
		mMinY = encodeFloat(src[1])>>1;
		mMinZ = encodeFloat(src[2])>>1;
		mMaxX = encodeFloat(src[3])>>1;
		mMaxY = encodeFloat(src[4])>>1;
		mMaxZ = encodeFloat(src[5])>>1;
	}

	// Decode back to float bounds (lossy: the bit dropped by initFrom2 is gone).
	PX_FORCE_INLINE void decode(PxBounds3& box) const
	{
		PxU32* PX_RESTRICT dst = reinterpret_cast<PxU32*>(&box.minimum.x);
		dst[0] = decodeFloat(mMinX<<1);
		dst[1] = decodeFloat(mMinY<<1);
		dst[2] = decodeFloat(mMinZ<<1);
		dst[3] = decodeFloat(mMaxX<<1);
		dst[4] = decodeFloat(mMaxY<<1);
		dst[5] = decodeFloat(mMaxZ<<1);
	}

	// Indexed access along axis i (0..2); relies on the member order below.
	PX_FORCE_INLINE PxU32 getMin(PxU32 i) const { return (&mMinX)[i]; }
	PX_FORCE_INLINE PxU32 getMax(PxU32 i) const { return (&mMaxX)[i]; }

	// Member order matters: getMin/getMax index off &mMinX / &mMaxX — do not reorder.
	PxU32 mMinX;
	PxU32 mMinY;
	PxU32 mMinZ;
	PxU32 mMaxX;
	PxU32 mMaxY;
	PxU32 mMaxZ;
};
// Integer AABB variant with a different member layout (min/max X adjacent).
// NOTE(review): the layout presumably suits the SIMD overlap path enabled by
// MBP_SIMD_OVERLAP — confirm before reordering members.
struct SIMD_AABB : public PxUserAllocated
{
	// Encode float bounds with the full order-preserving encoding (no bit dropped).
	PX_FORCE_INLINE void initFrom(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT src = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(src[0]);
		mMinY = encodeFloat(src[1]);
		mMinZ = encodeFloat(src[2]);
		mMaxX = encodeFloat(src[3]);
		mMaxY = encodeFloat(src[4]);
		mMaxZ = encodeFloat(src[5]);
	}

	// Encode float bounds, dropping the lowest bit (pairs with decode()).
	PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT src = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(src[0])>>1;
		mMinY = encodeFloat(src[1])>>1;
		mMinZ = encodeFloat(src[2])>>1;
		mMaxX = encodeFloat(src[3])>>1;
		mMaxY = encodeFloat(src[4])>>1;
		mMaxZ = encodeFloat(src[5])>>1;
	}

	// Decode back to float bounds; inverse of initFrom2 (lossy by one bit).
	PX_FORCE_INLINE void decode(PxBounds3& box) const
	{
		PxU32* PX_RESTRICT dst = reinterpret_cast<PxU32*>(&box.minimum.x);
		dst[0] = decodeFloat(mMinX<<1);
		dst[1] = decodeFloat(mMinY<<1);
		dst[2] = decodeFloat(mMinZ<<1);
		dst[3] = decodeFloat(mMaxX<<1);
		dst[4] = decodeFloat(mMaxY<<1);
		dst[5] = decodeFloat(mMaxZ<<1);
	}

	// True if this box lies entirely inside "box" (boundaries inclusive).
	PX_FORCE_INLINE bool isInside(const SIMD_AABB& box) const
	{
		return	box.mMinX <= mMinX && box.mMinY <= mMinY && box.mMinZ <= mMinZ
			&&	box.mMaxX >= mMaxX && box.mMaxY >= mMaxY && box.mMaxZ >= mMaxZ;
	}

	// Overlap test where touching boxes count as intersecting.
	PX_FORCE_INLINE PxIntBool intersects(const SIMD_AABB& a) const
	{
		const bool separated =	mMaxX < a.mMinX || a.mMaxX < mMinX
							||	mMaxY < a.mMinY || a.mMaxY < mMinY
							||	mMaxZ < a.mMinZ || a.mMaxZ < mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}

	// Overlap test where merely touching boxes do NOT count as intersecting.
	PX_FORCE_INLINE PxIntBool intersectNoTouch(const SIMD_AABB& a) const
	{
		const bool separated =	mMaxX <= a.mMinX || a.mMaxX <= mMinX
							||	mMaxY <= a.mMinY || a.mMaxY <= mMinY
							||	mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ;
		return separated ? PxIntFalse : PxIntTrue;
	}

	// Deliberate layout (differs from IAABB) — keep this exact order.
	PxU32 mMinX;
	PxU32 mMaxX;
	PxU32 mMinY;
	PxU32 mMinZ;
	PxU32 mMaxY;
	PxU32 mMaxZ;
};
}
} // namespace physx
#endif // BP_BROADPHASE_MBP_COMMON_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,211 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_H
#define BP_BROADPHASE_SAP_H
#include "BpBroadPhase.h"
#include "BpBroadPhaseSapAux.h"
#include "CmPool.h"
#include "CmTask.h"
namespace physx
{
class PxcScratchAllocator;
namespace Gu
{
class Axes;
}
namespace Bp
{
class SapEndPoint;
class IntegerAABB;
// Task that runs one axis of the SAP batch update (see BroadPhaseSap::batchUpdate).
// One instance per axis is stored in BroadPhaseSap::mBatchUpdateTasks[3]; each
// task collects the pairs found along its axis into its own buffer.
class BroadPhaseBatchUpdateWorkTask: public Cm::Task
{
public:
	BroadPhaseBatchUpdateWorkTask(PxU64 contextId=0) :
		Cm::Task		(contextId),
		mSap			(NULL),
		mAxis			(0xffffffff),	// Invalid until set() is called.
		mPairs			(NULL),
		mPairsSize		(0),
		mPairsCapacity	(0)
	{
	}

	virtual void runInternal();
	virtual const char* getName() const { return "BpBroadphaseSap.batchUpdate"; }

	// Bind this task to a broadphase instance and the axis it should process.
	void set(class BroadPhaseSap* sap, const PxU32 axis) {mSap = sap; mAxis = axis;}

	// Result buffer accessors (valid after the task has run).
	BroadPhasePair* getPairs() const {return mPairs;}
	PxU32 getPairsSize() const {return mPairsSize;}
	PxU32 getPairsCapacity() const {return mPairsCapacity;}

	// Install an externally owned pair buffer / record how much of it was used.
	void setPairs(BroadPhasePair* pairs, const PxU32 pairsCapacity) {mPairs = pairs; mPairsCapacity = pairsCapacity;}
	void setNumPairs(const PxU32 pairsSize) {mPairsSize=pairsSize;}

private:
	class BroadPhaseSap* mSap;	// Owning broadphase (not owned by the task).
	PxU32 mAxis;				// Axis index this task processes.
	BroadPhasePair* mPairs;		// Pair buffer for this axis.
	PxU32 mPairsSize;			// Number of pairs written into mPairs.
	PxU32 mPairsCapacity;		// Capacity of mPairs.
};
//KS - TODO, this could be reduced to U16 in smaller scenes
// Index range [mStartIndex, mEndIndex] into the sorted endpoint arrays.
// NOTE(review): presumably delimits a contiguous run of endpoints affected by
// updated boxes ("activity pocket") — confirm against the batchUpdate code.
struct BroadPhaseActivityPocket
{
	PxU32 mStartIndex;
	PxU32 mEndIndex;
};
// Sweep-and-prune broadphase (PxBroadPhaseType::eSAP): keeps the box extents
// sorted along all three axes and incrementally maintains the set of
// overlapping pairs as boxes are created, updated and removed.
class BroadPhaseSap : public BroadPhase
{
	PX_NOCOPY(BroadPhaseSap)
public:
	friend class BroadPhaseBatchUpdateWorkTask;
	friend class SapUpdateWorkTask;
	friend class SapPostUpdateWorkTask;

	BroadPhaseSap(const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID);
	virtual ~BroadPhaseSap();

	// BroadPhase
	virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE PX_FINAL { return PxBroadPhaseType::eSAP; }
	virtual void release() PX_OVERRIDE PX_FINAL;
	virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE PX_FINAL {}
	virtual void fetchBroadPhaseResults() PX_OVERRIDE PX_FINAL {}
	virtual const BroadPhasePair* getCreatedPairs(PxU32& nbCreatedPairs) const PX_OVERRIDE PX_FINAL { nbCreatedPairs = mCreatedPairsSize; return mCreatedPairsArray; }
	virtual const BroadPhasePair* getDeletedPairs(PxU32& nbDeletedPairs) const PX_OVERRIDE PX_FINAL { nbDeletedPairs = mDeletedPairsSize; return mDeletedPairsArray; }
	virtual void freeBuffers() PX_OVERRIDE PX_FINAL;
	virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE PX_FINAL;
#if PX_CHECKED
	virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE PX_FINAL;
#endif
	//~BroadPhase

private:
	void resizeBuffers();

	PxcScratchAllocator* mScratchAllocator;	// Scratch allocator handed in through update().

	//Data passed in from updateV.
	const BpHandle* mCreated;
	PxU32 mCreatedSize;
	const BpHandle* mRemoved;
	PxU32 mRemovedSize;
	const BpHandle* mUpdated;
	PxU32 mUpdatedSize;
	const PxBounds3* mBoxBoundsMinMax;
	const Bp::FilterGroup::Enum* mBoxGroups;
	const BpFilter* mFilter;
	const PxReal* mContactDistance;
	PxU32 mBoxesCapacity;

	//Boxes.
	SapBox1D* mBoxEndPts[3]; //Position of box min/max in sorted arrays of end pts (needs to have mBoxesCapacity).

	//End pts (endpts of boxes sorted along each axis).
	ValType* mEndPointValues[3]; //Sorted arrays of min and max box coords
	BpHandle* mEndPointDatas[3]; //Corresponding owner id and isMin/isMax for each entry in the sorted arrays of min and max box coords.

	PxU8* mBoxesUpdated;					// Per-box "was updated this frame" flags.
	BpHandle* mSortedUpdateElements;		// Updated box handles in sorted order.
	BroadPhaseActivityPocket* mActivityPockets;	// Ranges of endpoints touched by updates.
	BpHandle* mListNext;					// Linked-list links over endpoints.
	BpHandle* mListPrev;

	PxU32 mBoxesSize;		//Number of sorted boxes + number of unsorted (new) boxes
	PxU32 mBoxesSizePrev;	//Number of sorted boxes
	PxU32 mEndPointsCapacity; //Capacity of sorted arrays.

	//Default maximum number of overlap pairs
	PxU32 mDefaultPairsCapacity;

	//Box-box overlap pairs created or removed each update.
	BpHandle* mData;
	PxU32 mDataSize;
	PxU32 mDataCapacity;

	//All current box-box overlap pairs.
	SapPairManager mPairs;

	//Created and deleted overlap pairs reported back through api.
	BroadPhasePair* mCreatedPairsArray;
	PxU32 mCreatedPairsSize;
	PxU32 mCreatedPairsCapacity;
	BroadPhasePair* mDeletedPairsArray;
	PxU32 mDeletedPairsSize;
	PxU32 mDeletedPairsCapacity;
	PxU32 mActualDeletedPairSize;

	// Latch the per-frame update data; returns false on invalid input.
	bool setUpdateData(const BroadPhaseUpdateData& updateData);
	void update();
	void postUpdate();

	//Batch create/remove/update.
	void batchCreate();
	void batchRemove();
	void batchUpdate();

	// Per-axis batch update; pairs buffer may be (re)allocated by the callee.
	void batchUpdate(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);
	void batchUpdateFewUpdates(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);

	void ComputeSortedLists( //const PxVec4& globalMin, const PxVec4& globalMax,
		BpHandle* PX_RESTRICT newBoxIndicesSorted, PxU32& newBoxIndicesCount, BpHandle* PX_RESTRICT oldBoxIndicesSorted, PxU32& oldBoxIndicesCount,
		bool& allNewBoxesStatics, bool& allOldBoxesStatics);

	BroadPhaseBatchUpdateWorkTask mBatchUpdateTasks[3];	// One task per axis.

	const PxU64 mContextID;	// Profiling/debug context id, fixed at construction.

#if PX_DEBUG
	bool isSelfOrdered() const;
	bool isSelfConsistent() const;
#endif
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_H

View File

@@ -0,0 +1,911 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhaseSapAux.h"
#include "PxcScratchAllocator.h"
namespace physx
{
namespace Bp
{
// Exchange the contents of two broadphase handles.
PX_FORCE_INLINE void PxBpHandleSwap(BpHandle& a, BpHandle& b)
{
	const BpHandle tmp = a;
	a = b;
	b = tmp;
}
// Order a handle pair in place so that id0 <= id1.
PX_FORCE_INLINE void Sort(BpHandle& id0, BpHandle& id1)
{
	if(id0 > id1)
		PxBpHandleSwap(id0, id1);
}
// True unless p is exactly the pair (id0, id1); ids are expected pre-sorted
// to match the (mVolA, mVolB) storage order.
PX_FORCE_INLINE bool DifferentPair(const BroadPhasePair& p, BpHandle id0, BpHandle id1)
{
	return !(p.mVolA == id0 && p.mVolB == id1);
}
// 32-bit integer mixing function (appears to be a Thomas Wang style hash):
// a fixed sequence of shift/add/xor steps that spreads the input bits so the
// low bits — later masked with a power-of-two mask — are well distributed.
// NOTE(review): operates on signed int, so right shifts of negative values are
// implementation-defined; this must stay byte-identical to keep hash values stable.
PX_FORCE_INLINE int Hash32Bits_1(int key)
{
	key += ~(key << 15);
	key ^= (key >> 10);
	key += (key << 3);
	key ^= (key >> 6);
	key += ~(key << 11);
	key ^= (key >> 16);
	return key;
}
// Hash a handle pair by packing both ids into one 32-bit word (id1 in the high
// half) and mixing it with Hash32Bits_1.
PX_FORCE_INLINE PxU32 Hash(BpHandle id0, BpHandle id1)
{
	const PxU32 packed = PxU32(id0) | (PxU32(id1) << 16);
	return PxU32(Hash32Bits_1(int(packed)));
}
///////////////////////////////////////////////////////////////////////////////
// Construct an empty pair manager; no storage is allocated until init().
SapPairManager::SapPairManager() :
	mHashTable				(NULL),
	mNext					(NULL),
	mHashSize				(0),
	mHashCapacity			(0),
	mMinAllowedHashCapacity	(0),
	mActivePairs			(NULL),
	mActivePairStates		(NULL),
	mNbActivePairs			(0),
	mActivePairsCapacity	(0),
	mMask					(0)
{
}
///////////////////////////////////////////////////////////////////////////////
// The owner must call release() before destruction: all four arrays are
// expected to have been freed (and their pointers nulled) already.
SapPairManager::~SapPairManager()
{
	PX_ASSERT(NULL==mHashTable);
	PX_ASSERT(NULL==mNext);
	PX_ASSERT(NULL==mActivePairs);
	PX_ASSERT(NULL==mActivePairStates);
}
///////////////////////////////////////////////////////////////////////////////
// Allocate the hash table, the collision-chain array, the active-pair array
// and the per-pair state array, all with the same initial capacity.
// \param size initial capacity; also recorded as the minimum capacity
//        (mMinAllowedHashCapacity) for later reallocations.
// NOTE: mHashSize is left at 0 here — the table is grown on demand by AddPair.
void SapPairManager::init(const PxU32 size)
{
	mHashTable=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
	mNext=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
	mActivePairs=reinterpret_cast<BroadPhasePair*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BroadPhasePair)*size), "BroadPhasePair"));
	mActivePairStates=reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(PxU8)*size), "BroadPhaseContextSap ActivePairStates"));
	mHashCapacity=size;
	mMinAllowedHashCapacity = size;
	mActivePairsCapacity=size;
}
///////////////////////////////////////////////////////////////////////////////
// Free all storage and reset the bookkeeping to the freshly-constructed state.
// NOTE(review): the destructor asserts the pointers are NULL afterwards, so
// PX_FREE presumably nulls its argument — confirm the macro's behavior.
void SapPairManager::release()
{
	PX_FREE(mHashTable);
	PX_FREE(mNext);
	PX_FREE(mActivePairs);
	PX_FREE(mActivePairStates);
	mHashSize = 0;
	mHashCapacity = 0;
	mMinAllowedHashCapacity = 0;
	mNbActivePairs = 0;
	mActivePairsCapacity = 0;
	mMask = 0;
}
///////////////////////////////////////////////////////////////////////////////
// Find an existing pair (ids may be given in either order).
// Returns a pointer into mActivePairs, or NULL when the pair is not present.
const BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1) const
{
	if(0==mHashSize) return NULL;	// Nothing has been allocated yet

	// Order the ids
	Sort(id0, id1);

	// Compute hash value for this pair
	PxU32 HashValue = Hash(id0, id1) & mMask;
	PX_ASSERT(HashValue<mHashCapacity);

	// Look for it in the table
	PX_ASSERT(HashValue<mHashCapacity);
	PxU32 Offset = mHashTable[HashValue];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	// Walk the collision chain (mNext) until the exact pair or the chain end.
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];	// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE) return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////
// Internal version saving hash computation.
// Precondition: id0/id1 are already sorted and hash_value == Hash(id0,id1) & mMask.
PX_FORCE_INLINE BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1, PxU32 hash_value) const
{
	if(0==mHashSize) return NULL;	// Nothing has been allocated yet

	// Look for it in the table
	PX_ASSERT(hash_value<mHashCapacity);
	PxU32 Offset = mHashTable[hash_value];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	// Walk the collision chain (mNext) until the exact pair or the chain end.
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];	// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE) return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////
// Adds the pair (id0, id1) with the given initial state flags, or returns the
// existing entry if the pair is already present. Grows the hash table when the
// dense pair array is full.
const BroadPhasePair* SapPairManager::AddPair(BpHandle id0, BpHandle id1, const PxU8 state)
{
	// Order the ids so the pair is stored under a canonical key
	Sort(id0, id1);

	PxU32 HashValue = Hash(id0, id1) & mMask;
	BroadPhasePair* P = FindPair(id0, id1, HashValue);
	if(P)
	{
		return P;	// Persistent pair
	}

	// This is a new pair
	if(mNbActivePairs >= mHashSize)
	{
		// Get more entries: grow the logical table to the next power of two...
		mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
		mMask = mHashSize-1;
		// ...but only reallocate when the new size exceeds the allocated capacity.
		reallocPairs(mHashSize>mHashCapacity);

		// Recompute hash value with new hash size
		HashValue = Hash(id0, id1) & mMask;
	}

	// Append the pair to the dense active-pair array.
	PX_ASSERT(mNbActivePairs<mActivePairsCapacity);
	BroadPhasePair* p = &mActivePairs[mNbActivePairs];
	p->mVolA = id0;	// ### CMOVs would be nice here
	p->mVolB = id1;
	mActivePairStates[mNbActivePairs]=state;

	// Link the new pair at the head of its bucket's chain.
	PX_ASSERT(mNbActivePairs<mHashSize);
	PX_ASSERT(mNbActivePairs<mHashCapacity);
	PX_ASSERT(HashValue<mHashCapacity);
	mNext[mNbActivePairs] = mHashTable[HashValue];
	mHashTable[HashValue] = BpHandle(mNbActivePairs++);
	return p;
}
///////////////////////////////////////////////////////////////////////////////
// Removes the pair stored at pair_index, whose bucket is hash_value.
// The dense pair array is kept hole-free by moving the last active pair into
// the freed slot and re-linking it in its own bucket chain.
void SapPairManager::RemovePair(BpHandle /*id0*/, BpHandle /*id1*/, PxU32 hash_value, PxU32 pair_index)
{
	// Walk the hash table to fix mNext: unlink pair_index from its bucket chain.
	{
		PX_ASSERT(hash_value<mHashCapacity);
		PxU32 Offset = mHashTable[hash_value];
		PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

		PxU32 Previous=BP_INVALID_BP_HANDLE;
		while(Offset!=pair_index)
		{
			Previous = Offset;
			PX_ASSERT(Offset<mHashCapacity);
			Offset = mNext[Offset];
		}

		// Let us go/jump us
		if(Previous!=BP_INVALID_BP_HANDLE)
		{
			// Bypass our link in the middle of the chain.
			PX_ASSERT(Previous<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[Previous]==pair_index);
			mNext[Previous] = mNext[pair_index];
		}
		// else we were the first
		else
		{
			// We were the chain head: the bucket now starts at our successor.
			PX_ASSERT(hash_value<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			mHashTable[hash_value] = mNext[pair_index];
		}
	}
	// we're now free to reuse mNext[PairIndex] without breaking the list

#if PX_DEBUG
	PX_ASSERT(pair_index<mHashCapacity);
	mNext[pair_index]=BP_INVALID_BP_HANDLE;
#endif
	// Invalidate entry

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 LastPairIndex = mNbActivePairs-1;
		if(LastPairIndex==pair_index)
		{
			// Removing the last pair: just shrink the count, no hole to fill.
			mNbActivePairs--;
		}
		else
		{
			// Unlink the last pair from its own bucket chain before moving it.
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			const BroadPhasePair* Last = &mActivePairs[LastPairIndex];
			const PxU32 LastHashValue = Hash(Last->mVolA, Last->mVolB) & mMask;

			// Walk the hash table to fix mNext
			PX_ASSERT(LastHashValue<mHashCapacity);
			PxU32 Offset = mHashTable[LastHashValue];
			PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

			PxU32 Previous=BP_INVALID_BP_HANDLE;
			while(Offset!=LastPairIndex)
			{
				Previous = Offset;
				PX_ASSERT(Offset<mHashCapacity);
				Offset = mNext[Offset];
			}

			// Let us go/jump us
			if(Previous!=BP_INVALID_BP_HANDLE)
			{
				PX_ASSERT(Previous<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				PX_ASSERT(mNext[Previous]==LastPairIndex);
				mNext[Previous] = mNext[LastPairIndex];
			}
			// else we were the first
			else
			{
				PX_ASSERT(LastHashValue<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				mHashTable[LastHashValue] = mNext[LastPairIndex];
			}
			// we're now free to reuse mNext[LastPairIndex] without breaking the list

#if PX_DEBUG
			PX_ASSERT(LastPairIndex<mHashCapacity);
			mNext[LastPairIndex]=BP_INVALID_BP_HANDLE;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot: move the last pair (and its state) into
			// the hole, then link it back at the head of its bucket's chain.
			PX_ASSERT(pair_index<mActivePairsCapacity);
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			mActivePairs[pair_index] = mActivePairs[LastPairIndex];
			mActivePairStates[pair_index] = mActivePairStates[LastPairIndex];
#if PX_DEBUG
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[pair_index]==BP_INVALID_BP_HANDLE);
#endif
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(LastHashValue<mHashCapacity);
			mNext[pair_index] = mHashTable[LastHashValue];
			mHashTable[LastHashValue] = BpHandle(pair_index);

			mNbActivePairs--;
		}
	}
}
// Public removal entry point: returns false if the pair is not present.
bool SapPairManager::RemovePair(BpHandle id0, BpHandle id1)
{
	// Pairs are stored under a canonical (ordered) id key.
	Sort(id0, id1);

	const PxU32 bucket = Hash(id0, id1) & mMask;
	const BroadPhasePair* pair = FindPair(id0, id1, bucket);
	if(!pair)
		return false;	// Unknown pair, nothing to remove

	PX_ASSERT(pair->mVolA==id0);
	PX_ASSERT(pair->mVolB==id1);

	RemovePair(id0, id1, bucket, GetPairIndex(pair));

	// Give memory back when the active pair count has dropped enough.
	shrinkMemory();
	return true;
}
bool SapPairManager::RemovePairs(const PxBitMap& removedAABBs)
{
PxU32 i=0;
while(i<mNbActivePairs)
{
const BpHandle id0 = mActivePairs[i].mVolA;
const BpHandle id1 = mActivePairs[i].mVolB;
if(removedAABBs.test(id0) || removedAABBs.test(id1))
{
const PxU32 HashValue = Hash(id0, id1) & mMask;
RemovePair(id0, id1, HashValue, i);
}
else i++;
}
return true;
}
// Shrinks the hash table after removals, clamped to mMinAllowedHashCapacity,
// and rebuilds the bucket chains for the new mask.
void SapPairManager::shrinkMemory()
{
	//Compute the hash size given the current number of active pairs.
	const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);

	//If we have the correct hash size then no action required.
	if(correctHashSize==mHashSize || (correctHashSize < mMinAllowedHashCapacity && mHashSize == mMinAllowedHashCapacity))
		return;

	//The hash size can be reduced so take action.
	//Don't let the hash size fall below a threshold value.
	PxU32 newHashSize = correctHashSize;
	if(newHashSize < mMinAllowedHashCapacity)
	{
		newHashSize = mMinAllowedHashCapacity;
	}
	mHashSize = newHashSize;
	mMask = newHashSize-1;

	// Only reallocate when above the floor capacity or when the logical size has
	// dropped to a quarter (or less) of the allocated capacities; otherwise just
	// rebuild the chains in place.
	reallocPairs( (newHashSize > mMinAllowedHashCapacity) || (mHashSize <= (mHashCapacity >> 2)) || (mHashSize <= (mActivePairsCapacity >> 2)));
}
// Rebuilds the hash table for the current mHashSize/mMask. When allocRequired
// is true, new buffers of mHashSize entries are allocated and the active pairs
// are migrated; otherwise the existing buffers are reused and only the bucket
// chains are recomputed.
void SapPairManager::reallocPairs(const bool allocRequired)
{
	if(allocRequired)
	{
		PX_FREE(mHashTable);
		mHashCapacity=mHashSize;
		mActivePairsCapacity=mHashSize;

		// Fresh hash table, all buckets empty.
		mHashTable = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize*sizeof(BpHandle), "BpHandle"));
		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// Get some bytes for new entries
		BroadPhasePair* NewPairs = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(mHashSize * sizeof(BroadPhasePair), "BroadPhasePair"));	PX_ASSERT(NewPairs);
		BpHandle* NewNext = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize * sizeof(BpHandle), "BpHandle"));	PX_ASSERT(NewNext);
		PxU8* NewPairStates = reinterpret_cast<PxU8*>(PX_ALLOC(mHashSize * sizeof(PxU8), "SapPairStates"));	PX_ASSERT(NewPairStates);

		// Copy old data if needed
		if(mNbActivePairs)
		{
			PxMemCopy(NewPairs, mActivePairs, mNbActivePairs*sizeof(BroadPhasePair));
			PxMemCopy(NewPairStates, mActivePairStates, mNbActivePairs*sizeof(PxU8));
		}

		// Re-link every surviving pair under the new mask.
		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			NewNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}

		// Delete old data
		PX_FREE(mNext);
		PX_FREE(mActivePairs);
		PX_FREE(mActivePairStates);

		// Assign new pointer
		mActivePairs = NewPairs;
		mActivePairStates = NewPairStates;
		mNext = NewNext;
	}
	else
	{
		// In-place rebuild: clear all buckets...
		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// ...then re-link every pair under the new mask.
		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			mNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}
	}
}
// Doubles the capacity of a created/deleted pair list, preserving its contents.
void resizeCreatedDeleted(BroadPhasePair*& pairs, PxU32& maxNumPairs)
{
	PX_ASSERT(pairs);
	PX_ASSERT(maxNumPairs>0);

	const PxU32 grownCapacity = maxNumPairs*2;
	BroadPhasePair* grown = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(sizeof(BroadPhasePair)*grownCapacity, "BroadPhasePair"));
	PxMemCopy(grown, pairs, sizeof(BroadPhasePair)*maxNumPairs);
	PX_FREE(pairs);

	pairs = grown;
	maxNumPairs = grownCapacity;
}
// Scans the pairs touched this frame (indices in dataArray) and splits them
// into the created and deleted lists reported to the client, growing the lists
// from the scratch allocator as needed. A second pass appends pairs that were
// both created and removed in the same frame (from different axis sorts) to
// the "actual deleted" tail so they are still purged from the pair manager.
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
PxcScratchAllocator* scratchAllocator,
BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
PxU32& numActualDeletedPairs,
SapPairManager& pairManager)
{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	// Group filtering already happened in batchCreate/batchUpdate.
	PX_UNUSED(boxGroups);
#endif

	// Pass 1: route each touched pair to the created or deleted list.
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);
		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		PX_ASSERT(pairManager.IsInArray(UP));

		if(pairManager.IsRemoved(UP))
		{
			// Removed pairs are only reported if they were not created this frame.
			if(!pairManager.IsNew(UP))
			{
				// No need to call "ClearInArray" in this case, since the pair will get removed anyway
				if(numDeletedPairs==maxNumDeletedPairs)
				{
					// Grow the deleted list (doubling) from the scratch allocator.
					BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
					PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
					scratchAllocator->free(deletedPairsList);
					deletedPairsList = newDeletedPairsList;
					maxNumDeletedPairs = 2*maxNumDeletedPairs;
				}

				PX_ASSERT(numDeletedPairs<maxNumDeletedPairs);
				//PX_ASSERT((uintptr_t)UP->mUserData != 0xcdcdcdcd);
				deletedPairsList[numDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
			}
		}
		else
		{
			// The pair survives this frame: it is no longer tracked in dataArray.
			pairManager.ClearInArray(UP);

			// Add => already there... Might want to create user data, though
			if(pairManager.IsNew(UP))
			{
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				// Group filtering was delayed until now.
				if(groupFiltering(boxGroups[UP->mVolA], boxGroups[UP->mVolB]))
#endif
				{
					if(numCreatedPairs==maxNumCreatedPairs)
					{
						// Grow the created list (doubling) from the scratch allocator.
						BroadPhasePair* newCreatedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumCreatedPairs, true));
						PxMemCopy(newCreatedPairsList, createdPairsList, sizeof(BroadPhasePair)*maxNumCreatedPairs);
						scratchAllocator->free(createdPairsList);
						createdPairsList = newCreatedPairsList;
						maxNumCreatedPairs = 2*maxNumCreatedPairs;
					}

					PX_ASSERT(numCreatedPairs<maxNumCreatedPairs);
					createdPairsList[numCreatedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
				}
				pairManager.ClearNew(UP);
			}
		}
	}

	//Record pairs that are to be deleted because they were simultaneously created and removed
	//from different axis sorts.
	numActualDeletedPairs=numDeletedPairs;
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);
		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		if(pairManager.IsRemoved(UP) && pairManager.IsNew(UP))
		{
			PX_ASSERT(pairManager.IsInArray(UP));

			if(numActualDeletedPairs==maxNumDeletedPairs)
			{
				BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
				PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
				scratchAllocator->free(deletedPairsList);
				deletedPairsList = newDeletedPairsList;
				maxNumDeletedPairs = 2*maxNumDeletedPairs;
			}

			PX_ASSERT(numActualDeletedPairs<=maxNumDeletedPairs);
			deletedPairsList[numActualDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/); //KS - should we even get here????
		}
	}

//	// #### try batch removal here
//	for(PxU32 i=0;i<numActualDeletedPairs;i++)
//	{
//		const BpHandle id0 = deletedPairsList[i].mVolA;
//		const BpHandle id1 = deletedPairsList[i].mVolB;
//#if PX_DEBUG
//		const bool Status = pairManager.RemovePair(id0, id1);
//		PX_ASSERT(Status);
//#else
//		pairManager.RemovePair(id0, id1);
//#endif
//	}

	//Only report deleted pairs from different groups.
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	// Compact the reported deleted list: same-group pairs are swapped out with
	// the last reportable entry and the count reduced in place.
	for(PxU32 i=0;i<numDeletedPairs;i++)
	{
		const PxU32 id0 = deletedPairsList[i].mVolA;
		const PxU32 id1 = deletedPairsList[i].mVolB;
		if(!groupFiltering(boxGroups[id0], boxGroups[id1]))
		{
			while((numDeletedPairs-1) > i && boxGroups[deletedPairsList[numDeletedPairs-1].mVolA] == boxGroups[deletedPairsList[numDeletedPairs-1].mVolB])
			{
				numDeletedPairs--;
			}
			deletedPairsList[i]=deletedPairsList[numDeletedPairs-1];
			numDeletedPairs--;
		}
	}
#endif
}
//#define PRINT_STATS
#ifdef PRINT_STATS
#include <stdio.h>
static PxU32 gNbIter = 0;
static PxU32 gNbTests = 0;
static PxU32 gNbPairs = 0;
#define START_STATS gNbIter = gNbTests = gNbPairs = 0;
#define INCREASE_STATS_NB_ITER gNbIter++;
#define INCREASE_STATS_NB_TESTS gNbTests++;
#define INCREASE_STATS_NB_PAIRS gNbPairs++;
#define DUMP_STATS printf("%d %d %d\n", gNbIter, gNbTests, gNbPairs);
#else
#define START_STATS
#define INCREASE_STATS_NB_ITER
#define INCREASE_STATS_NB_TESTS
#define INCREASE_STATS_NB_PAIRS
#define DUMP_STATS
#endif
// Doubles the capacity of the handle array, migrating the existing contents
// through the scratch allocator.
void DataArray::Resize(PxcScratchAllocator* scratchAllocator)
{
	const PxU32 grownCapacity = mCapacity*2;
	BpHandle* grown = reinterpret_cast<BpHandle*>(scratchAllocator->alloc(sizeof(BpHandle)*grownCapacity, true));
	PxMemCopy(grown, mData, sizeof(BpHandle)*mCapacity);
	scratchAllocator->free(mData);

	mData = grown;
	mCapacity = grownCapacity;
}
// Returns non-zero if the two boxes overlap on both the Y and Z axes.
// Branchless: the four "separated on one side" tests are OR-ed with
// non-short-circuiting | on purpose.
static PX_FORCE_INLINE int intersect2D(const BoxYZ& a, const BoxYZ& b)
{
	const bool separated =	(b.mMaxY < a.mMinY) | (a.mMaxY < b.mMinY) |
							(b.mMaxZ < a.mMinZ) | (a.mMaxZ < b.mMinZ);
	return !separated;
}
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(id0, id1, SapPairManager::PAIR_UNKNOWN));
//If the hash table has reached its limit then we're unable to add a new pair.
if(NULL==UP)
return;
PX_ASSERT(UP);
if(pairManager.IsUnknown(UP))
{
pairManager.ClearState(UP);
pairManager.SetInArray(UP);
dataArray.AddData(pairManager.GetPairIndex(UP), scratchAllocator);
pairManager.SetNew(UP);
}
pairManager.ClearRemoved(UP);
}
// Flags the pair (id0, id1) as removed, recording it in dataArray the first
// time it is touched this frame. Unknown pairs are ignored.
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	const BroadPhasePair* pair = reinterpret_cast<const BroadPhasePair*>(pairManager.FindPair(id0, id1));
	if(!pair)
		return;

	if(!pairManager.IsInArray(pair))
	{
		// First touch this frame: track the pair index for the report pass.
		pairManager.SetInArray(pair);
		dataArray.AddData(pairManager.GetPairIndex(pair), scratchAllocator);
	}
	pairManager.SetRemoved(pair);
}
// Bundles the context needed by the index-based addPair() below: two remap
// tables translating sorted box indices back to box ids, plus the allocator,
// pair manager and data array the pair is recorded into.
struct AddPairParams
{
	AddPairParams(const PxU32* remap0, const PxU32* remap1, PxcScratchAllocator* alloc, SapPairManager* pm, DataArray* da) :
		mRemap0				(remap0),
		mRemap1				(remap1),
		mScratchAllocator	(alloc),
		mPairManager		(pm),
		mDataArray			(da)
	{
	}

	const PxU32*			mRemap0;			// sorted index -> box id for the first set
	const PxU32*			mRemap1;			// sorted index -> box id for the second set
	PxcScratchAllocator*	mScratchAllocator;
	SapPairManager*			mPairManager;
	DataArray*				mDataArray;
};
static void addPair(const AddPairParams* PX_RESTRICT params, const BpHandle id0_, const BpHandle id1_)
{
SapPairManager& pairManager = *params->mPairManager;
const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(params->mRemap0[id0_], params->mRemap1[id1_], SapPairManager::PAIR_UNKNOWN));
//If the hash table has reached its limit then we're unable to add a new pair.
if(NULL==UP)
return;
PX_ASSERT(UP);
if(pairManager.IsUnknown(UP))
{
pairManager.ClearState(UP);
pairManager.SetInArray(UP);
params->mDataArray->AddData(pairManager.GetPairIndex(UP), params->mScratchAllocator);
pairManager.SetNew(UP);
}
pairManager.ClearRemoved(UP);
}
// PT: TODO: use SIMD
// Builds the cache-friendly pruning arrays from the per-axis SAP boxes:
// X extents, Y/Z extents, filter groups and a remap back to box ids, all laid
// out in the order given by indicesSorted.
// PT: TODO: use SIMD
AuxData::AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds)
{
	// PT: TODO: use scratch allocator / etc
	BoxX* PX_RESTRICT boxX = reinterpret_cast<BoxX*>(PX_ALLOC(sizeof(BoxX)*(nb+1), "mBoxX"));	// +1 for the sentinel below
	BoxYZ* PX_RESTRICT boxYZ = reinterpret_cast<BoxYZ*>(PX_ALLOC(sizeof(BoxYZ)*nb, "mBoxYZ"));
	Bp::FilterGroup::Enum* PX_RESTRICT groups = reinterpret_cast<Bp::FilterGroup::Enum*>(PX_ALLOC(sizeof(Bp::FilterGroup::Enum)*nb, "mGroups"));
	PxU32* PX_RESTRICT remap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "mRemap"));

	mBoxX = boxX;
	mBoxYZ = boxYZ;
	mGroups = groups;
	mRemap = remap;
	mNb = nb;

	// Axis 0 is the sweep axis. NOTE: the "Y" slots are filled from axis 2 and
	// the "Z" slots from axis 1 — presumably fine since intersect2D() only needs
	// a consistent pairing across both boxes (TODO confirm intent).
	const PxU32 axis0 = 0;
	const PxU32 axis1 = 2;
	const PxU32 axis2 = 1;

	const SapBox1D* PX_RESTRICT boxes0 = boxes[axis0];
	const SapBox1D* PX_RESTRICT boxes1 = boxes[axis1];
	const SapBox1D* PX_RESTRICT boxes2 = boxes[axis2];

	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 boxID = indicesSorted[i];
		groups[i] = groupIds[boxID];
		remap[i] = boxID;

		const SapBox1D& currentBoxX = boxes0[boxID];
		boxX[i].mMinX = currentBoxX.mMinMax[0];
		boxX[i].mMaxX = currentBoxX.mMinMax[1];

		const SapBox1D& currentBoxY = boxes1[boxID];
		boxYZ[i].mMinY = currentBoxY.mMinMax[0];
		boxYZ[i].mMaxY = currentBoxY.mMinMax[1];

		const SapBox1D& currentBoxZ = boxes2[boxID];
		boxYZ[i].mMinZ = currentBoxZ.mMinMax[0];
		boxYZ[i].mMaxZ = currentBoxZ.mMinMax[1];
	}
	// Max sentinel: terminates the inner scan loops in the pruning functions
	// without an explicit bounds check.
	boxX[nb].mMinX = 0xffffffff;
}
// Releases the arrays allocated by the constructor (in reverse order).
AuxData::~AuxData()
{
	PX_FREE(mRemap);
	PX_FREE(mGroups);
	PX_FREE(mBoxYZ);
	PX_FREE(mBoxX);
}
// Box-pruning of one set against itself: boxes are sorted along X, each box is
// tested against its X-overlapping successors, and 2D (Y/Z) overlap plus group
// filtering gate the actual pair creation. Found pairs go into the pair
// manager and dataArray. Relies on the max sentinel written by AuxData's
// constructor to terminate the inner scans.
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
								const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb = auxData->mNb;
	if(!nb)
		return;

	// Wrap the raw array so AddData can grow it; written back at the end.
	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		BoxX* boxX = auxData->mBoxX;
		BoxYZ* boxYZ = auxData->mBoxYZ;
		Bp::FilterGroup::Enum* groups = auxData->mGroups;
		PxU32* remap = auxData->mRemap;

		AddPairParams params(remap, remap, scratchAllocator, &pairManager, &da);

		PxU32 runningIndex = 0;
		PxU32 index0 = 0;

		while(runningIndex<nb && index0<nb)
		{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			const Bp::FilterGroup::Enum group0 = groups[index0];
#endif
			const BoxX& boxX0 = boxX[index0];

			// Skip boxes that end before boxX0 starts (sentinel-terminated).
			const BpHandle minLimit = boxX0.mMinX;
			while(boxX[runningIndex++].mMinX<minLimit);

			// Scan all boxes whose X range overlaps boxX0's.
			const BpHandle maxLimit = boxX0.mMaxX;
			PxU32 index1 = runningIndex;
			while(boxX[index1].mMinX <= maxLimit)
			{
				INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				if(groupFiltering(group0, groups[index1], lut))
#endif
				{
					INCREASE_STATS_NB_TESTS
					// Full overlap requires Y/Z intersection as well.
					if(intersect2D(boxYZ[index0], boxYZ[index1]))
/*					__m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index0].mMinY));
					b = _mm_shuffle_epi32(b, 78);
					const __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index1].mMinY));
					const __m128i d = _mm_cmpgt_epi32(a, b);
					const int mask = _mm_movemask_epi8(d);
					if(mask==0x0000ff00)*/
					{
						INCREASE_STATS_NB_PAIRS
						addPair(&params, index0, index1);
					}
				}
				index1++;
			}
			index0++;
		}
	}
	DUMP_STATS

	// Publish the (possibly reallocated) data array back to the caller.
	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}
// One-directional bipartite pruning: sweeps set 0 against set 1 along X and
// reports 2D-overlapping, group-accepted pairs. The codepath template flag
// selects < vs <= when skipping set-1 boxes, so that boxes with identical
// minima are not reported twice when the two symmetric calls are combined.
template<int codepath>
static void bipartitePruning(
	const PxU32 nb0, const BoxX* PX_RESTRICT boxX0, const BoxYZ* PX_RESTRICT boxYZ0, const PxU32* PX_RESTRICT remap0, const Bp::FilterGroup::Enum* PX_RESTRICT groups0,
	const PxU32 nb1, const BoxX* PX_RESTRICT boxX1, const BoxYZ* PX_RESTRICT boxYZ1, const PxU32* PX_RESTRICT remap1, const Bp::FilterGroup::Enum* PX_RESTRICT groups1,
	const bool* lut, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	AddPairParams params(remap0, remap1, scratchAllocator, &pairManager, &dataArray);

	PxU32 runningIndex = 0;
	PxU32 index0 = 0;

	while(runningIndex<nb1 && index0<nb0)
	{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
		const Bp::FilterGroup::Enum group0 = groups0[index0];
#endif
		// Skip set-1 boxes that cannot overlap boxX0[index0] on X.
		const BpHandle minLimit = boxX0[index0].mMinX;
		if(!codepath)
		{
			while(boxX1[runningIndex].mMinX<minLimit)
				runningIndex++;
		}
		else
		{
			// <= : equal minima are handled by the other direction's call.
			while(boxX1[runningIndex].mMinX<=minLimit)
				runningIndex++;
		}

		// Scan all set-1 boxes whose X range overlaps boxX0[index0]'s.
		const BpHandle maxLimit = boxX0[index0].mMaxX;
		PxU32 index1 = runningIndex;
		while(boxX1[index1].mMinX <= maxLimit)
		{
			INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			if(groupFiltering(group0, groups1[index1], lut))
#endif
			{
				INCREASE_STATS_NB_TESTS
				if(intersect2D(boxYZ0[index0], boxYZ1[index1]))
				{
					INCREASE_STATS_NB_PAIRS
					addPair(&params, index0, index1);
				}
			}
			index1++;
		}
		index0++;
	}
}
void performBoxPruningNewOld( const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
const PxU32 nb0 = auxData0->mNb;
const PxU32 nb1 = auxData1->mNb;
if(!nb0 || !nb1)
return;
DataArray da(dataArray, dataArraySize, dataArrayCapacity);
START_STATS
{
const BoxX* boxX0 = auxData0->mBoxX;
const BoxYZ* boxYZ0 = auxData0->mBoxYZ;
const Bp::FilterGroup::Enum* groups0 = auxData0->mGroups;
const PxU32* remap0 = auxData0->mRemap;
const BoxX* boxX1 = auxData1->mBoxX;
const BoxYZ* boxYZ1 = auxData1->mBoxYZ;
const Bp::FilterGroup::Enum* groups1 = auxData1->mGroups;
const PxU32* remap1 = auxData1->mRemap;
bipartitePruning<0>(nb0, boxX0, boxYZ0, remap0, groups0, nb1, boxX1, boxYZ1, remap1, groups1, lut, scratchAllocator, pairManager, da);
bipartitePruning<1>(nb1, boxX1, boxYZ1, remap1, groups1, nb0, boxX0, boxYZ0, remap0, groups0, lut, scratchAllocator, pairManager, da);
}
DUMP_STATS
dataArray = da.mData;
dataArraySize = da.mSize;
dataArrayCapacity = da.mCapacity;
}
} //namespace Bp
} //namespace physx

View File

@@ -0,0 +1,275 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_AUX_H
#define BP_BROADPHASE_SAP_AUX_H
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxBitMap.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
#define ALIGN_SIZE_16(size) ((unsigned(size)+15)&(unsigned(~15)))
#define NUM_SENTINELS 2
#define BP_SAP_USE_PREFETCH 1//prefetch in batchUpdate
#define BP_SAP_USE_OVERLAP_TEST_ON_REMOVES 1// "Useless" but faster overall because seriously reduces number of calls (from ~10000 to ~3 sometimes!)
//Set 1 to test for group ids in batchCreate/batchUpdate so we can avoid group id test in ComputeCreatedDeletedPairsLists
//Set 0 to neglect group id test in batchCreate/batchUpdate and delay test until ComputeCreatedDeletedPairsLists
#define BP_SAP_TEST_GROUP_ID_CREATEUPDATE 1
#define MAX_BP_HANDLE 0x3fffffff
#define PX_REMOVED_BP_HANDLE 0x3ffffffd
// Writes the "minimum" end-point sentinel (smallest possible value, handle
// pattern recognized by isSentinel()).
PX_FORCE_INLINE void setMinSentinel(ValType& v, BpHandle& d)
{
	v = 0x00000000;//0x00800000;	//0x00800000 is -FLT_MAX but setting it to 0 means we don't crash when we get a value outside the float range.
	d = (BP_INVALID_BP_HANDLE & ~1);
}
// Writes the "maximum" end-point sentinel (largest possible value, handle
// pattern recognized by isSentinel()).
PX_FORCE_INLINE void setMaxSentinel(ValType& v, BpHandle& d)
{
	v = 0xffffffff;//0xff7fffff;	//0xff7fffff is +FLT_MAX but setting it to 0xffffffff means we don't crash when we get a value outside the float range.
	d = BP_INVALID_BP_HANDLE;
}
// Packs an end-point handle: owner box id in the upper bits, min/max flag in bit 0.
PX_FORCE_INLINE BpHandle setData(PxU32 owner_box_id, const bool is_max)
{
	const BpHandle flag = is_max ? BpHandle(1) : BpHandle(0);
	return BpHandle((owner_box_id<<1) | flag);
}
// True if the handle is one of the two sentinel patterns (min/max flag ignored).
PX_FORCE_INLINE bool isSentinel(const BpHandle& d)
{
	return (d&~1)==(BP_INVALID_BP_HANDLE & ~1);
}
// Non-zero if the end-point is a "max" bound (bit 0 of the packed handle).
PX_FORCE_INLINE BpHandle isMax(const BpHandle& d)
{
	return BpHandle(d & 1);
}
// Extracts the owner box id from a packed end-point handle (drops the min/max bit).
PX_FORCE_INLINE BpHandle getOwner(const BpHandle& d)
{
	return BpHandle(d>>1);
}
// One-axis projection of a box: indices of its min/max end-points on that axis.
class SapBox1D
{
public:
	PX_FORCE_INLINE				SapBox1D()	{}
	PX_FORCE_INLINE				~SapBox1D()	{}

				BpHandle		mMinMax[2];//mMinMax[0]=min, mMinMax[1]=max
};
// Hash-based set of broad-phase pairs: a dense array of active pairs plus a
// power-of-two hash table with chained buckets (mHashTable/mNext), and one
// PAIR_* state byte per pair used while accumulating per-frame events.
class SapPairManager
{
public:
						SapPairManager();
						~SapPairManager();

	// Preallocates the tables for "size" entries.
	void				init(const PxU32 size);
	// Frees all memory owned by the manager.
	void				release();

	// Shrinks the tables when the active pair count has dropped enough.
	void				shrinkMemory();

	// AddPair returns the existing entry for persistent pairs; callers treat a
	// NULL return as "table full" (see addPair() users).
	const BroadPhasePair*	AddPair		(BpHandle id0, BpHandle id1, const PxU8 state);
	bool					RemovePair	(BpHandle id0, BpHandle id1);
	bool					RemovePairs	(const PxBitMap& removedAABBs);
	const BroadPhasePair*	FindPair	(BpHandle id0, BpHandle id1)	const;

	// Index of a pair inside mActivePairs, from its pointer.
	PX_FORCE_INLINE	PxU32	GetPairIndex(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BroadPhasePair));
	}

	BpHandle*			mHashTable;				// bucket -> index of first chained pair (BP_INVALID_BP_HANDLE if empty)
	BpHandle*			mNext;					// per-pair chain links, parallel to mActivePairs
	PxU32				mHashSize;				// current logical table size (power of two)
	PxU32				mHashCapacity;			// allocated capacity of the hash/next tables
	PxU32				mMinAllowedHashCapacity;	// floor used by shrinkMemory
	BroadPhasePair*		mActivePairs;			// dense array of active pairs
	PxU8*				mActivePairStates;		// PAIR_* flags, parallel to mActivePairs
	PxU32				mNbActivePairs;
	PxU32				mActivePairsCapacity;
	PxU32				mMask;					// mHashSize-1

	BroadPhasePair*		FindPair	(BpHandle id0, BpHandle id1, PxU32 hash_value) const;
	void				RemovePair	(BpHandle id0, BpHandle id1, PxU32 hash_value, PxU32 pair_index);
	void				reallocPairs(const bool allocRequired);

	// Per-pair state flags accumulated during a frame.
	enum
	{
		PAIR_INARRAY=1,		// pair index recorded in the frame's data array
		PAIR_REMOVED=2,		// flagged for removal this frame
		PAIR_NEW=4,			// created this frame
		PAIR_UNKNOWN=8		// freshly inserted, state not yet initialized
	};

	PX_FORCE_INLINE	bool IsInArray(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_INARRAY ? true : false;
	}
	PX_FORCE_INLINE	bool IsRemoved(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_REMOVED ? true : false;
	}
	PX_FORCE_INLINE	bool IsNew(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_NEW ? true : false;
	}
	PX_FORCE_INLINE bool IsUnknown(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_UNKNOWN ? true : false;
	}

	PX_FORCE_INLINE	void ClearState(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs]=0;
	}

	PX_FORCE_INLINE	void SetInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void SetRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void SetNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_NEW;
	}

	PX_FORCE_INLINE	void ClearInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void ClearRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void ClearNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_NEW;
	}
};
// Growable array of pair indices touched during a frame, backed by the
// scratch allocator; wraps a caller-owned buffer (see the pruning functions).
struct DataArray
{
	DataArray(BpHandle* data, PxU32 size, PxU32 capacity) : mData(data), mSize(size), mCapacity(capacity)	{}

	BpHandle*	mData;
	PxU32		mSize;
	PxU32		mCapacity;

	// Doubles the capacity (out-of-line slow path).
	PX_NOINLINE void	Resize(PxcScratchAllocator* scratchAllocator);

	// Appends one entry, growing the buffer first if it is full.
	PX_FORCE_INLINE void AddData(const PxU32 data, PxcScratchAllocator* scratchAllocator)
	{
		if(mSize==mCapacity)
			Resize(scratchAllocator);
		PX_ASSERT(mSize<mCapacity);
		mData[mSize++] = BpHandle(data);
	}
};
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);
// Splits the frame's touched pairs into created/deleted report lists.
// (Parameter names fixed to match the definition: "maxNumCreatdPairs" typo
// corrected and spacing normalized.)
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
PxcScratchAllocator* scratchAllocator,
BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
PxU32& numActualDeletedPairs,
SapPairManager& pairManager);
// Sweep-axis (X) extents of a box, stored separately for cache-friendly scans.
struct BoxX
{
	PxU32	mMinX;
	PxU32	mMaxX;
};
// Non-sweep-axis extents of a box, tested by intersect2D() after an X overlap.
struct BoxYZ
{
	PxU32	mMinY;
	PxU32	mMinZ;
	PxU32	mMaxY;
	PxU32	mMaxZ;
};
// Cache-friendly arrays derived from the per-axis SAP boxes, used by the
// box-pruning functions: X extents (with a trailing sentinel), Y/Z extents,
// filter groups and a remap from sorted index back to box id.
struct AuxData
{
	AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds);
	~AuxData();

	BoxX*					mBoxX;		// nb+1 entries (sentinel at the end)
	BoxYZ*					mBoxYZ;
	Bp::FilterGroup::Enum*	mGroups;
	PxU32*					mRemap;		// sorted index -> box id
	PxU32					mNb;
};
void performBoxPruningNewNew( const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
void performBoxPruningNewOld( const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
const bool* lut, SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
// Strict 2D overlap test on two axes of handle-encoded bounds: boxes that
// merely touch (equal min/max) do NOT count as intersecting.
PX_FORCE_INLINE bool Intersect2D_Handle
(const BpHandle bDir1Min, const BpHandle bDir1Max, const BpHandle bDir2Min, const BpHandle bDir2Max,
 const BpHandle cDir1Min, const BpHandle cDir1Max, const BpHandle cDir2Min, const BpHandle cDir2Max)
{
	if(bDir1Max <= cDir1Min || cDir1Max <= bDir1Min)
		return false;	// Separated on the first axis
	if(bDir2Max <= cDir2Min || cDir2Max <= bDir2Min)
		return false;	// Separated on the second axis
	return true;
}
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_AUX_H

View File

@@ -0,0 +1,245 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhaseShared.h"
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
using namespace physx;
using namespace Bp;
#define MBP_ALLOC(x) PX_ALLOC(x, "MBP")
#define MBP_FREE(x) PX_FREE(x)
// Fills nb consecutive dwords with the same value.
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
	for(PxU32 i=0; i<nb; i++)
		dest[i] = value;
}
///////////////////////////////////////////////////////////////////////////////
// Starts with an empty manager: no hash table, no pair storage, no reserved capacity.
PairManagerData::PairManagerData() :
	mHashSize		(0),
	mMask			(0),
	mNbActivePairs	(0),
	mHashTable		(NULL),
	mNext			(NULL),
	mActivePairs	(NULL),
	mReservedMemory	(0)
{
}
///////////////////////////////////////////////////////////////////////////////
// Releases all internal allocations via purge().
PairManagerData::~PairManagerData()
{
	purge();
}
///////////////////////////////////////////////////////////////////////////////
// Frees the hash table, the pair array and the link array, and resets the
// manager back to its freshly-constructed empty state (reserved capacity
// is deliberately left untouched, matching the original behavior).
void PairManagerData::purge()
{
	MBP_FREE(mHashTable);
	MBP_FREE(mActivePairs);
	MBP_FREE(mNext);
	mNbActivePairs	= 0;
	mMask			= 0;
	mHashSize		= 0;
}
///////////////////////////////////////////////////////////////////////////////
// Reallocates the hash table, pair array and link array for the current
// mHashSize/mMask, then re-inserts every active pair into its (possibly new)
// bucket. Called whenever the table grows, shrinks, or is pre-reserved.
// Precondition: mHashSize and mMask have already been updated by the caller.
void PairManagerData::reallocPairs()
{
	MBP_FREE(mHashTable);
	mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
	storeDwords(mHashTable, mHashSize, INVALID_ID);	// mark every bucket empty
	// Get some bytes for new entries
	InternalPair* newPairs = reinterpret_cast<InternalPair*>(MBP_ALLOC(mHashSize * sizeof(InternalPair))); PX_ASSERT(newPairs);
	PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32))); PX_ASSERT(newNext);
	// Copy old data if needed
	if(mNbActivePairs)
		PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(InternalPair));
	// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
	// yeah, since hash(id0, id1) is a constant
	// However it might not be needed to recompute them => only less efficient but still ok
	// Rebuild each bucket's chain: the mask changed, so a pair's bucket may differ.
	for(PxU32 i=0;i<mNbActivePairs;i++)
	{
		const PxU32 hashValue = hash(mActivePairs[i].getId0(), mActivePairs[i].getId1()) & mMask; // New hash value with new mask
		newNext[i] = mHashTable[hashValue];
		mHashTable[hashValue] = i;
	}
	// Delete old data
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);
	// Assign new pointer
	mActivePairs = newPairs;
	mNext = newNext;
}
///////////////////////////////////////////////////////////////////////////////
void PairManagerData::shrinkMemory()
{
// Check correct memory against actually used memory
const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);
if(mHashSize==correctHashSize)
return;
if(mReservedMemory && correctHashSize < mReservedMemory)
return;
// Reduce memory used
mHashSize = correctHashSize;
mMask = mHashSize-1;
reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Pre-sizes the hash table for 'memSize' entries (rounded up to the next
// power of two) so that subsequent insertions avoid incremental growth.
// A size of zero is a no-op.
void PairManagerData::reserveMemory(PxU32 memSize)
{
	if(memSize)
	{
		const PxU32 capacity = PxIsPowerOfTwo(memSize) ? memSize : PxNextPowerOfTwo(memSize);
		mHashSize		= capacity;
		mMask			= capacity - 1;
		mReservedMemory	= capacity;
		reallocPairs();
	}
}
///////////////////////////////////////////////////////////////////////////////
// Grows the hash table so that one more pair fits, then returns the bucket
// index of 'fullHashValue' under the enlarged mask.
PX_NOINLINE PxU32 PairManagerData::growPairs(PxU32 fullHashValue)
{
	const PxU32 newSize = PxNextPowerOfTwo(mNbActivePairs+1);
	mHashSize	= newSize;
	mMask		= newSize - 1;
	reallocPairs();
	// Recompute the bucket with the new, larger mask.
	return fullHashValue & mMask;
}
///////////////////////////////////////////////////////////////////////////////
// Removes the pair stored at 'pairIndex' (whose bucket is 'hashValue').
// To keep mActivePairs dense, the last active pair is moved into the freed
// slot and both affected bucket chains are patched. The id parameters are
// unused: the caller already resolved the pair to its index and bucket.
void PairManagerData::removePair(PxU32 /*id0*/, PxU32 /*id1*/, PxU32 hashValue, PxU32 pairIndex)
{
	// Walk the hash table to fix mNext
	{
		PxU32 offset = mHashTable[hashValue];
		PX_ASSERT(offset!=INVALID_ID);
		PxU32 previous=INVALID_ID;
		while(offset!=pairIndex)
		{
			previous = offset;
			offset = mNext[offset];
		}
		// Let us go/jump us
		if(previous!=INVALID_ID)
		{
			PX_ASSERT(mNext[previous]==pairIndex);
			mNext[previous] = mNext[pairIndex];
		}
		// else we were the first
		else mHashTable[hashValue] = mNext[pairIndex];
		// we're now free to reuse mNext[pairIndex] without breaking the list
	}
#if PX_DEBUG
	mNext[pairIndex]=INVALID_ID;
#endif
	// Invalidate entry
	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 lastPairIndex = mNbActivePairs-1;
		if(lastPairIndex==pairIndex)
		{
			// The removed pair is already the last one: just shrink the array.
			mNbActivePairs--;
		}
		else
		{
			// Unlink the last pair from its bucket chain, move it into the hole
			// left by the removed pair, then relink it (same bucket, new index).
			const InternalPair* last = &mActivePairs[lastPairIndex];
			const PxU32 lastHashValue = hash(last->getId0(), last->getId1()) & mMask;
			// Walk the hash table to fix mNext
			PxU32 offset = mHashTable[lastHashValue];
			PX_ASSERT(offset!=INVALID_ID);
			PxU32 previous=INVALID_ID;
			while(offset!=lastPairIndex)
			{
				previous = offset;
				offset = mNext[offset];
			}
			// Let us go/jump us
			if(previous!=INVALID_ID)
			{
				PX_ASSERT(mNext[previous]==lastPairIndex);
				mNext[previous] = mNext[lastPairIndex];
			}
			// else we were the first
			else mHashTable[lastHashValue] = mNext[lastPairIndex];
			// we're now free to reuse mNext[lastPairIndex] without breaking the list
#if PX_DEBUG
			mNext[lastPairIndex]=INVALID_ID;
#endif
			// Don't invalidate entry since we're going to shrink the array
			// 2) Re-insert in free slot
			mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
			PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
			mNext[pairIndex] = mHashTable[lastHashValue];
			mHashTable[lastHashValue] = pairIndex;
			mNbActivePairs--;
		}
	}
}

View File

@@ -0,0 +1,252 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SHARED_H
#define BP_BROADPHASE_SHARED_H
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHash.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Bp
{
#define INVALID_ID 0xffffffff
#define INVALID_USER_ID 0xffffffff
// A broad-phase pair packed into two 32-bit words. The low 31 bits of each
// word store an object id; the sign bit of id0_isNew flags "new pair" and
// the sign bit of id1_isUpdated flags "updated". The set* methods assert
// that incoming ids never use the sign bit themselves.
struct InternalPair : public PxUserAllocated
{
	// Accessors mask out the flag bit stored in the sign position.
	PX_FORCE_INLINE PxU32 getId0() const { return id0_isNew & ~PX_SIGN_BITMASK; }
	PX_FORCE_INLINE PxU32 getId1() const { return id1_isUpdated & ~PX_SIGN_BITMASK; }
	// Flag queries return the raw sign bit (non-zero == set), not 0/1.
	PX_FORCE_INLINE PxU32 isNew() const { return id0_isNew & PX_SIGN_BITMASK; }
	PX_FORCE_INLINE PxU32 isUpdated() const { return id1_isUpdated & PX_SIGN_BITMASK; }
	// Stores the ids and marks the pair as new.
	PX_FORCE_INLINE void setNewPair(PxU32 id0, PxU32 id1)
	{
		PX_ASSERT(!(id0 & PX_SIGN_BITMASK));
		PX_ASSERT(!(id1 & PX_SIGN_BITMASK));
		id0_isNew = id0 | PX_SIGN_BITMASK;
		id1_isUpdated = id1;
	}
	// Stores the ids with both flags cleared.
	PX_FORCE_INLINE void setNewPair2(PxU32 id0, PxU32 id1)
	{
		PX_ASSERT(!(id0 & PX_SIGN_BITMASK));
		PX_ASSERT(!(id1 & PX_SIGN_BITMASK));
		id0_isNew = id0;
		id1_isUpdated = id1;
	}
	// Flag mutators only touch the sign bit, leaving the stored id intact.
	PX_FORCE_INLINE void setUpdated() { id1_isUpdated |= PX_SIGN_BITMASK; }
	PX_FORCE_INLINE void clearUpdated() { id1_isUpdated &= ~PX_SIGN_BITMASK; }
	PX_FORCE_INLINE void clearNew() { id0_isNew &= ~PX_SIGN_BITMASK; }
	protected:
	PxU32 id0_isNew;		// id0 (31 bits) + "is new" flag in the sign bit
	PxU32 id1_isUpdated;	// id1 (31 bits) + "is updated" flag in the sign bit
};
// True when the stored pair is not exactly (id0, id1).
PX_FORCE_INLINE bool differentPair(const InternalPair& p, PxU32 id0, PxU32 id1) { return !(id0==p.getId0() && id1==p.getId1()); }
// Mixes the low 16 bits of each id into a single 32-bit key and hashes it.
// Note: ids whose low 16 bits collide map to the same key.
PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1)
{
	const PxU32 key = (id1<<16) | (id0&0xffff);
	return PxComputeHash(key);
}
//PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxComputeHash(PxU64(id0)|(PxU64(id1)<<32)) ; }
// Orders the two ids in place so that id0 <= id1.
PX_FORCE_INLINE void sort(PxU32& id0, PxU32& id1) { if(id1<id0) PxSwap(id0, id1); }
// Hash-based pair manager. Pairs live densely in mActivePairs; mHashTable
// maps a bucket to the index of its first pair and mNext chains pairs that
// share a bucket (separate chaining through index arrays, not pointers).
// All three arrays share the same capacity (mHashSize).
class PairManagerData
{
	public:
	PairManagerData();
	~PairManagerData();
	// Converts a pair pointer (obtained from findPair/addPairInternal) back
	// to its index in mActivePairs.
	PX_FORCE_INLINE PxU32 getPairIndex(const InternalPair* pair) const
	{
		return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(InternalPair));
	}
	// Internal version saving hash computation
	// Returns the stored pair matching (id0, id1) or NULL. 'hashValue' must be
	// the pre-masked bucket index for the (already sorted) id pair.
	PX_FORCE_INLINE InternalPair* findPair(PxU32 id0, PxU32 id1, PxU32 hashValue) const
	{
		if(!mHashTable)
			return NULL; // Nothing has been allocated yet
		InternalPair* PX_RESTRICT activePairs = mActivePairs;
		const PxU32* PX_RESTRICT next = mNext;
		// Look for it in the table
		PxU32 offset = mHashTable[hashValue];
		while(offset!=INVALID_ID && differentPair(activePairs[offset], id0, id1))
		{
			PX_ASSERT(activePairs[offset].getId0()!=INVALID_USER_ID);
			offset = next[offset]; // Better to have a separate array for this
		}
		if(offset==INVALID_ID)
			return NULL;
		PX_ASSERT(offset<mNbActivePairs);
		// Match mActivePairs[offset] => the pair is persistent
		return &activePairs[offset];
	}
	// Adds (id0, id1) to the manager. If the pair already exists it is marked
	// as updated and returned; otherwise a new pair (flagged "new") is linked
	// into its bucket, growing the table first if it is full.
	PX_FORCE_INLINE InternalPair* addPairInternal(PxU32 id0, PxU32 id1)
	{
		// Order the ids
		sort(id0, id1);
		const PxU32 fullHashValue = hash(id0, id1);
		PxU32 hashValue = fullHashValue & mMask;
		{
			InternalPair* PX_RESTRICT p = findPair(id0, id1, hashValue);
			if(p)
			{
				p->setUpdated();
				return p; // Persistent pair
			}
		}
		// This is a new pair
		if(mNbActivePairs >= mHashSize)
			hashValue = growPairs(fullHashValue);	// grow + re-derive bucket under the new mask
		const PxU32 pairIndex = mNbActivePairs++;
		InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex];
		p->setNewPair(id0, id1);
		// Push the new pair at the front of its bucket chain.
		mNext[pairIndex] = mHashTable[hashValue];
		mHashTable[hashValue] = pairIndex;
		return p;
	}
	PxU32 mHashSize;				// capacity of all three arrays (power of two)
	PxU32 mMask;					// mHashSize-1, used to mask full hash values
	PxU32 mNbActivePairs;			// number of live pairs (dense prefix of mActivePairs)
	PxU32* mHashTable;				// bucket -> first pair index (INVALID_ID if empty)
	PxU32* mNext;					// pair index -> next pair index in the same bucket
	InternalPair* mActivePairs;		// dense pair storage
	PxU32 mReservedMemory;			// capacity floor set by reserveMemory(), honored by shrinkMemory()
	void purge();
	void reallocPairs();
	void shrinkMemory();
	void reserveMemory(PxU32 memSize);
	PX_NOINLINE PxU32 growPairs(PxU32 fullHashValue);
	void removePair(PxU32 id0, PxU32 id1, PxU32 hashValue, PxU32 pairIndex);
};
// X-axis bounds of a box stored as unsigned integers, produced by encodeFloat
// from the raw float bit patterns. encodeFloat (declared elsewhere, see
// BpBroadPhaseIntegerAABB.h) presumably yields keys whose unsigned ordering
// matches the source float ordering — TODO(review) confirm.
struct AABB_Xi
{
	PX_FORCE_INLINE AABB_Xi() {}
	PX_FORCE_INLINE ~AABB_Xi() {}
	// Encodes min/max X from raw float storage (pointers to the float values).
	PX_FORCE_INLINE void initFromFloats(const void* PX_RESTRICT minX, const void* PX_RESTRICT maxX)
	{
		mMinX = encodeFloat(*reinterpret_cast<const PxU32*>(minX));
		mMaxX = encodeFloat(*reinterpret_cast<const PxU32*>(maxX));
	}
	// Convenience overload taking full bounds vectors; only the X components are used.
	PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
	{
		initFromFloats(&min.x, &max.x);
	}
	PX_FORCE_INLINE void operator = (const AABB_Xi& box)
	{
		mMinX = box.mMinX;
		mMaxX = box.mMaxX;
	}
	// Sentinel boxes terminate sorted arrays: only mMinX is written/tested,
	// mMaxX is deliberately left untouched.
	PX_FORCE_INLINE void initSentinel()
	{
		mMinX = 0xffffffff;
	}
	PX_FORCE_INLINE bool isSentinel() const
	{
		return mMinX == 0xffffffff;
	}
	PxU32 mMinX;	// encoded minimum X
	PxU32 mMaxX;	// encoded maximum X
};
// Y/Z bounds of a box with *negated* minima (mMinY = -min.y, mMinZ = -min.z),
// as opposed to AABB_YZr which stores them raw.
struct AABB_YZn
{
	PX_FORCE_INLINE AABB_YZn() {}
	PX_FORCE_INLINE ~AABB_YZn() {}
	PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
	{
		mMinY = -min.y;
		mMinZ = -min.z;
		mMaxY = max.y;
		mMaxZ = max.z;
	}
	// Copies all four floats with a single aligned SIMD load/store starting at
	// mMinY — assumes the struct is 16-byte aligned; TODO(review) confirm.
	PX_FORCE_INLINE void operator = (const AABB_YZn& box)
	{
		using namespace physx::aos;
		V4StoreA(V4LoadA(&box.mMinY), &mMinY);
	}
	float mMinY;	// negated minimum Y
	float mMinZ;	// negated minimum Z
	float mMaxY;
	float mMaxZ;
};
// Y/Z bounds of a box stored raw (no negation) — the counterpart of AABB_YZn.
struct AABB_YZr
{
	PX_FORCE_INLINE AABB_YZr() {}
	PX_FORCE_INLINE ~AABB_YZr() {}
	PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
	{
		mMinY = min.y;
		mMinZ = min.z;
		mMaxY = max.y;
		mMaxZ = max.z;
	}
	// Copies all four floats with a single aligned SIMD load/store starting at
	// mMinY — assumes the struct is 16-byte aligned; TODO(review) confirm.
	PX_FORCE_INLINE void operator = (const AABB_YZr& box)
	{
		using namespace physx::aos;
		V4StoreA(V4LoadA(&box.mMinY), &mMinY);
	}
	float mMinY;
	float mMinZ;
	float mMaxY;
	float mMaxZ;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_SHARED_H

View File

@@ -0,0 +1,143 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhase.h"
#include "common/PxProfileZone.h"
#include "foundation/PxBitMap.h"
using namespace physx;
using namespace Bp;
#if PX_CHECKED
// Checked-build validation entry point: verifies the update data's internal
// consistency, then lets the broad-phase validate it against its own state.
bool BroadPhaseUpdateData::isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp, const bool skipBoundValidation, PxU64 contextID)
{
	PX_PROFILE_ZONE("BroadPhaseUpdateData::isValid", contextID);
	return (updateData.isValid(skipBoundValidation) && bp.isValid(updateData));
}
// Validates one handle list:
// - 'handles' must be non-NULL when size > 0
// - every handle must be < capacity
// - handles must be in ascending order
// - when 'groups' is given, no handle may map to FilterGroup::eINVALID
// - when 'bounds' is given, each box must be finite with min <= max per axis
// Each valid handle is also marked in 'bitmap' so the caller can later check
// that different handle lists do not intersect.
static bool testHandles(PxU32 size, const BpHandle* handles, const PxU32 capacity, const Bp::FilterGroup::Enum* groups, const PxBounds3* bounds, PxBitMap& bitmap)
{
	if(!handles && size)
		return false;
	/* ValType minVal=0;
	ValType maxVal=0xffffffff;*/
	for(PxU32 i=0;i<size;i++)
	{
		const BpHandle h = handles[i];
		if(h>=capacity)
			return false;
		// Array in ascending order of id.
		if(i>0 && (h < handles[i-1]))
			return false;
		if(groups && groups[h]==FilterGroup::eINVALID)
			return false;
		bitmap.set(h);
		if(bounds)
		{
			if(!bounds[h].isFinite())
				return false;
			for(PxU32 j=0;j<3;j++)
			{
				//Max must be greater than min.
				if(bounds[h].minimum[j]>bounds[h].maximum[j])
					return false;
#if 0
				//Bounds have an upper limit.
				if(bounds[created[i]].getMax(j)>=maxVal)
					return false;
				//Bounds have a lower limit.
				if(bounds[created[i]].getMin(j)<=minVal)
					return false;
				//Max must be odd.
				if(4 != (bounds[created[i]].getMax(j) & 4))
					return false;
				//Min must be even.
				if(0 != (bounds[created[i]].getMin(j) & 4))
					return false;
#endif
			}
		}
	}
	return true;
}
static bool testBitmap(const PxBitMap& bitmap, PxU32 size, const BpHandle* handles)
{
while(size--)
{
const BpHandle h = *handles++;
if(bitmap.test(h))
return false;
}
return true;
}
// Internal consistency check of the update data: each of the created/updated/
// removed handle lists must pass testHandles (sorted, in range, valid groups,
// finite bounds), and the three lists must be pairwise disjoint.
// 'skipBoundValidation' disables the per-box bounds checks only.
bool BroadPhaseUpdateData::isValid(const bool skipBoundValidation) const
{
	const PxBounds3* bounds = skipBoundValidation ? NULL : getAABBs();
	const PxU32 boxesCapacity = getCapacity();
	const Bp::FilterGroup::Enum* groups = getGroups();
	// One bitmap per list, used below to verify pairwise disjointness.
	PxBitMap createdBitmap;	createdBitmap.resizeAndClear(boxesCapacity);
	PxBitMap updatedBitmap;	updatedBitmap.resizeAndClear(boxesCapacity);
	PxBitMap removedBitmap;	removedBitmap.resizeAndClear(boxesCapacity);
	if(!testHandles(getNumCreatedHandles(), getCreatedHandles(), boxesCapacity, groups, bounds, createdBitmap))
		return false;
	if(!testHandles(getNumUpdatedHandles(), getUpdatedHandles(), boxesCapacity, groups, bounds, updatedBitmap))
		return false;
	// Removed handles skip group/bounds validation: the object is going away.
	if(!testHandles(getNumRemovedHandles(), getRemovedHandles(), boxesCapacity, NULL, NULL, removedBitmap))
		return false;
	if(1)
	{
		// Created/updated
		if(!testBitmap(createdBitmap, getNumUpdatedHandles(), getUpdatedHandles()))
			return false;
		// Created/removed
		if(!testBitmap(createdBitmap, getNumRemovedHandles(), getRemovedHandles()))
			return false;
		// Updated/removed
		if(!testBitmap(updatedBitmap, getNumRemovedHandles(), getRemovedHandles()))
			return false;
	}
	return true;
}
#endif

View File

@@ -0,0 +1,74 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpFiltering.h"
using namespace physx;
using namespace Bp;
// Builds the broad-phase filtering lookup table: mLUT[a][b] is true when
// objects of filter types a and b are allowed to form a broad-phase pair.
// The table is symmetric by construction.
BpFilter::BpFilter(bool discardKineKine, bool discardStaticKine)
{
	// Start from an all-false table.
	for(int j = 0; j < Bp::FilterType::COUNT; j++)
		for(int i = 0; i < Bp::FilterType::COUNT; i++)
			mLUT[j][i] = false;

	// Helper keeping the table symmetric.
	auto enable = [this](int a, int b, bool value)
	{
		mLUT[a][b] = mLUT[b][a] = value;
	};

	// Rigid-body interactions. Static-kinematic and kinematic-kinematic pairs
	// are optional, controlled by the constructor flags.
	enable(Bp::FilterType::STATIC,		Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::STATIC,		Bp::FilterType::KINEMATIC,	!discardStaticKine);
	enable(Bp::FilterType::DYNAMIC,		Bp::FilterType::KINEMATIC,	true);
	enable(Bp::FilterType::DYNAMIC,		Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::KINEMATIC,	Bp::FilterType::KINEMATIC,	!discardKineKine);

	// Aggregates interact with every rigid-body type and with each other.
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::STATIC,		true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::KINEMATIC,	true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::DYNAMIC,	true);
	enable(Bp::FilterType::AGGREGATE,	Bp::FilterType::AGGREGATE,	true);

	// Enable deformable surface interactions.
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::DYNAMIC,				true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::STATIC,					true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::KINEMATIC,				true);
	enable(Bp::FilterType::DEFORMABLE_SURFACE,	Bp::FilterType::DEFORMABLE_SURFACE,		true);

	// Enable deformable volume interactions.
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::DYNAMIC,				true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::STATIC,					true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::KINEMATIC,				true);
	enable(Bp::FilterType::DEFORMABLE_VOLUME,	Bp::FilterType::DEFORMABLE_VOLUME,		true);

	// Enable particle system interactions.
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::DYNAMIC,			true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::STATIC,				true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::KINEMATIC,			true);
	enable(Bp::FilterType::PARTICLESYSTEM,	Bp::FilterType::PARTICLESYSTEM,		true);
}
// Nothing to release: the filter only owns its statically-sized lookup table.
BpFilter::~BpFilter()
{
}