feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,588 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometry.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "GuCCDSweepConvexMesh.h"
#ifndef PXS_CCD_H
#define PXS_CCD_H
#define CCD_DEBUG_PRINTS 0
#define CCD_POST_DEPENETRATE_DIST 0.001f
#define CCD_ROTATION_LOCKING 0
#define CCD_MIN_TIME_LEFT 0.01f
#define DEBUG_RENDER_CCD 0
#if CCD_DEBUG_PRINTS
namespace physx {
extern void printCCDDebug(const char* msg, const PxsRigidBody* atom0, PxGeometryType::Enum g0, bool printPtr = true);
extern void printShape(PxsRigidBody* atom0, PxGeometryType::Enum g0, const char* annotation, PxReal dt, PxU32 pass, bool printPtr = true);
}
#define PRINTCCDSHAPE(x) printShape x
#define PRINTCCDDEBUG(x) printCCDDebug x
#else
#define PRINTCCDSHAPE(x)
#define PRINTCCDDEBUG(x)
#endif
namespace physx
{
float computeCCDThreshold(const PxGeometry& geometry);
// ------------------------------------------------------------------------------------------------------------
// a fraction of objects will be CCD active so this is dynamic, not a member of PxsRigidBody
// CCD code builds a temporary array of PxsCCDPair objects (allocated in blocks)
// this is done to gather scattered data from memory and also to reduce PxsRigidBody permanent memory footprint
// we have to do it every pass since new CMs can become fast moving after each pass (and sometimes cease to be)
//
struct PxsCCDBody;
class PxsRigidBody;
struct PxsShapeCore;
struct PxsRigidCore;
class PxsContactManager;
class PxsContext;
class PxCCDContactModifyCallback;
class PxcNpThreadContext;
class PxvNphaseImplementationContext;
namespace Dy
{
class ThresholdStream;
}
/**
\brief structure to represent interactions between a given body and another body.
*/
struct PxsCCDOverlap
{
	// The other CCD body this interaction relates to
	PxsCCDBody* mBody;
	// The next interaction in the singly-linked overlap list (NULL terminates the list)
	PxsCCDOverlap* mNext;
};
/**
\brief Temporary CCD representation for a shape.
Stores data about a shape that may be frequently used in CCD. It also stores update counters per-shape that can be compared with the body's update
counter to determine if the shape needs its transforms re-calculated. This avoids us needing to store a list of shapes in a CCD body.
*/
struct PxsCCDShape : public Gu::CCDShape
{
	const PxsShapeCore*	mShapeCore;	// Shape core (can be shared between shapes)
	const PxsRigidCore*	mRigidCore;	// Rigid body core this shape belongs to
	PxNodeIndex			mNodeIndex;	// Island-graph node index of the owning body
};
/**
\brief Structure to represent a body in the CCD system.
*/
struct PxsCCDBody
{
Cm::SpatialVector mPreSolverVelocity;
PxU16 mIndex; //The CCD body's index
bool mPassDone; //Whether it has been processed in the current CCD pass
bool mHasAnyPassDone; //Whether this body was influenced by any passes
PxReal mTimeLeft; //CCD time left to elapse (normalized in range 0-1)
PxsRigidBody* mBody; //The rigid body
PxsCCDOverlap* mOverlappingObjects; //A list of overlapping bodies for island update
PxU32 mUpdateCount; //How many times this body has eben updated in the CCD. This is correlated with CCD shapes' update counts.
PxU32 mNbInteractionsThisPass; //How many interactions this pass
/**
\brief Returns the CCD body's index.
\return The CCD body's index.
*/
PX_FORCE_INLINE PxU32 getIndex() const { return mIndex; }
/**
\brief Tests whether this body has already registered an overlap with a given body.
\param[in] body The body to test against.
\return Whether this body has already registered an overlap with a given body.
*/
bool overlaps(PxsCCDBody* body) const
{
PxsCCDOverlap* overlaps = mOverlappingObjects;
while(overlaps)
{
if(overlaps->mBody == body)
return true;
overlaps = overlaps->mNext;
}
return false;
}
/**
\brief Registers an overlap with a given body
\param[in] overlap The CCD overlap to register.
*/
void addOverlap(PxsCCDOverlap* overlap)
{
overlap->mNext = mOverlappingObjects;
mOverlappingObjects = overlap;
}
};
/**
\brief a container class used in the CCD that minimizes frequency of hitting the allocator.
This class stores a set of blocks of memory. It is effectively an array that resizes more efficiently because it doesn't need to
reallocate an entire buffer and copy data.
*/
template<typename T, int BLOCK_SIZE>
struct PxsCCDBlockArray
{
	/**
	\brief A fixed-size block of data.
	*/
	struct Block : PxUserAllocated { T items[BLOCK_SIZE]; };
	/**
	\brief A header for a block of data.
	*/
	struct BlockInfo
	{
		Block* block;
		PxU32 count; // number of elements in this block
		BlockInfo(Block* aBlock, PxU32 aCount) : block(aBlock), count(aCount) {}
	};
	/*
	\brief The array of block headers.
	Invariants: at least one block is always present, and every block before currentBlock is full.
	Note that blocks *after* currentBlock may exist and be non-empty-capacity spares (see clear_NoDelete).
	*/
	PxArray<BlockInfo> blocks;
	/**
	\brief Index of the block currently being filled.
	*/
	PxU32 currentBlock;
	/**
	\brief Constructor. Allocates the mandatory first block.
	*/
	PxsCCDBlockArray() : currentBlock(0)
	{
		blocks.pushBack(BlockInfo(PX_NEW(Block), 0));
	}
	/**
	\brief Destructor. Releases every allocated block.
	*/
	~PxsCCDBlockArray()
	{
		for (PxU32 i = 0; i < blocks.size(); i++)
		{
			PX_DELETE(blocks[i].block);
		}
		currentBlock = 0;
	}
	/**
	\brief Clears this block array.
	\note This clear function deletes all additional blocks and re-allocates the mandatory first one.
	*/
	void clear()
	{
		for (PxU32 i = 0; i < blocks.size(); i++)
		{
			PX_DELETE(blocks[i].block);
		}
		blocks.clear();
		blocks.pushBack(BlockInfo(PX_NEW(Block), 0)); // at least one block is expected to always be present in the array
		currentBlock = 0;
	}
	/**
	\brief Clears this block array but does not release the memory. Subsequent pushes reuse the spare blocks.
	*/
	void clear_NoDelete()
	{
		currentBlock = 0;
		blocks[0].count = 0;
	}
	/**
	\brief Ensures the current block can hold one more element.
	Advances to the next block when the current one is full, allocating a new block only
	if no spare block exists past currentBlock.
	\return The block header the next element should be written to.
	*/
	BlockInfo& reserveSlot()
	{
		if (blocks[currentBlock].count == BLOCK_SIZE)
		{
			if ((currentBlock + 1) == blocks.size())
				blocks.pushBack(BlockInfo(PX_NEW(Block), 0));
			currentBlock++;
			blocks[currentBlock].count = 0;
		}
		return blocks[currentBlock];
	}
	/**
	\brief Push a new (default-state) element onto the back of the block array.
	\return The new element
	*/
	T& pushBack()
	{
		BlockInfo& info = reserveSlot();
		return info.block->items[info.count++];
	}
	/**
	\brief Pushes a new element onto the back of this array, initializing it to match the data.
	\param data The data to initialize the new element to
	\return The new element
	*/
	T& pushBack(T& data)
	{
		BlockInfo& info = reserveSlot();
		T& slot = info.block->items[info.count++];
		slot = data;
		return slot;
	}
	/**
	\brief Pops the last element from the list.
	\note Fixed two defects of the previous version:
	- popping the last element of the first block no longer deletes the mandatory block
	  and no longer underflows currentBlock (PxU32 wrap-around);
	- after clear_NoDelete(), spare blocks may exist past currentBlock; in that case the
	  previous code deleted blocks[currentBlock].block while popping the *last* BlockInfo,
	  leaking one block and leaving a dangling pointer in the array.
	*/
	void popBack()
	{
		PX_ASSERT(blocks[currentBlock].count > 0);
		if (blocks[currentBlock].count > 1)
		{
			blocks[currentBlock].count--;
			return;
		}
		// The current block becomes empty.
		if (currentBlock == 0)
		{
			// Keep the mandatory first block alive.
			blocks[0].count = 0;
			return;
		}
		if ((currentBlock + 1) == blocks.size())
		{
			// Trailing block: release it entirely (matches original behavior).
			PX_DELETE(blocks[currentBlock].block);
			blocks.popBack();
		}
		else
		{
			// Spare blocks exist past currentBlock (possible after clear_NoDelete):
			// keep them allocated, just step back.
			blocks[currentBlock].count = 0;
		}
		currentBlock--;
	}
	/**
	\brief Returns the current size of the array.
	\note Relies on the invariant that all blocks before currentBlock are full.
	\return The current size of the array.
	*/
	PxU32 size() const
	{
		return (currentBlock)*BLOCK_SIZE + blocks[currentBlock].count;
	}
	/**
	\brief Returns the element at a given index in the array
	\param[in] index The index of the element in the array
	\return The element at a given index in the array.
	*/
	T& operator[] (PxU32 index) const
	{
		PX_ASSERT(index/BLOCK_SIZE < blocks.size());
		PX_ASSERT(index%BLOCK_SIZE < blocks[index/BLOCK_SIZE].count);
		return blocks[index/BLOCK_SIZE].block->items[index%BLOCK_SIZE];
	}
};
/**
\brief A structure to represent a potential CCD interaction between a pair of shapes
*/
struct PxsCCDPair
{
	/**
	\brief Defines whether this is an estimated TOI or an accurate TOI.
	We store pairs in a priority queue based on the TOIs. We use cheap estimates to cull away work and lazily evaluate TOIs. This means that an element in the
	priority queue may either be an estimate or a precise result.
	*/
	enum E_TOIType
	{
		eEstimate,
		ePrecise
	};
	PxsRigidBody*		mBa0;					// Body A. Can be NULL for statics
	PxsRigidBody*		mBa1;					// Body B. Can be NULL for statics
	PxsCCDShape*		mCCDShape0;				// Shape A
	PxsCCDShape*		mCCDShape1;				// Shape B
	PxVec3				mMinToiNormal;			// The contact normal. Only valid for precise results. On the surface of body/shape A
	PxReal				mMinToi;				// Min TOI. Valid for both precise and estimated results but estimates may be too early (i.e. conservative).
	PxReal				mPenetrationPostStep;	// Valid only for precise sweeps. Only used for initial intersections (i.e. at TOI = 0).
	PxVec3				mMinToiPoint;			// The contact point. Only valid for precise sweep results.
	PxReal				mPenetration;			// The penetration. Only valid for precise sweep results.
	PxsContactManager*	mCm;					// The contact manager.
	PxU32				mIslandId;				// The index of the island this pair is in
	PxGeometryType::Enum mG0, mG1;				// The geometry types for shapes 0 and 1
	bool				mIsEarliestToiHit;		// Indicates this was the earliest hit for one of the bodies in the pair
	bool				mIsModifiable;			// Indicates whether this contact is modifiable
	PxU32				mFaceIndex;				// The face index. Only valid for precise sweeps involving meshes or heightfields.
	PxU16				mMaterialIndex0;		// The material index for shape 0
	PxU16				mMaterialIndex1;		// The material index for shape 1
	PxReal				mDynamicFriction;		// The dynamic friction coefficient
	PxReal				mStaticFriction;		// The static friction coefficient
	PxReal				mRestitution;			// The restitution coefficient
	PxU32				mEstimatePass;			// The current estimation pass. Used after a sweep hit was found to determine if the pair needs re-estimating.
	PxReal				mAppliedForce;			// The applied force for this pair. Only valid if the pair has been responded to.
	PxReal				mMaxImpulse;			// The maximum impulse to be applied
	E_TOIType			mToiType;				// The TOI type (estimate, precise).
	bool				mHasFriction;			// Whether we want to simulate CCD friction for this pair
	/**
	\brief Perform a precise sweep for this pair
	\param[in] threadContext The per-thread context
	\param[in] dt The time-step
	\param[in] pass The current CCD pass
	\param[in] ccdThreshold Motion threshold below which CCD is skipped for the pair
	\return The normalized TOI. <=1.0 indicates a hit. Otherwise PX_MAX_REAL.
	*/
	PxReal sweepFindToi(PxcNpThreadContext& threadContext, PxReal dt, PxU32 pass, PxReal ccdThreshold);
	/**
	\brief Performs a cheap, conservative sweep estimation for this pair
	\param[in] ccdThreshold Motion threshold below which CCD is skipped for the pair
	\return The normalized TOI. <= 1.0 indicates a potential hit, otherwise PX_MAX_REAL.
	*/
	PxReal sweepEstimateToi(PxReal ccdThreshold);
	/**
	\brief Advances this pair to the TOI
	\param[in] dt The time-step
	\param[in] clipTrajectoryToToi Indicates whether we clip the body's trajectory to the end pose. Only done in the final pass
	\return Whether the advance was successful. An advance will be unsuccessful if both bodies were already updated.
	*/
	bool sweepAdvanceToToi(PxReal dt, bool clipTrajectoryToToi);
	/**
	\brief Updates the transforms of the shapes involved in this pair.
	*/
	void updateShapes();
};
/**
\brief Block array of CCD bodies
*/
typedef PxsCCDBlockArray<PxsCCDBody, 128> PxsCCDBodyArray;
/**
\brief Block array of CCD pairs
*/
typedef PxsCCDBlockArray<PxsCCDPair, 128> PxsCCDPairArray;
/**
\brief Block array of CCD overlaps
*/
typedef PxsCCDBlockArray<PxsCCDOverlap, 128> PxsCCDOverlapArray;
/**
\brief Block array of CCD shapes
*/
typedef PxsCCDBlockArray<PxsCCDShape, 128> PxsCCDShapeArray;
/**
\brief Pair structure to be able to look-up a rigid body-shape pair in a map
*/
typedef PxPair<const PxsRigidCore*, const PxsShapeCore*> PxsRigidShapePair;
/**
\brief CCD context object.
*/
class PxsCCDContext : public PxUserAllocated
{
public:
	/**
	\brief Constructor for PxsCCDContext
	\param[in] context The PxsContext that is associated with this PxsCCDContext.
	\param[in] thresholdStream Threshold stream, stored by reference for the lifetime of this context.
	\param[in] nPhaseContext Narrow-phase implementation context, stored by reference.
	\param[in] ccdThreshold Initial CCD distance threshold.
	*/
	PxsCCDContext(PxsContext* context, Dy::ThresholdStream& thresholdStream, PxvNphaseImplementationContext& nPhaseContext, PxReal ccdThreshold);
	/**
	\brief Destructor for PxsCCDContext
	*/
	~PxsCCDContext();
	/**
	\brief Returns the CCD contact modification callback
	\return The CCD contact modification callback
	*/
	PX_FORCE_INLINE PxCCDContactModifyCallback* getCCDContactModifyCallback() const { return mCCDContactModifyCallback; }
	/**
	\brief Sets the CCD contact modification callback
	\param[in] c The CCD contact modification callback
	*/
	PX_FORCE_INLINE void setCCDContactModifyCallback(PxCCDContactModifyCallback* c) { mCCDContactModifyCallback = c; }
	/**
	\brief Returns the maximum number of CCD passes
	\return The maximum number of CCD passes
	*/
	PX_FORCE_INLINE PxU32 getCCDMaxPasses() const { return mCCDMaxPasses; }
	/**
	\brief Sets the maximum number of CCD passes
	\param[in] ccdMaxPasses The maximum number of CCD passes
	*/
	PX_FORCE_INLINE void setCCDMaxPasses(PxU32 ccdMaxPasses) { mCCDMaxPasses = ccdMaxPasses; }
	/**
	\brief Returns the current CCD pass
	\return The current CCD pass
	*/
	PX_FORCE_INLINE PxU32 getCurrentCCDPass() const { return miCCDPass; }
	/**
	\brief Returns The number of swept hits reported
	\return The number of swept hits reported
	*/
	PX_FORCE_INLINE PxI32 getNumSweepHits() const { return mSweepTotalHits; }
	/**
	\brief Returns The number of updated bodies
	\return The number of updated bodies in this CCD pass
	*/
	PX_FORCE_INLINE PxU32 getNumUpdatedBodies() const { return mUpdatedCCDBodies.size(); }
	/**
	\brief Returns The update bodies array
	\return The updated bodies array from this CCD pass
	*/
	PX_FORCE_INLINE PxsRigidBody*const* getUpdatedBodies() const { return mUpdatedCCDBodies.begin(); }
	/**
	\brief Clears the updated bodies array (size only; capacity is retained for reuse)
	*/
	PX_FORCE_INLINE void clearUpdatedBodies() { mUpdatedCCDBodies.forceSize_Unsafe(0); }
	// Accessors for the CCD distance threshold
	PX_FORCE_INLINE PxReal getCCDThreshold() const { return mCCDThreshold; }
	PX_FORCE_INLINE void setCCDThreshold(PxReal t) { mCCDThreshold = t; }
	/**
	\brief Runs the CCD contact modification.
	\param[in] contacts The list of modifiable contacts
	\param[in] contactCount The number of contacts
	\param[in] shapeCore0 The first shape core
	\param[in] shapeCore1 The second shape core
	\param[in] rigidCore0 The first rigid core
	\param[in] rigidCore1 The second rigid core
	\param[in] rigid0 The first rigid body
	\param[in] rigid1 The second rigid body
	*/
	void runCCDModifiableContact(PxModifiableContact* PX_RESTRICT contacts, PxU32 contactCount, const PxsShapeCore* PX_RESTRICT shapeCore0,
		const PxsShapeCore* PX_RESTRICT shapeCore1, const PxsRigidCore* PX_RESTRICT rigidCore0, const PxsRigidCore* PX_RESTRICT rigidCore1,
		const PxsRigidBody* PX_RESTRICT rigid0, const PxsRigidBody* PX_RESTRICT rigid1);
	/**
	\brief Performs a single CCD update
	This occurs after broad phase and is responsible for creating islands, finding the TOI of collisions, filtering contacts, issuing modification callbacks and responding to
	collisions. At the end of this phase all bodies will have stepper to their first TOI if they were involved in a CCD collision this frame.
	\param[in] dt The timestep to simulate
	\param[in] continuation The continuation task
	\param[in] islandSim The island manager
	\param[in] disableResweep If this is true, we perform a reduced-fidelity CCD approach
	\param[in] numFastMovingShapes The number of fast-moving shapes reported for this frame
	*/
	void updateCCD(PxReal dt, PxBaseTask* continuation, IG::IslandSim& islandSim, bool disableResweep, PxI32 numFastMovingShapes);
	/**
	\brief Signals the beginning of a CCD multi-pass update
	*/
	void updateCCDBegin();
	/**
	\brief Resets the CCD contact state in any contact managers that previously had a reported CCD touch. This must be called if CCD update is bypassed for a frame
	*/
	void resetContactManagers();
private:
	/**
	\brief Verifies the consistency of the CCD context at the beginning
	*/
	void verifyCCDBegin();
	/**
	\brief Cleans up after the CCD update has completed
	*/
	void updateCCDEnd();
	/**
	\brief Spawns the update island tasks after the initial sweep estimates have been performed
	\param[in] continuation The continuation task
	*/
	void postCCDSweep(PxBaseTask* continuation);
	/**
	\brief Creates contact buffers for CCD contacts. These will be sent to the user in the contact notification.
	\param[in] continuation The continuation task
	*/
	void postCCDAdvance(PxBaseTask* continuation);
	/**
	\brief The final phase of the CCD task chain. Cleans up after the parallel update/postCCDAdvance stages.
	\param[in] continuation The continuation task
	*/
	void postCCDDepenetrate(PxBaseTask* continuation);
	// Delegate tasks binding the private stage methods above into the task chain
	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDSweep> PostCCDSweepTask;
	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDAdvance> PostCCDAdvanceTask;
	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDDepenetrate> PostCCDDepenetrateTask;
	PostCCDSweepTask			mPostCCDSweepTask;
	PostCCDAdvanceTask			mPostCCDAdvanceTask;
	PostCCDDepenetrateTask		mPostCCDDepenetrateTask;
	PxCCDContactModifyCallback*	mCCDContactModifyCallback;	// User callback for modifying CCD contacts (may be NULL)
	// CCD global data
	bool						mDisableCCDResweep;			// When set, reduced-fidelity CCD (no resweep) is used
	PxU32						miCCDPass;					// Index of the current CCD pass
	PxI32						mSweepTotalHits;			// Total number of swept hits reported
	// a fraction of objects will be CCD active so PxsCCDBody is dynamic, not a member of PxsRigidBody
	PxsCCDBodyArray				mCCDBodies;
	PxsCCDOverlapArray			mCCDOverlaps;
	PxsCCDShapeArray			mCCDShapes;
	PxArray<PxsCCDBody*>		mIslandBodies;				// Bodies grouped per island
	PxArray<PxU16>				mIslandSizes;				// Number of bodies per island
	PxArray<PxsRigidBody*>		mUpdatedCCDBodies;			// Bodies updated in the current CCD pass
	PxHashMap<PxsRigidShapePair, PxsCCDShape*> mMap;		// Lookup of (rigid core, shape core) -> CCD shape
	// temporary array updated during CCD update
	//Array<PxsCCDPair> mCCDPairs;
	PxsCCDPairArray				mCCDPairs;
	PxArray<PxsCCDPair*>		mCCDPtrPairs;
	// number of pairs per island
	PxArray<PxU32>				mCCDIslandHistogram;
	// thread context valid during CCD update
	PxcNpThreadContext*			mCCDThreadContext;
	// number of pairs to process per thread
	PxU32						mCCDPairsPerBatch;
	PxU32						mCCDMaxPasses;				// Maximum number of CCD passes
	PxsContext*					mContext;					// The owning PxsContext
	Dy::ThresholdStream&		mThresholdStream;
	PxvNphaseImplementationContext& mNphaseContext;
	PxMutex						mMutex;
	PxReal						mCCDThreshold;				// CCD distance threshold
private:
	PX_NOCOPY(PxsCCDContext)
};
}
#endif

View File

@@ -0,0 +1,148 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTACT_MANAGER_H
#define PXS_CONTACT_MANAGER_H
#include "PxvConfig.h"
#include "PxcNpWorkUnit.h"
namespace physx
{
class PxsRigidBody;
namespace Dy
{
class DynamicsContext;
}
namespace Sc
{
class ShapeInteraction;
}
/**
\brief Additional header structure for CCD contact data stream.
*/
struct PxsCCDContactHeader
{
	/**
	\brief Stream for next collision. The same pair can collide multiple times during multiple CCD passes.
	*/
	PxsCCDContactHeader* nextStream;	//4 //8
	/**
	\brief Size (in bytes) of the CCD contact stream (not including force buffer)
	*/
	PxU16 contactStreamSize;			//6 //10
	/**
	\brief Defines whether the stream is from a previous pass.
	It could happen that the stream can not get allocated because we run out of memory. In that case the current event should not use the stream
	from an event of the previous pass.
	*/
	PxU16 isFromPreviousPass;			//8 //12
	// Pointer-size-dependent padding: 4 bytes on 64-bit (12-8), 8 bytes on 32-bit (12-4),
	// so the struct is 16 bytes on both — enforced by the compile-time assert below.
	PxU8 pad[12 - sizeof(PxsCCDContactHeader*)];	//16
};
// Size must be a multiple of 16 bytes.
PX_COMPILE_TIME_ASSERT((sizeof(PxsCCDContactHeader) & 0xF) == 0);
class PxsContactManager
{
public:
	PxsContactManager(PxU32 index);
	~PxsContactManager();

	// d == 0 clears the eDISABLE_STRONG_FRICTION work-unit flag, any non-zero value sets it.
	PX_FORCE_INLINE void setDisableStrongFriction(PxU32 d) { (!d) ? mNpUnit.mFlags &= ~PxcNpWorkUnitFlag::eDISABLE_STRONG_FRICTION
			: mNpUnit.mFlags |= PxcNpWorkUnitFlag::eDISABLE_STRONG_FRICTION; }

	// Thin accessors forwarding to the narrow-phase work unit.
	PX_FORCE_INLINE PxReal getRestDistance() const { return mNpUnit.mRestDistance; }
	PX_FORCE_INLINE void setRestDistance(PxReal v) { mNpUnit.mRestDistance = v; }
	PX_FORCE_INLINE PxU8 getDominance0() const { return mNpUnit.getDominance0(); }
	PX_FORCE_INLINE void setDominance0(PxU8 v) { mNpUnit.setDominance0(v); }
	PX_FORCE_INLINE PxU8 getDominance1() const { return mNpUnit.getDominance1(); }
	PX_FORCE_INLINE void setDominance1(PxU8 v) { mNpUnit.setDominance1(v); }

	// Touch status queries (see PxcNpWorkUnitStatusFlag).
	PX_FORCE_INLINE PxU16 getTouchStatus() const { return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_TOUCH); }
	PX_FORCE_INLINE PxU16 touchStatusKnown() const { return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eTOUCH_KNOWN); }
	// 1 = touching, -1 = known not touching, 0 = touch status unknown.
	PX_FORCE_INLINE PxI32 getTouchIdx() const { return (mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_TOUCH) ? 1 : (mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_NO_TOUCH ? -1 : 0); }

	PX_FORCE_INLINE PxU32 getIndex() const { return mCmIndex; }

	// CCD retouch bookkeeping (see PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH).
	PX_FORCE_INLINE PxU16 getHasCCDRetouch() const { return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH); }
	PX_FORCE_INLINE void clearCCDRetouch() { mNpUnit.mStatusFlags &= ~PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH; }
	PX_FORCE_INLINE void raiseCCDRetouch() { mNpUnit.mStatusFlags |= PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH; }

	// flags stuff - needs to be refactored
	PX_FORCE_INLINE PxIntBool isChangeable() const { return PxIntBool(mFlags & PXS_CM_CHANGEABLE); }
	// CCD is active only when both the manager's linear-CCD flag and the work unit's detect-CCD flag are set.
	PX_FORCE_INLINE PxIntBool getCCD() const { return PxIntBool((mFlags & PXS_CM_CCD_LINEAR) && (mNpUnit.mFlags & PxcNpWorkUnitFlag::eDETECT_CCD_CONTACTS)); }
	PX_FORCE_INLINE PxIntBool getHadCCDContact() const { return PxIntBool(mFlags & PXS_CM_CCD_CONTACT); }
	PX_FORCE_INLINE void setHadCCDContact() { mFlags |= PXS_CM_CCD_CONTACT; }
	void setCCD(bool enable);
	// Clears the CCD-contact flag and drops the work unit's CCD contact stream pointer.
	PX_FORCE_INLINE void clearCCDContactInfo() { mFlags &= ~PXS_CM_CCD_CONTACT; mNpUnit.mCCDContacts = NULL; }

	PX_FORCE_INLINE PxcNpWorkUnit& getWorkUnit() { return mNpUnit; }
	PX_FORCE_INLINE const PxcNpWorkUnit& getWorkUnit() const { return mNpUnit; }
	PX_FORCE_INLINE PxsRigidBody* getRigidBody0() const { return mRigidBody0; }
	PX_FORCE_INLINE PxsRigidBody* getRigidBody1() const { return mRigidBody1; }
	PX_FORCE_INLINE Sc::ShapeInteraction* getShapeInteraction() const { return mShapeInteraction; }

	// Setup solver-constraints
	PX_FORCE_INLINE void resetCachedState()
	{
		// happens when the body transform or shape relative transform changes.
		mNpUnit.clearCachedState();
	}
private:
	//KS - moving this up - we want to get at flags
	PxsRigidBody*			mRigidBody0;		// Rigid body 0 (may be NULL)
	PxsRigidBody*			mRigidBody1;		// Rigid body 1 (may be NULL)
	PxU32					mFlags;				// PXS_CM_* flags below
	PxU32					mCmIndex;			// PT: moved to padding bytes from mNpUnit
	Sc::ShapeInteraction*	mShapeInteraction;	// Owning shape interaction
	// everything required for narrow phase to run
	PxcNpWorkUnit			mNpUnit;
	enum
	{
		PXS_CM_CHANGEABLE	= (1 << 0),	// Contact is modifiable
		PXS_CM_CCD_LINEAR	= (1 << 1),	// Linear CCD requested for this pair
		PXS_CM_CCD_CONTACT	= (1 << 2)	// Pair had a CCD contact
	};
	friend class Sc::ShapeInteraction;
};
}
#endif

View File

@@ -0,0 +1,110 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTACT_MANAGER_STATE_H
#define PXS_CONTACT_MANAGER_STATE_H
#include "foundation/PxSimpleTypes.h"
namespace physx
{
struct PxsShapeCore;
/**
There is an implicit 1:1 mapping between PxgContactManagerInput and PxsContactManagerOutput. The structures are split because PxgNpContactManagerInput contains constant
data that is produced by the CPU code and PxgNpContactManagerOutput contains per-frame contact information produced by the NP.
There is also a 1:1 mapping between the PxgNpContactManager and PxsContactManager. This mapping is handled within the PxgNPhaseCore.
The previous contact states are implicitly cached in PxsContactManager and will be propagated to the solver. Friction correlation is also done implicitly using cached
information in PxsContactManager.
The NP will produce a list of pairs that found/lost patches for the solver along with updating the PxgNpContactManagerOutput for all pairs.
*/
// Status bits describing the touch/constraint state of a contact manager.
struct PxsContactManagerStatusFlag
{
	enum Enum
	{
		eHAS_NO_TOUCH				= (1 << 0),	// Narrowphase determined the pair is not touching
		eHAS_TOUCH					= (1 << 1),	// Narrowphase determined the pair is touching
		//eHAS_SOLVER_CONSTRAINTS		= (1 << 2),
		eREQUEST_CONSTRAINTS		= (1 << 3),
		eHAS_CCD_RETOUCH			= (1 << 4),	// Marks pairs that are touching at a CCD pass and were touching at discrete collision or at a previous CCD pass already
												// but we can not tell whether they lost contact in a pass before. We send them as pure eNOTIFY_TOUCH_CCD events to the
												// contact report callback if requested.
		eDIRTY_MANAGER				= (1 << 5),
		eTOUCH_KNOWN				= eHAS_NO_TOUCH | eHAS_TOUCH,	// The touch status is known (if narrowphase never ran for a pair then no flag will be set)
		eSTATIC_OR_KINEMATIC		= (1 << 6)
	};
};
// Per-frame narrowphase output for one contact manager (see file-level comment above).
struct PX_ALIGN_PREFIX(16) PxsContactManagerOutput
{
	PxU8* contactPatches;			//Start index/ptr for contact patches
	PxU8* contactPoints;			//Start index/ptr for contact points
	PxReal* contactForces;			//Start index/ptr for contact forces
	PxU8* frictionPatches;			//Contact patches friction information
	PxU8 allflagsStart;				//padding for compatibility with existing code
	PxU8 nbPatches;					//Num patches
	PxU8 statusFlag;				//Status flag (has touch etc.)
	PxU8 prevPatches;				//Previous number of patches
	PxU16 nbContacts;				//Num contacts
	PxU16 flags;					//Not really part of outputs, but we have 4 bytes of padding, so why not?
	PxU8 pad[8];					//Explicit padding up to the 16-byte-multiple size asserted below

	// Internal face indices are stored immediately after the nbContacts contact forces;
	// returns NULL when there is no force buffer.
	PX_FORCE_INLINE PxU32* getInternalFaceIndice() const
	{
		return contactForces ? reinterpret_cast<PxU32*>(contactForces + nbContacts) : NULL;
	}
}
PX_ALIGN_SUFFIX(16);
// Size must be a multiple of 16 bytes.
PX_COMPILE_TIME_ASSERT((sizeof(PxsContactManagerOutput) & 0xf) == 0);
// Compact (4-byte) per-manager patch/status counts.
struct PX_ALIGN_PREFIX(4) PxsContactManagerOutputCounts
{
	PxU8 nbPatches;		//Num patches
	PxU8 prevPatches;	//Previous number of patches
	PxU8 statusFlag;	//Status flag
	PxU8 unused;		//Unused/padding
} PX_ALIGN_SUFFIX(4);
// Torsional-friction parameters (patch radius and its lower bound).
struct PX_ALIGN_PREFIX(8) PxsTorsionalFrictionData
{
	PxReal mTorsionalPatchRadius;
	PxReal mMinTorsionalRadius;

	// Zero-initialize both members: the previous empty default constructor left them
	// indeterminate, and reading indeterminate values is undefined behavior.
	PxsTorsionalFrictionData() : mTorsionalPatchRadius(0.0f), mMinTorsionalRadius(0.0f) {}
	PxsTorsionalFrictionData(const PxReal patchRadius, const PxReal minPatchRadius) :
		mTorsionalPatchRadius(patchRadius), mMinTorsionalRadius(minPatchRadius) {}
} PX_ALIGN_SUFFIX(8);
}
#endif //PXS_CONTACT_MANAGER_STATE_H

View File

@@ -0,0 +1,302 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTEXT_H
#define PXS_CONTEXT_H
#include "foundation/PxPinnedArray.h"
#include "foundation/PxPool.h"
#include "PxVisualizationParameter.h"
#include "PxSceneDesc.h"
#include "common/PxRenderOutput.h"
#include "CmPool.h"
#include "PxvNphaseImplementationContext.h"
#include "PxvSimStats.h"
#include "PxsContactManager.h"
#include "PxcNpBatch.h"
#include "PxcConstraintBlockStream.h"
#include "PxcNpCacheStreamPair.h"
#include "PxcNpMemBlockPool.h"
#include "CmUtils.h"
#include "CmTask.h"
#include "PxContactModifyCallback.h"
#include "PxsTransformCache.h"
#include "GuPersistentContactManifold.h"
#include "PxcNpThreadContext.h"
namespace physx
{
#if PX_SUPPORT_GPU_PHYSX
class PxCudaContextManager;
#endif
class PxsRigidBody;
struct PxcConstraintBlock;
class PxsMaterialManager;
class PxsCCDContext;
struct PxsContactManagerOutput;
struct PxvContactManagerTouchEvent;
namespace Cm
{
class FlushPool;
}
namespace IG
{
typedef PxU32 EdgeIndex;
}
/**
\brief Indices into the per-context touch-event counter array (see PxsContext::mCMTouchEventCount).
*/
enum PxsTouchEventCount
{
	PXS_LOST_TOUCH_COUNT,	// pairs that lost touch
	PXS_NEW_TOUCH_COUNT,	// pairs that gained touch
	PXS_CCD_RETOUCH_COUNT,	// pairs that are touching at a CCD pass and were touching at discrete collision or at a previous CCD pass already
							// (but they could have lost touch in between)
	PXS_TOUCH_EVENT_COUNT	// number of categories == size of the counter array
};
/**
\brief Low-level per-scene collision context.

Owns the shared narrowphase resources: the contact-manager pool, per-thread
narrowphase contexts, persistent-contact-manifold pools, the shape transform
cache, touch-event bookkeeping (bitmaps + counters), debug-visualization
parameters and simulation statistics. Also holds the hooks to the
platform-dependent narrowphase implementation contexts (primary and fallback).
*/
class PxsContext : public PxUserAllocated, public PxcNpContext
{
	PX_NOCOPY(PxsContext)
public:
	PxsContext(const PxSceneDesc& desc, PxTaskManager*, Cm::FlushPool&, PxCudaContextManager*, PxU32 poolSlabSize, PxU64 contextID);
	~PxsContext();

	// Creates the shape transform cache using the given allocator callback.
	void						createTransformCache(PxVirtualAllocatorCallback& allocatorCallback);

	PxsContactManager*			createContactManager(PxsContactManager* contactManager, bool useCCD);
	void						createCache(Gu::Cache& cache, PxGeometryType::Enum geomType0, PxGeometryType::Enum geomType1);
	void						destroyCache(Gu::Cache& cache);
	void						destroyContactManager(PxsContactManager* cm);

	PX_FORCE_INLINE	PxU64		getContextId() const { return mContextID; }

	// Collision properties
	PX_FORCE_INLINE	PxContactModifyCallback*	getContactModifyCallback() const { return mContactModifyCallback; }
	// Stores the callback and forwards it to the narrowphase implementation context.
	PX_FORCE_INLINE	void						setContactModifyCallback(PxContactModifyCallback* c) { mContactModifyCallback = c; mNpImplementationContext->setContactModifyCallback(c);}

	// resource-related
	void						setScratchBlock(void* addr, PxU32 size);

	// Borrowed pointer: the contact-distance array is owned by the caller.
	PX_FORCE_INLINE	void		setContactDistance(const PxFloatArrayPinned* contactDistances) { mContactDistances = contactDistances; }

	// Task-related
	void						updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation,
									PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShapeTask);
	void						secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation);
	void						fetchUpdateContactManager();
	void						swapStreams();
	void						resetThreadContexts();

	// Manager status change
	bool						getManagerTouchEventCount(PxU32* newTouch, PxU32* lostTouch, PxU32* ccdTouch) const;
	void						fillManagerTouchEvents(
									PxvContactManagerTouchEvent* newTouch, PxU32& newTouchCount,
									PxvContactManagerTouchEvent* lostTouch, PxU32& lostTouchCount,
									PxvContactManagerTouchEvent* ccdTouch, PxU32& ccdTouchCount);

	void						beginUpdate();

	// PX_ENABLE_SIM_STATS
	PX_FORCE_INLINE	PxvSimStats&		getSimStats()		{ return mSimStats; }
	PX_FORCE_INLINE	const PxvSimStats&	getSimStats() const	{ return mSimStats; }

	PX_FORCE_INLINE	Cm::FlushPool&		getTaskPool() const		{ return mTaskPool; }
	PX_FORCE_INLINE	PxRenderBuffer&		getRenderBuffer()		{ return mRenderBuffer; }

	PX_FORCE_INLINE	PxReal				getRenderScale() const	{ return mVisualizationParams[PxVisualizationParameter::eSCALE]; }

	PX_FORCE_INLINE	PxReal				getVisualizationParameter(PxVisualizationParameter::Enum param) const
										{
											PX_ASSERT(param < PxVisualizationParameter::eNUM_VALUES);
											return mVisualizationParams[param];
										}

	PX_FORCE_INLINE	void				setVisualizationParameter(PxVisualizationParameter::Enum param, PxReal value)
										{
											PX_ASSERT(param < PxVisualizationParameter::eNUM_VALUES);
											PX_ASSERT(value >= 0.0f);
											mVisualizationParams[param] = value;
										}

	PX_FORCE_INLINE	void				setVisualizationCullingBox(const PxBounds3& box)	{ mVisualizationCullingBox = box; }
	PX_FORCE_INLINE	const PxBounds3&	getVisualizationCullingBox() const					{ return mVisualizationCullingBox; }

	PX_FORCE_INLINE	bool				getPCM()				const	{ return mPCM; }
	PX_FORCE_INLINE	bool				getContactCacheFlag()	const	{ return mContactCache; }
	PX_FORCE_INLINE	bool				getCreateAveragePoint()	const	{ return mCreateAveragePoint; }

	// general stuff
	void								shiftOrigin(const PxVec3& shift);

	PX_FORCE_INLINE	void				setPCM(bool enabled)			{ mPCM = enabled; }
	PX_FORCE_INLINE	void				setContactCache(bool enabled)	{ mContactCache = enabled; }

	PX_FORCE_INLINE	PxcScratchAllocator&	getScratchAllocator()	{ return mScratchAllocator; }
	PX_FORCE_INLINE	PxsTransformCache&		getTransformCache()		{ return *mTransformCache; }
	PX_FORCE_INLINE	const PxReal*			getContactDistances() const	{ return mContactDistances->begin(); }

	PX_FORCE_INLINE	PxvNphaseImplementationContext*	getNphaseImplementationContext() const			{ return mNpImplementationContext; }
	PX_FORCE_INLINE	void							setNphaseImplementationContext(PxvNphaseImplementationContext* ctx)	{ mNpImplementationContext = ctx; }

	PX_FORCE_INLINE	PxvNphaseImplementationContext*	getNphaseFallbackImplementationContext() const	{ return mNpFallbackImplementationContext; }
	PX_FORCE_INLINE	void							setNphaseFallbackImplementationContext(PxvNphaseImplementationContext* ctx)	{ mNpFallbackImplementationContext = ctx; }

	PxU32								getMaxPatchCount() const { return mMaxPatches; }

	PX_FORCE_INLINE	PxcNpThreadContext*	getNpThreadContext()
	{
		// We may want to conditional compile to exclude this on single threaded implementations
		// if it is determined to be a performance hit.
		return mNpThreadContextPool.get();
	}

	PX_FORCE_INLINE	void				putNpThreadContext(PxcNpThreadContext* threadContext)
										{ mNpThreadContextPool.put(threadContext); }

	PX_FORCE_INLINE	PxMutex&			getLock() { return mLock; }

	PX_FORCE_INLINE	PxTaskManager&		getTaskManager()
	{
		PX_ASSERT(mTaskManager);
		return *mTaskManager;
	}

	PX_FORCE_INLINE	PxCudaContextManager*	getCudaContextManager()
	{
		return mCudaContextManager;
	}

	PX_FORCE_INLINE	void				clearManagerTouchEvents();

	PX_FORCE_INLINE	Cm::PoolList<PxsContactManager>&	getContactManagerPool()
	{
		return mContactManagerPool;
	}

	PX_FORCE_INLINE	void				setActiveContactManager(const PxsContactManager* manager, PxIntBool useCCD)
	{
		/*const PxU32 index = manager->getIndex();
		if(index >= mActiveContactManager.size())
		{
			const PxU32 newSize = (2 * index + 256)&~255;
			mActiveContactManager.resize(newSize);
		}
		mActiveContactManager.set(index);*/

		//Record any pairs that have CCD enabled!
		if(useCCD)
		{
			const PxU32 index = manager->getIndex();
			// Grow the bitmap in 256-entry steps to amortize resizes.
			if(index >= mActiveContactManagersWithCCD.size())
			{
				const PxU32 newSize = (2 * index + 256)&~255;
				mActiveContactManagersWithCCD.resize(newSize);
			}
			mActiveContactManagersWithCCD.set(index);
		}
	}

private:
	void								mergeCMDiscreteUpdateResults(PxBaseTask* continuation);

	// Threading
	PxcThreadCoherentCache<PxcNpThreadContext, PxcNpContext>
										mNpThreadContextPool;

	// Contact managers
	Cm::PoolList<PxsContactManager>				mContactManagerPool;
	PxPool<Gu::LargePersistentContactManifold>	mManifoldPool;
	PxPool<Gu::SpherePersistentContactManifold>	mSphereManifoldPool;

	// PxBitMap mActiveContactManager;
	PxBitMap							mActiveContactManagersWithCCD; //KS - adding to filter any pairs that had a touch
	PxBitMap							mContactManagersWithCCDTouch; //KS - adding to filter any pairs that had a touch
	PxBitMap							mContactManagerTouchEvent;
	//Cm::BitMap mContactManagerPatchChangeEvent;
	PxU32								mCMTouchEventCount[PXS_TOUCH_EVENT_COUNT];	// counters indexed by PxsTouchEventCount

	PxMutex								mLock;

	PxContactModifyCallback*			mContactModifyCallback;

	// narrowphase platform-dependent implementations support
	PxvNphaseImplementationContext*		mNpImplementationContext;
	PxvNphaseImplementationContext*		mNpFallbackImplementationContext;

	// debug rendering (CS TODO: MS would like to have these wrapped into a class)
	PxReal								mVisualizationParams[PxVisualizationParameter::eNUM_VALUES];
	PxBounds3							mVisualizationCullingBox;

	PxTaskManager*						mTaskManager;
	Cm::FlushPool&						mTaskPool;
	PxCudaContextManager*				mCudaContextManager;	// may be NULL — TODO confirm (getCudaContextManager returns it unchecked)

	// PxU32 mTouchesLost;
	// PxU32 mTouchesFound;

	// PX_ENABLE_SIM_STATS
	PxvSimStats							mSimStats;
	bool								mPCM;					// persistent contact manifolds enabled
	bool								mContactCache;
	bool								mCreateAveragePoint;

	PxsTransformCache*					mTransformCache;
	const PxFloatArrayPinned*			mContactDistances;		// non-owning, set via setContactDistance()

	PxU32								mMaxPatches;

	const PxU64							mContextID;

	friend class PxsCCDContext;
	friend class PxsNphaseImplementationContext;
	friend class PxgNphaseImplementationContext; //FDTODO ideally it shouldn't be here..
};
/**
\brief Resets the per-manager touch-event bitmap and zeroes every touch-event counter.
Called to clear the bookkeeping accumulated during a collision pass.
*/
PX_FORCE_INLINE void PxsContext::clearManagerTouchEvents()
{
	mContactManagerTouchEvent.clear();

	PxU32 remaining = PXS_TOUCH_EVENT_COUNT;
	while(remaining)
		mCMTouchEventCount[--remaining] = 0;
}
}
#endif

View File

@@ -0,0 +1,93 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_HEAP_MEMORY_ALLOCATOR_H
#define PXS_HEAP_MEMORY_ALLOCATOR_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
struct PxsHeapStats
{
enum Enum
{
eOTHER = 0,
eBROADPHASE,
eNARROWPHASE,
eSOLVER,
eARTICULATION,
eSIMULATION,
eSIMULATION_ARTICULATION,
eSIMULATION_PARTICLES,
eSIMULATION_SOFTBODY,
eSIMULATION_FEMCLOTH,
eSHARED_PARTICLES,
eSHARED_SOFTBODY,
eSHARED_FEMCLOTH,
eHEAPSTATS_COUNT
};
PxU64 stats[eHEAPSTATS_COUNT];
PxsHeapStats()
{
for (PxU32 i = 0; i < eHEAPSTATS_COUNT; i++)
{
stats[i] = 0;
}
}
};
// PT: TODO: consider dropping this class
/**
\brief Heap memory allocator interface.

Pure marker type: the actual allocation entry points are inherited from
PxVirtualAllocatorCallback (see the commented-out signatures below); this class
only adds PxUserAllocated bookkeeping and a virtual destructor.
*/
class PxsHeapMemoryAllocator : public PxVirtualAllocatorCallback, public PxUserAllocated
{
public:
	virtual ~PxsHeapMemoryAllocator(){}

	// PxVirtualAllocatorCallback
	//virtual void* allocate(size_t size, int group, const char* file, int line) = 0;
	//virtual void deallocate(void* ptr) = 0;
	//~PxVirtualAllocatorCallback
};
/**
\brief Abstract manager for heap memory allocators, exposing device (GPU) heap queries.
*/
class PxsHeapMemoryAllocatorManager : public PxUserAllocated
{
public:
	virtual ~PxsHeapMemoryAllocatorManager() {}

	virtual PxU64 getDeviceMemorySize() const = 0;			// device memory size — exact semantics defined by the concrete implementation
	virtual PxsHeapStats getDeviceHeapStats() const = 0;	// per-category device heap statistics snapshot
	virtual void flushDeferredDeallocs() = 0;				// processes deallocations that implementations may have deferred

	// NOTE(review): raw public pointer — ownership/lifetime handled by the concrete implementation (not visible here).
	PxsHeapMemoryAllocator* mMappedMemoryAllocators;
};
}
#endif

View File

@@ -0,0 +1,142 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_ISLAND_MANAGER_TYPES_H
#define PXS_ISLAND_MANAGER_TYPES_H
namespace physx
{
class PxsContactManager;
typedef PxU32 NodeType;
typedef PxU32 EdgeType;
/**
\brief Per-island object counts/indices used by the island manager.
\note The default constructor intentionally leaves all members uninitialized;
callers are expected to fill them in. Exact semantics (count vs. start index)
are defined by the island-manager code that populates this struct.
*/
class PxsIslandIndices
{
public:
	PxsIslandIndices() {}
	~PxsIslandIndices() {}

	NodeType bodies;			// rigid bodies
	NodeType articulations;		// articulations
	EdgeType contactManagers;	// contact-manager edges
	EdgeType constraints;		// constraint edges
};
// PT: it needs to be a PxU64 because we store a PxNodeIndex there for articulations (and we do use all the data)
typedef PxU64 PxsNodeType;
/**
\brief Each contact manager or constraint references two separate bodies, where
a body can be a dynamic rigid body, a kinematic rigid body, an articulation or a static.
The struct PxsIndexedInteraction describes the bodies that make up the pair.
*/
struct PxsIndexedInteraction // 24 bytes (see size comment retained from original)
{
	/**
	\brief An enumerated list of all possible body types.
	A body type is stored for each body in the pair.
	*/
	enum Enum
	{
		eBODY = 0,
		eKINEMATIC = 1,
		eARTICULATION = 2,
		eWORLD = 3
	};

	/**
	\brief An index describing how to access body0
	\note If body0 is a dynamic (eBODY) rigid body then solverBody0 is an index into PxsIslandObjects::bodies.
	\note If body0 is a kinematic (eKINEMATIC) rigid body then solverBody0 is an index into PxsIslandManager::getActiveKinematics.
	\note If body0 is a static (eWORLD) then solverBody0 is PX_MAX_U32 or PX_MAX_U64, depending on the platform being 32- or 64-bit.
	\note If body0 is an articulation then the articulation is found directly from Dy::getArticulation(articulation0)
	\note If body0 is an deformable volume then the deformable volume is found directly from Dy::getDeformableVolume(deformableVolume0)
	*/
	union
	{
		PxsNodeType solverBody0;	// index interpretation depends on indexType0 (see notes above)
		PxsNodeType articulation0;	// same storage, used when indexType0 == eARTICULATION
	};

	/**
	\brief An index describing how to access body1
	\note If body1 is a dynamic (eBODY) rigid body then solverBody1 is an index into PxsIslandObjects::bodies.
	\note If body1 is a kinematic (eKINEMATIC) rigid body then solverBody1 is an index into PxsIslandManager::getActiveKinematics.
	\note If body1 is a static (eWORLD) then solverBody1 is PX_MAX_U32 or PX_MAX_U64, depending on the platform being 32- or 64-bit.
	\note If body1 is an articulation then the articulation is found directly from Dy::getArticulation(articulation1)
	\note If body0 is an deformable volume then the deformable volume is found directly from Dy::getDeformableVolume(deformableVolume1)
	*/
	union
	{
		PxsNodeType solverBody1;	// index interpretation depends on indexType1 (see notes above)
		PxsNodeType articulation1;	// same storage, used when indexType1 == eARTICULATION
	};

	/**
	\brief The type (eBODY, eKINEMATIC etc) of body0
	*/
	PxU8 indexType0;

	/**
	\brief The type (eBODY, eKINEMATIC etc) of body1
	*/
	PxU8 indexType1;

	PxU8 pad[2];	// explicit padding to keep the struct size/alignment stable
};
// PT: TODO: this is the only type left, merge it with base class and stop wasting padding bytes
/**
\see PxsIslandObjects, PxsIndexedInteraction
*/
struct PxsIndexedContactManager : public PxsIndexedInteraction // 32 bytes (size comment retained from original)
{
	/**
	\brief The contact manager corresponds to the value set in PxsIslandManager::setEdgeRigidCM
	*/
	PxsContactManager* contactManager;

	// Note: only contactManager is initialized here; the inherited PxsIndexedInteraction
	// fields are left for the caller to fill in.
	PxsIndexedContactManager(PxsContactManager* cm) : contactManager(cm) {}
};
#if !PX_X64
PX_COMPILE_TIME_ASSERT(0==(sizeof(PxsIndexedContactManager) & 0x0f));
#endif
} //namespace physx
#endif

View File

@@ -0,0 +1,858 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_ISLAND_SIM_H
#define PXS_ISLAND_SIM_H
#include "foundation/PxAssert.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxArray.h"
#include "CmPriorityQueue.h"
#include "CmBlockArray.h"
#include "PxNodeIndex.h"
namespace physx
{
struct PartitionEdge;
namespace IG
{
#define IG_INVALID_ISLAND 0xFFFFFFFFu
#define IG_INVALID_EDGE 0xFFFFFFFFu
#define IG_LIMIT_DIRTY_NODES 0
#define IG_SANITY_CHECKS 0
typedef PxU32 IslandId;
typedef PxU32 EdgeIndex;
typedef PxU32 EdgeInstanceIndex;
/**
\brief An edge in the island constraint graph.

State is a bitfield over EdgeState: the private setters/clearers below flip
individual bits, and the public PxIntBool accessors test them.
mNextIslandEdge/mPrevIslandEdge link the edge into its island's intrusive
per-type edge list (see Island::mFirstEdge/mLastEdge).
*/
struct Edge
{
	//Edge instances can be implicitly calculated based on this edge index, which is an offset into the array of edges.
	//From that, the child edge index is simply the
	//The constraint or contact referenced by this edge
	enum EdgeType
	{
		eCONTACT_MANAGER,
		eCONSTRAINT,
		eSOFT_BODY_CONTACT,
		eFEM_CLOTH_CONTACT,
		ePARTICLE_SYSTEM_CONTACT,
		eEDGE_TYPE_COUNT
	};

	enum EdgeState
	{
		eINSERTED			= 1<<0,	// edge has been inserted into the graph
		ePENDING_DESTROYED	= 1<<1,	// destruction requested but not yet processed
		eACTIVE				= 1<<2,	// edge is currently active
		eIN_DIRTY_LIST		= 1<<3,	// edge is queued in a dirty-edge list
		eDESTROYED			= 1<<4,	// edge is destroyed (the default-constructed state)
		eREPORT_ONLY_DESTROY= 1<<5,	// destruction is report-only — TODO confirm exact semantics against the processing code
		eACTIVATING			= 1<<6	// edge is in the process of being activated
	};

	PxU16		mEdgeType;	// PT: EdgeType. Could be PxU8.
	PxU16		mEdgeState;	// PT: could be PxU8.

	EdgeIndex	mNextIslandEdge, mPrevIslandEdge;	// intrusive island edge-list links

	PX_FORCE_INLINE void setInserted()			{ mEdgeState |= eINSERTED; }
	PX_FORCE_INLINE void clearInserted()		{ mEdgeState &= ~eINSERTED; }
	PX_FORCE_INLINE void clearDestroyed()		{ mEdgeState &= ~eDESTROYED; }
	PX_FORCE_INLINE void setPendingDestroyed()	{ mEdgeState |= ePENDING_DESTROYED; }
	PX_FORCE_INLINE void clearPendingDestroyed(){ mEdgeState &= ~ePENDING_DESTROYED; }
	PX_FORCE_INLINE void activateEdge()			{ mEdgeState |= eACTIVE; }
	PX_FORCE_INLINE void deactivateEdge()		{ mEdgeState &= ~eACTIVE; }
	PX_FORCE_INLINE void markInDirtyList()		{ mEdgeState |= eIN_DIRTY_LIST; }
	PX_FORCE_INLINE void clearInDirtyList()		{ mEdgeState &= ~eIN_DIRTY_LIST; }
	PX_FORCE_INLINE void setReportOnlyDestroy()	{ mEdgeState |= eREPORT_ONLY_DESTROY; }

public:
	// Default: contact-manager edge, in the destroyed state, not linked into any island.
	Edge() : mEdgeType(Edge::eCONTACT_MANAGER), mEdgeState(eDESTROYED),
		mNextIslandEdge(IG_INVALID_EDGE), mPrevIslandEdge(IG_INVALID_EDGE)
	{
	}

	PX_FORCE_INLINE PxIntBool isInserted()			const	{ return PxIntBool(mEdgeState & eINSERTED); }
	PX_FORCE_INLINE PxIntBool isDestroyed()			const	{ return PxIntBool(mEdgeState & eDESTROYED); }
	PX_FORCE_INLINE PxIntBool isPendingDestroyed()	const	{ return PxIntBool(mEdgeState & ePENDING_DESTROYED); }
	PX_FORCE_INLINE PxIntBool isActive()			const	{ return PxIntBool(mEdgeState & eACTIVE); }
	PX_FORCE_INLINE PxIntBool isInDirtyList()		const	{ return PxIntBool(mEdgeState & eIN_DIRTY_LIST); }
	PX_FORCE_INLINE PxIntBool isReportOnlyDestroy()	const	{ return PxIntBool(mEdgeState & eREPORT_ONLY_DESTROY); }

	PX_FORCE_INLINE EdgeType getEdgeType()			const	{ return EdgeType(mEdgeType); }
};
/**
\brief Doubly-linked-list links chaining the edge instances attached to a node.
Both links start out invalid (not linked).
*/
struct EdgeInstance
{
	EdgeInstanceIndex mNextEdge, mPrevEdge; //The next edge instance in this node's list of edge instances

	EdgeInstance() : mNextEdge(IG_INVALID_EDGE), mPrevEdge(IG_INVALID_EDGE)
	{
	}
};
/**
\brief Allocates and recycles integer handles.

Handles are issued from an increasing counter (mCurrentHandle); freed handles
go onto a free list and are preferentially reused by getHandle(). A handle is
"valid" iff it is below the high-water mark, so getTotalHandles() is an upper
bound usable for sizing per-handle arrays.
*/
template<typename Handle>
class HandleManager
{
	PxArray<Handle> mFreeHandles;	// recycled handles, reused LIFO
	Handle mCurrentHandle;			// high-water mark: next never-issued handle

public:
	HandleManager() : mFreeHandles("FreeHandles"), mCurrentHandle(0)
	{
	}

	~HandleManager(){}

	// Returns a recycled handle if available, otherwise a fresh one.
	Handle getHandle()
	{
		if(mFreeHandles.size())
		{
			Handle handle = mFreeHandles.popBack();
			PX_ASSERT(isValidHandle(handle));
			return handle;
		}
		return mCurrentHandle++;
	}

	// O(n) scan of the free list; only intended for use in asserts.
	bool isNotFreeHandle(Handle handle) const
	{
		for(PxU32 a = 0; a < mFreeHandles.size(); ++a)
		{
			if(mFreeHandles[a] == handle)
				return false;
		}
		return true;
	}

	// Returns a handle to the manager. Double-frees and out-of-range handles are caught by asserts.
	void freeHandle(Handle handle)
	{
		PX_ASSERT(isValidHandle(handle));
		PX_ASSERT(isNotFreeHandle(handle));

		// Fix: the original compared 'handle == mCurrentHandle', which can never be true
		// (isValidHandle guarantees handle < mCurrentHandle, and getHandle only issues
		// values below it), so the high-water mark was never reclaimed. Freeing the most
		// recently issued handle now shrinks the mark instead of growing the free list.
		if(handle + 1 == mCurrentHandle)
			mCurrentHandle--;
		else
			mFreeHandles.pushBack(handle);
	}

	bool isValidHandle(Handle handle) const
	{
		return handle < mCurrentHandle;
	}

	// Upper bound on all handles ever issued and still potentially live.
	PX_FORCE_INLINE PxU32 getTotalHandles() const { return mCurrentHandle; }
};
/**
\brief A node in the island graph (one per body/articulation/particle system etc.).

State is a bitfield over State; private setters flip bits and public PxIntBool
accessors test them. mNextNode/mPrevNode link the node into its island's
intrusive node list.
*/
class Node
{
public:
	enum NodeType
	{
		eRIGID_BODY_TYPE,
		eARTICULATION_TYPE,
		eDEFORMABLE_SURFACE_TYPE,
		eDEFORMABLE_VOLUME_TYPE,
		ePARTICLESYSTEM_TYPE,
		eTYPE_COUNT
	};

	enum State
	{
		eREADY_FOR_SLEEPING	= 1u << 0,	//! Ready to go to sleep
		eACTIVE				= 1u << 1,	//! Active
		eKINEMATIC			= 1u << 2,	//! Kinematic
		eDELETED			= 1u << 3,	//! Is pending deletion
		eDIRTY				= 1u << 4,	//! Is dirty (i.e. lost a connection)
		eACTIVATING			= 1u << 5	//! Is in the activating list
	};

	EdgeInstanceIndex	mFirstEdgeIndex;	// head of this node's edge-instance list

	PxU8				mFlags;				// bitfield over State
	PxU8				mType;				// NodeType
	PxU16				mStaticTouchCount;
	//PxU32 mActiveNodeIndex; //! Look-up for this node in the active nodes list, activating list or deactivating list...

	PxNodeIndex			mNextNode, mPrevNode;	// intrusive island node-list links

	//A counter for the number of active references to this body. Whenever an edge is activated, this is incremented.
	//Whenver an edge is deactivated, this is decremented. This is used for kinematic bodies to determine if they need
	//to be in the active kinematics list
	PxU32				mActiveRefCount;

	//A node can correspond with one kind of user-defined object
	void*				mObject;

	PX_FORCE_INLINE Node() : mType(eRIGID_BODY_TYPE) { reset(); }
	PX_FORCE_INLINE ~Node() { }

	// Restores the freshly-constructed state: no edges, deleted, no object, zeroed counters.
	PX_FORCE_INLINE void reset()
	{
		mFirstEdgeIndex = IG_INVALID_EDGE;
		mFlags = eDELETED;
		mObject = NULL;
		mActiveRefCount = 0;
		mStaticTouchCount = 0;
	}

	PX_FORCE_INLINE void setActive()				{ mFlags |= eACTIVE; }
	PX_FORCE_INLINE void clearActive()				{ mFlags &= ~eACTIVE; }
	PX_FORCE_INLINE void setActivating()			{ mFlags |= eACTIVATING; }
	PX_FORCE_INLINE void clearActivating()			{ mFlags &= ~eACTIVATING; }

	//Activates a body/node.
	PX_FORCE_INLINE void setIsReadyForSleeping()	{ mFlags |= eREADY_FOR_SLEEPING; }
	PX_FORCE_INLINE void clearIsReadyForSleeping()	{ mFlags &= (~eREADY_FOR_SLEEPING); }
	PX_FORCE_INLINE void setIsDeleted()				{ mFlags |= eDELETED; }
	PX_FORCE_INLINE void setKinematicFlag()			{ PX_ASSERT(!isKinematic()); mFlags |= eKINEMATIC; }
	PX_FORCE_INLINE void clearKinematicFlag()		{ PX_ASSERT(isKinematic()); mFlags &= (~eKINEMATIC); }
	PX_FORCE_INLINE void markDirty()				{ mFlags |= eDIRTY; }
	PX_FORCE_INLINE void clearDirty()				{ mFlags &= (~eDIRTY); }

public:
	PX_FORCE_INLINE PxIntBool isActive()				const	{ return PxIntBool(mFlags & eACTIVE); }
	PX_FORCE_INLINE PxIntBool isActiveOrActivating()	const	{ return PxIntBool(mFlags & (eACTIVE | eACTIVATING)); }
	PX_FORCE_INLINE PxIntBool isActivating()			const	{ return PxIntBool(mFlags & eACTIVATING); }
	PX_FORCE_INLINE PxIntBool isKinematic()				const	{ return PxIntBool(mFlags & eKINEMATIC); }
	PX_FORCE_INLINE PxIntBool isDeleted()				const	{ return PxIntBool(mFlags & eDELETED); }
	PX_FORCE_INLINE PxIntBool isDirty()					const	{ return PxIntBool(mFlags & eDIRTY); }
	PX_FORCE_INLINE PxIntBool isReadyForSleeping()		const	{ return PxIntBool(mFlags & eREADY_FOR_SLEEPING); }
	PX_FORCE_INLINE NodeType getNodeType()				const	{ return NodeType(mType); }
};
struct Island
{
PxNodeIndex mRootNode;
PxNodeIndex mLastNode;
PxU32 mNodeCount[Node::eTYPE_COUNT];
PxU32 mActiveIndex;
EdgeIndex mFirstEdge[Edge::eEDGE_TYPE_COUNT], mLastEdge[Edge::eEDGE_TYPE_COUNT];
PxU32 mEdgeCount[Edge::eEDGE_TYPE_COUNT];
Island() : mActiveIndex(IG_INVALID_ISLAND)
{
for(PxU32 a = 0; a < Edge::eEDGE_TYPE_COUNT; ++a)
{
mFirstEdge[a] = IG_INVALID_EDGE;
mLastEdge[a] = IG_INVALID_EDGE;
mEdgeCount[a] = 0;
}
for(PxU32 a = 0; a < Node::eTYPE_COUNT; ++a)
{
mNodeCount[a] = 0;
}
}
};
/**
\brief Per-node record captured during a graph traversal.
\note The default constructor intentionally leaves all members uninitialized.
*/
struct TraversalState
{
	PxNodeIndex	mNodeIndex;		// the node this record refers to
	PxU32		mCurrentIndex;	// traversal bookkeeping index — presumably a slot in IslandSim::mVisitedNodes; confirm against traversal code
	PxU32		mPrevIndex;		// index of the predecessor record in the traversal
	PxU32		mDepth;			// hop count from the traversal start

	TraversalState()
	{
	}

	TraversalState( PxNodeIndex nodeIndex, PxU32 currentIndex, PxU32 prevIndex, PxU32 depth) :
		mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth)
	{
	}
};
/**
\brief Entry in the traversal priority queue: a traversal state plus the hop
count used as the ordering key (see NodeComparator).
\note The default constructor intentionally leaves members uninitialized.
*/
struct QueueElement
{
	TraversalState*	mState;		// non-owning pointer into the traversal's state storage
	PxU32			mHopCount;	// priority key: smaller hop counts are dequeued first

	QueueElement()
	{
	}

	QueueElement(TraversalState* state, PxU32 hopCount) : mState(state), mHopCount(hopCount)
	{
	}
};
/**
\brief Orders traversal queue elements by increasing hop count, so elements
believed to be fewer hops from the root are processed first.
*/
struct NodeComparator
{
	NodeComparator()
	{
	}

	bool operator() (const QueueElement& node0, const QueueElement& node1) const
	{
		return node0.mHopCount < node1.mHopCount;
	}

private:
	// Non-assignable (declared, never defined).
	NodeComparator& operator = (const NodeComparator&);
};
// PT: island-manager data used by both CPU & GPU code.
// This is managed by external code (e.g. SimpleIslandManager) and passed as const data to IslandSim.
class CPUExternalData
{
public:
	// The two node indices connected by the given edge (slots 2*index and 2*index+1).
	PX_FORCE_INLINE PxNodeIndex getNodeIndex1(IG::EdgeIndex index) const { return mEdgeNodeIndices[2 * index]; }
	PX_FORCE_INLINE PxNodeIndex getNodeIndex2(IG::EdgeIndex index) const { return mEdgeNodeIndices[2 * index + 1]; }

	//KS - stores node indices for a given edge. Node index 0 is at 2* edgeId and NodeIndex1 is at 2*edgeId + 1
	//can also be used for edgeInstance indexing so there's no need to figure out outboundNode ID either!
	Cm::BlockArray<PxNodeIndex> mEdgeNodeIndices;
};
// PT: island-manager data only needed for the GPU version, but stored in CPU code.
// This is managed by external code (e.g. SimpleIslandManager) and passed as non-const data to only one of the IslandSims.
// (It is otherwise optional). IslandSim will create/update this data during island gen.
class GPUExternalData
{
public:
	GPUExternalData() :
		mFirstPartitionEdges	("mFirstPartitionEdges"),
		mDestroyedPartitionEdges("mDestroyedPartitionEdges"),
		mNpIndexPtr				(NULL)
	{
	}

	///////////////////////////////////////////////////////////////////////////

	// Head of the partition-edge list for a given edge (non-owning pointers).
	PX_FORCE_INLINE PartitionEdge*	getFirstPartitionEdge(IG::EdgeIndex edgeIndex)	const	{ return mFirstPartitionEdges[edgeIndex]; }
	PX_FORCE_INLINE void			setFirstPartitionEdge(IG::EdgeIndex edgeIndex, PartitionEdge* partitionEdge)	{ mFirstPartitionEdges[edgeIndex] = partitionEdge; }

	PxArray<PartitionEdge*> mFirstPartitionEdges;

	///////////////////////////////////////////////////////////////////////////

	// Partition edges destroyed since the last clear; forceSize_Unsafe(0) keeps the capacity for reuse.
	PX_FORCE_INLINE PxU32 getNbDestroyedPartitionEdges()					const	{ return mDestroyedPartitionEdges.size(); }
	PX_FORCE_INLINE const PartitionEdge*const*	getDestroyedPartitionEdges()const	{ return mDestroyedPartitionEdges.begin(); }
	PX_FORCE_INLINE PartitionEdge**				getDestroyedPartitionEdges()		{ return mDestroyedPartitionEdges.begin(); }
	PX_FORCE_INLINE void clearDestroyedPartitionEdges()							{ mDestroyedPartitionEdges.forceSize_Unsafe(0); }

	PxArray<PartitionEdge*> mDestroyedPartitionEdges;

	///////////////////////////////////////////////////////////////////////////

	PX_FORCE_INLINE const PxBitMap& getActiveContactManagerBitmap() const { return mActiveContactEdges; }

	PxBitMap mActiveContactEdges;

	///////////////////////////////////////////////////////////////////////////

	// PT: these ones are strange, used to store an unrelated ptr from the outside, and only for GPU
	PX_FORCE_INLINE void setEdgeNodeIndexPtr(PxU32* ptr)	{ mNpIndexPtr = ptr; }
	PX_FORCE_INLINE PxU32* getEdgeNodeIndexPtr()	const	{ return mNpIndexPtr; }

	PxU32* mNpIndexPtr;	// non-owning pointer stored on behalf of external code
};
class IslandSim
{
PX_NOCOPY(IslandSim)
HandleManager<IslandId> mIslandHandles; //! Handle manager for islands
// PT: these arrays are parallel, all indexed by PxNodeIndex::index()
PxArray<Node> mNodes; //! The nodes used in the constraint graph
PxArray<PxU32> mActiveNodeIndex; //! The active node index for each node
PxArray<PxU32> mHopCounts; //! The observed number of "hops" from a given node to its root node. May be inaccurate but used to accelerate searches.
PxArray<PxNodeIndex> mFastRoute; //! The observed last route from a given node to the root node. We try the fast route (unless its broken) before trying others.
PxArray<IslandId> mIslandIds; //! The array of per-node island ids
//
Cm::BlockArray<Edge> mEdges;
Cm::BlockArray<EdgeInstance> mEdgeInstances; //! Edges used to connect nodes in the constraint graph
PxArray<Island> mIslands; //! The array of islands
PxArray<PxU32> mIslandStaticTouchCount; //! Array of static touch counts per-island
PxArray<PxNodeIndex> mActiveNodes[Node::eTYPE_COUNT]; //! An array of active nodes
PxArray<PxNodeIndex> mActiveKinematicNodes; //! An array of active or referenced kinematic nodes
PxArray<EdgeIndex> mActivatedEdges[Edge::eEDGE_TYPE_COUNT]; //! An array of active edges
PxU32 mActiveEdgeCount[Edge::eEDGE_TYPE_COUNT];
PxBitMap mIslandAwake; //! Indicates whether an island is awake or not
//An array of active islands
// Dense list of currently-awake islands; each Island stores its slot here in mActiveIndex.
PxArray<IslandId> mActiveIslands;
// Number of nodes of each type that were already active at the start of the frame;
// entries appended past this count are the nodes newly activated this frame.
// NOTE(review): dimensioned with Edge::eEDGE_TYPE_COUNT but indexed by Node::NodeType
// elsewhere (see getNbNodesToActivate) — confirm the two enum counts match.
PxU32 mInitialActiveNodeCount[Edge::eEDGE_TYPE_COUNT];
// Per node type, the nodes scheduled to be put to sleep this frame (cleared in clearDeactivations()).
PxArray<PxNodeIndex> mNodesToPutToSleep[Node::eTYPE_COUNT];
//Input to this frame's island management (changed nodes/edges)
//Input list of changes observed this frame. If there are no changes, no work to be done.
PxArray<EdgeIndex> mDirtyEdges[Edge::eEDGE_TYPE_COUNT];
//Dirty nodes. These nodes lost at least one connection so we need to recompute islands from these nodes
//PxArray<NodeIndex> mDirtyNodes;
PxBitMap mDirtyMap;
#if IG_LIMIT_DIRTY_NODES
// Resume position in mDirtyMap when per-frame dirty-node processing is capped.
PxU32 mLastMapIndex;
#endif
//An array of nodes to activate
PxArray<PxNodeIndex> mActivatingNodes;
// Edges removed this frame, consumed by removeDestroyedEdges()/processLostEdges().
PxArray<EdgeIndex> mDestroyedEdges;
//Temporary, transient data used for traversals. TODO - move to PxsSimpleIslandManager. Or if we keep it here, we can
//process multiple island simulations in parallel
Cm::PriorityQueue<QueueElement, NodeComparator> mPriorityQueue; //! Priority queue used for graph traversal
PxArray<TraversalState> mVisitedNodes; //! The list of nodes visited in the current traversal
PxBitMap mVisitedState; //! Indicates whether a node has been visited
// Per edge type, edges collected when an island is split during traversal.
PxArray<EdgeIndex> mIslandSplitEdges[Edge::eEDGE_TYPE_COUNT];
// Per edge type, edges scheduled for deactivation this frame (cleared in clearDeactivations()).
PxArray<EdgeIndex> mDeactivatingEdges[Edge::eEDGE_TYPE_COUNT];
public:
// PT: we could perhaps instead pass these as param whenever needed. The coupling otherwise makes it more difficult to unit-test IslandSim in isolation.
const CPUExternalData& mCpuData; // PT: from the simple island manager, shared between accurate/speculative island sim
GPUExternalData* mGpuData; // PT: from the simple island manager, for accurate island sim (null otherwise) and only needed for the GPU version.
protected:
const PxU64 mContextId;
public:
IslandSim(const CPUExternalData& cpuData, GPUExternalData* gpuData, PxU64 contextID);
~IslandSim() {}
// Registers a node with the island graph. 'object' is the opaque user pointer returned by getObject().
void addNode(bool isActive, bool isKinematic, Node::NodeType type, PxNodeIndex nodeIndex, void* object);
void activateNode(PxNodeIndex index);
void deactivateNode(PxNodeIndex index);
void putNodeToSleep(PxNodeIndex index);
void removeConnection(EdgeIndex edgeIndex);
// Accessors over the dense per-type active-node lists.
PX_FORCE_INLINE PxU32 getNbActiveNodes(Node::NodeType type) const { return mActiveNodes[type].size(); }
PX_FORCE_INLINE const PxNodeIndex* getActiveNodes(Node::NodeType type) const { return mActiveNodes[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActiveKinematics() const { return mActiveKinematicNodes.size(); }
PX_FORCE_INLINE const PxNodeIndex* getActiveKinematics() const { return mActiveKinematicNodes.begin(); }
// Nodes activated this frame live at the tail of the active list, past mInitialActiveNodeCount.
PX_FORCE_INLINE PxU32 getNbNodesToActivate(Node::NodeType type) const { return mActiveNodes[type].size() - mInitialActiveNodeCount[type]; }
PX_FORCE_INLINE const PxNodeIndex* getNodesToActivate(Node::NodeType type) const { return mActiveNodes[type].begin() + mInitialActiveNodeCount[type]; }
PX_FORCE_INLINE PxU32 getNbNodesToDeactivate(Node::NodeType type) const { return mNodesToPutToSleep[type].size(); }
PX_FORCE_INLINE const PxNodeIndex* getNodesToDeactivate(Node::NodeType type) const { return mNodesToPutToSleep[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActivatedEdges(Edge::EdgeType type) const { return mActivatedEdges[type].size(); }
PX_FORCE_INLINE const EdgeIndex* getActivatedEdges(Edge::EdgeType type) const { return mActivatedEdges[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActiveEdges(Edge::EdgeType type) const { return mActiveEdgeCount[type]; }
// Returns the opaque user object registered with addNode() for this node.
// 'type' is only used to cross-check the stored node type in debug builds.
PX_FORCE_INLINE void* getObject(PxNodeIndex nodeIndex, Node::NodeType type) const
{
	PX_UNUSED(type);
	const Node& n = mNodes[nodeIndex.index()];
	PX_ASSERT(n.mType == type);
	return n.mObject;
}
// Resets the per-type scratch lists gathered during the previous deactivation pass.
// Note: the original cleared both arrays in a single loop bounded by Node::eTYPE_COUNT,
// although mDeactivatingEdges is dimensioned by Edge::eEDGE_TYPE_COUNT. Use each
// array's own bound so every entry is cleared and no out-of-bounds access can occur
// if the two enum counts ever diverge.
PX_FORCE_INLINE void clearDeactivations()
{
	for (PxU32 i = 0; i < Node::eTYPE_COUNT; ++i)
		mNodesToPutToSleep[i].forceSize_Unsafe(0);

	for (PxU32 i = 0; i < Edge::eEDGE_TYPE_COUNT; ++i)
		mDeactivatingEdges[i].forceSize_Unsafe(0);
}
// Island lookup, either directly by id or via the node-to-island mapping.
PX_FORCE_INLINE const Island& getIsland(IG::IslandId islandIndex) const { return mIslands[islandIndex]; }
PX_FORCE_INLINE const Island& getIsland(const PxNodeIndex& nodeIndex) const { PX_ASSERT(mIslandIds[nodeIndex.index()] != IG_INVALID_ISLAND); return mIslands[mIslandIds[nodeIndex.index()]]; }
PX_FORCE_INLINE PxU32 getNbActiveIslands() const { return mActiveIslands.size(); }
PX_FORCE_INLINE const IslandId* getActiveIslands() const { return mActiveIslands.begin(); }
PX_FORCE_INLINE PxU32 getNbDeactivatingEdges(const IG::Edge::EdgeType edgeType) const { return mDeactivatingEdges[edgeType].size(); }
PX_FORCE_INLINE const EdgeIndex* getDeactivatingEdges(const IG::Edge::EdgeType edgeType) const { return mDeactivatingEdges[edgeType].begin(); }
// PT: this is not actually used externally
//PX_FORCE_INLINE PxU32 getNbDestroyedEdges() const { return mDestroyedEdges.size(); }
//PX_FORCE_INLINE const EdgeIndex* getDestroyedEdges() const { return mDestroyedEdges.begin(); }
// PT: this is not actually used externally. Still used internally in IslandSim.
//PX_FORCE_INLINE PxU32 getNbDirtyEdges(IG::Edge::EdgeType type) const { return mDirtyEdges[type].size(); }
//PX_FORCE_INLINE const EdgeIndex* getDirtyEdges(IG::Edge::EdgeType type) const { return mDirtyEdges[type].begin(); }
PX_FORCE_INLINE PxU32 getNbEdges() const { return mEdges.size(); }
PX_FORCE_INLINE const Edge& getEdge(EdgeIndex edgeIndex) const { return mEdges[edgeIndex]; }
PX_FORCE_INLINE Edge& getEdge(EdgeIndex edgeIndex) { return mEdges[edgeIndex]; }
PX_FORCE_INLINE PxU32 getNbNodes() const { return mNodes.size(); }
PX_FORCE_INLINE const Node& getNode(const PxNodeIndex& nodeIndex) const { return mNodes[nodeIndex.index()]; }
// Maps a node to its slot in the per-type active list (PX_INVALID_NODE when inactive).
PX_FORCE_INLINE PxU32 getActiveNodeIndex(const PxNodeIndex& nodeIndex) const { return mActiveNodeIndex[nodeIndex.index()]; }
PX_FORCE_INLINE const PxU32* getActiveNodeIndex() const { return mActiveNodeIndex.begin(); }
//PX_FORCE_INLINE PxU32 getNbActiveNodeIndex() const { return mActiveNodeIndex.size(); }
PX_FORCE_INLINE PxU32 getNbIslands() const { return mIslandStaticTouchCount.size(); }
PX_FORCE_INLINE const PxU32* getIslandStaticTouchCount() const { return mIslandStaticTouchCount.begin(); }
// Static-touch count of the island containing the given node.
PX_FORCE_INLINE PxU32 getIslandStaticTouchCount(const PxNodeIndex& nodeIndex) const
{
PX_ASSERT(mIslandIds[nodeIndex.index()] != IG_INVALID_ISLAND);
return mIslandStaticTouchCount[mIslandIds[nodeIndex.index()]];
}
PX_FORCE_INLINE const IG::IslandId* getIslandIds() const { return mIslandIds.begin(); }
PX_FORCE_INLINE PxU64 getContextId() const { return mContextId; }
// Switches a node between kinematic and dynamic handling within the island graph.
void setKinematic(PxNodeIndex nodeIndex);
void setDynamic(PxNodeIndex nodeIndex);
// Debug validation of the internal graph invariants.
bool checkInternalConsistency() const;
// GPU-solver hook: a node woken on the GPU must not remain flagged as sleep-ready.
PX_INLINE void activateNode_ForGPUSolver(PxNodeIndex index)
{
	// Clear the flag unconditionally — it may or may not have been set.
	mNodes[index.index()].clearIsReadyForSleeping();
}
// GPU-solver hook: flags the node as ready to be put to sleep by the island manager.
PX_INLINE void deactivateNode_ForGPUSolver(PxNodeIndex index)
{
	mNodes[index.index()].setIsReadyForSleeping();
}
// PT: these three functions added for multithreaded implementation of Sc::Scene::islandInsertion
void preallocateConnections(EdgeIndex handle);
bool addConnectionPreallocated(PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Edge::EdgeType edgeType, EdgeIndex handle);
void addDelayedDirtyEdges(PxU32 nbHandles, const EdgeIndex* handles);
// PT: called by SimpleIslandManager. Made public to remove friendship, make the API clearer, and unit-testable.
void addConnection(PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Edge::EdgeType edgeType, EdgeIndex handle);
void wakeIslands(); // PT: this is always followed by a call to processNewEdges(). Merge the two?
void wakeIslands2();
void processNewEdges();
// PT: called by ThirdPassTask::runInternal. Made public to remove friendship, make the API clearer, and unit-testable.
void removeDestroyedEdges(); // PT: this is always followed by a call to processLostEdges(). Merge the two?
void processLostEdges(const PxArray<PxNodeIndex>& destroyedNodes, bool allowDeactivation, bool permitKinematicDeactivation, PxU32 dirtyNodeLimit);
private:
// Internal traversal / island maintenance helpers.
void wakeIslandsInternal(bool flag);
void insertNewEdges();
void removeConnectionInternal(EdgeIndex edgeIndex);
void addConnectionToGraph(EdgeIndex index);
void removeConnectionFromGraph(EdgeIndex edgeIndex);
//Merges 2 islands together. The returned id is the id of the merged island
IslandId mergeIslands(IslandId island0, IslandId island1, PxNodeIndex node0, PxNodeIndex node1);
void mergeIslandsInternal(Island& island0, Island& island1, IslandId islandId0, IslandId islandId1, PxNodeIndex node0, PxNodeIndex node1);
void unwindRoute(PxU32 traversalIndex, PxNodeIndex lastNode, PxU32 hopCount, IslandId id);
void activateIslandInternal(const Island& island);
void activateIsland(IslandId island);
void deactivateIsland(IslandId island);
#if IG_SANITY_CHECKS
bool canFindRoot(PxNodeIndex startNode, PxNodeIndex targetNode, PxArray<PxNodeIndex>* visitedNodes);
#endif
bool tryFastPath(PxNodeIndex startNode, PxNodeIndex targetNode, IslandId islandId);
bool findRoute(PxNodeIndex startNode, PxNodeIndex targetNode, IslandId islandId);
#if PX_DEBUG
bool isPathTo(PxNodeIndex startNode, PxNodeIndex targetNode) const;
#endif
void activateNodeInternal(PxNodeIndex index);
void deactivateNodeInternal(PxNodeIndex index);
PX_FORCE_INLINE void makeEdgeActive(EdgeInstanceIndex index, bool testEdgeType);
IslandId addNodeToIsland(PxNodeIndex nodeIndex1, PxNodeIndex nodeIndex2, IslandId islandId2, bool active1, bool active2);
/* PX_FORCE_INLINE void notifyReadyForSleeping(const PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
//PX_ASSERT(node.isActive());
node.setIsReadyForSleeping();
}
PX_FORCE_INLINE void notifyNotReadyForSleeping(const PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
PX_ASSERT(node.isActive() || node.isActivating());
node.clearIsReadyForSleeping();
}*/
// Appends the island to the dense active-island list and records its slot in
// mActiveIndex so it can later be removed in O(1). The island must not already be awake.
PX_FORCE_INLINE void markIslandActive(IslandId islandId)
{
	Island& isl = mIslands[islandId];
	PX_ASSERT(!mIslandAwake.test(islandId));
	PX_ASSERT(isl.mActiveIndex == IG_INVALID_ISLAND);

	isl.mActiveIndex = mActiveIslands.size();
	mIslandAwake.set(islandId);
	mActiveIslands.pushBack(islandId);
}
// Removes the island from the dense active-island list using swap-and-pop:
// the last active island takes over the vacated slot.
PX_FORCE_INLINE void markIslandInactive(IslandId islandId)
{
	Island& isl = mIslands[islandId];
	PX_ASSERT(mIslandAwake.test(islandId));
	PX_ASSERT(isl.mActiveIndex != IG_INVALID_ISLAND);
	PX_ASSERT(mActiveIslands[isl.mActiveIndex] == islandId);

	const IslandId lastId = mActiveIslands[mActiveIslands.size() - 1];
	PX_ASSERT(mIslandAwake.test(lastId));

	// Relocate the last entry into this island's slot, then shrink the list.
	mIslands[lastId].mActiveIndex = isl.mActiveIndex;
	mActiveIslands[isl.mActiveIndex] = lastId;
	mActiveIslands.forceSize_Unsafe(mActiveIslands.size() - 1);

	isl.mActiveIndex = IG_INVALID_ISLAND;
	mIslandAwake.reset(islandId);
}
// Adds a kinematic node to the active-kinematic list, unless it is already listed
// or is being kept "active" purely through active edges referencing it.
PX_FORCE_INLINE void markKinematicActive(PxNodeIndex nodeIndex)
{
	const PxU32 idx = nodeIndex.index();
	const Node& node = mNodes[idx];
	PX_ASSERT(node.isKinematic());

	const bool alreadyListed = mActiveNodeIndex[idx] != PX_INVALID_NODE;
	if (!alreadyListed && node.mActiveRefCount == 0)
	{
		mActiveNodeIndex[idx] = mActiveKinematicNodes.size();
		mActiveKinematicNodes.pushBack(nodeIndex);
	}
}
// Removes a kinematic node from the active-kinematic list via swap-and-pop,
// but only while no active edges reference it (mActiveRefCount == 0).
// NOTE(review): the assert below requires mActiveNodeIndex[index] != PX_INVALID_NODE,
// yet the inner runtime check (and its comment) allows the opposite for the
// wake-then-sleep-same-frame edge case — confirm which invariant is intended.
PX_FORCE_INLINE void markKinematicInactive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PX_ASSERT(mActiveKinematicNodes[mActiveNodeIndex[index]].index() == index);
if(node.mActiveRefCount == 0)
{
//Only remove from active kinematic list if it has no active contacts referencing it *and* it is asleep
if(mActiveNodeIndex[index] != PX_INVALID_NODE)
{
//Need to verify active node index because there is an edge case where a node could be woken, then put to
//sleep in the same frame. This would mean that it would not have an active index at this stage.
// Swap-and-pop: move the last active kinematic into this node's slot.
PxNodeIndex replaceIndex = mActiveKinematicNodes.back();
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == mActiveKinematicNodes.size() - 1);
mActiveNodeIndex[replaceIndex.index()] = mActiveNodeIndex[index];
mActiveKinematicNodes[mActiveNodeIndex[index]] = replaceIndex;
mActiveKinematicNodes.forceSize_Unsafe(mActiveKinematicNodes.size() - 1);
mActiveNodeIndex[index] = PX_INVALID_NODE;
}
}
}
// Appends a non-kinematic node to the active list of its node type and records
// its slot in mActiveNodeIndex. The node must not already be active.
PX_FORCE_INLINE void markActive(PxNodeIndex nodeIndex)
{
	const PxU32 idx = nodeIndex.index();
	const Node& node = mNodes[idx];
	PX_ASSERT(!node.isKinematic());
	PX_ASSERT(mActiveNodeIndex[idx] == PX_INVALID_NODE);

	PxArray<PxNodeIndex>& list = mActiveNodes[node.mType];
	mActiveNodeIndex[idx] = list.size();
	list.pushBack(nodeIndex);
}
// Removes a non-kinematic node from its type's active list.
// The active list is partitioned: [0, mInitialActiveNodeCount) holds nodes that were
// already active at frame start, the tail holds nodes activated this frame. If the
// node is in the initial partition it is first swapped to the partition boundary and
// the boundary shrinks; then a standard swap-and-pop removes it from the list.
PX_FORCE_INLINE void markInactive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(!node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PxArray<PxNodeIndex>& activeNodes = mActiveNodes[node.mType];
PX_ASSERT(activeNodes[mActiveNodeIndex[index]].index() == index);
const PxU32 initialActiveNodeCount = mInitialActiveNodeCount[node.mType];
if(mActiveNodeIndex[index] < initialActiveNodeCount)
{
//It's in the initial active node set. We retain a list of active nodes, where the existing active nodes
//are at the beginning of the array and the newly activated nodes are at the end of the array...
//The solution is to move the node to the end of the initial active node list in this case
PxU32 activeNodeIndex = mActiveNodeIndex[index];
// Swap with the last node of the initial partition, then shrink the partition.
PxNodeIndex replaceIndex = activeNodes[initialActiveNodeCount - 1];
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == initialActiveNodeCount - 1);
mActiveNodeIndex[index] = mActiveNodeIndex[replaceIndex.index()];
mActiveNodeIndex[replaceIndex.index()] = activeNodeIndex;
activeNodes[activeNodeIndex] = replaceIndex;
activeNodes[mActiveNodeIndex[index]] = nodeIndex;
mInitialActiveNodeCount[node.mType]--;
}
PX_ASSERT(!node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PX_ASSERT(activeNodes[mActiveNodeIndex[index]].index() == index);
// Swap-and-pop: move the last active node into this node's slot and shrink the list.
PxNodeIndex replaceIndex = activeNodes.back();
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == activeNodes.size() - 1);
mActiveNodeIndex[replaceIndex.index()] = mActiveNodeIndex[index];
activeNodes[mActiveNodeIndex[index]] = replaceIndex;
activeNodes.forceSize_Unsafe(activeNodes.size() - 1);
mActiveNodeIndex[index] = PX_INVALID_NODE;
}
// Marks an edge as activating this frame: records it in the per-type activated-edge
// list, bumps the active-edge counter, mirrors the state into the GPU contact-edge
// bitmap when present, and increments the active ref count of both endpoint nodes
// (waking sleeping kinematics that gain their first active reference).
PX_FORCE_INLINE void markEdgeActive(EdgeIndex index, PxNodeIndex nodeIndex1, PxNodeIndex nodeIndex2)
{
Edge& edge = mEdges[index];
PX_ASSERT((edge.mEdgeState & Edge::eACTIVATING) == 0);
edge.mEdgeState |= Edge::eACTIVATING;
mActivatedEdges[edge.mEdgeType].pushBack(index);
mActiveEdgeCount[edge.mEdgeType]++;
//Set the active bit...
if(mGpuData && edge.mEdgeType == Edge::eCONTACT_MANAGER)
mGpuData->mActiveContactEdges.set(index);
const PxU32 index1 = nodeIndex1.index();
const PxU32 index2 = nodeIndex2.index();
// Static/world endpoints (PX_INVALID_NODE) carry no ref counts.
if (index1 != PX_INVALID_NODE && index2 != PX_INVALID_NODE)
{
PX_ASSERT((!mNodes[index1].isKinematic()) || (!mNodes[index2].isKinematic()) || edge.getEdgeType() == IG::Edge::eCONTACT_MANAGER)
;
{
Node& node = mNodes[index1];
// First active reference to a sleeping kinematic puts it on the active kinematic list.
if(node.mActiveRefCount == 0 && node.isKinematic() && !node.isActiveOrActivating())
markKinematicActive(nodeIndex1); //Add to active kinematic list
node.mActiveRefCount++;
}
{
Node& node = mNodes[index2];
if(node.mActiveRefCount == 0 && node.isKinematic() && !node.isActiveOrActivating())
markKinematicActive(nodeIndex2); //Add to active kinematic list
node.mActiveRefCount++;
}
}
}
// Removes the edge from the activating list it was placed on by markEdgeActive().
void removeEdgeFromActivatingList(EdgeIndex index);
// Unlinks an edge from its island's per-type doubly-linked edge list and
// decrements the island's edge count. Resets the edge's list links.
PX_FORCE_INLINE void removeEdgeFromIsland(Island& island, EdgeIndex edgeIndex)
{
Edge& edge = mEdges[edgeIndex];
// Patch the successor's back-link, or the island tail if this edge was last.
if(edge.mNextIslandEdge != IG_INVALID_EDGE)
{
PX_ASSERT(mEdges[edge.mNextIslandEdge].mPrevIslandEdge == edgeIndex);
mEdges[edge.mNextIslandEdge].mPrevIslandEdge = edge.mPrevIslandEdge;
}
else
{
PX_ASSERT(island.mLastEdge[edge.mEdgeType] == edgeIndex);
island.mLastEdge[edge.mEdgeType] = edge.mPrevIslandEdge;
}
// Patch the predecessor's forward-link, or the island head if this edge was first.
if(edge.mPrevIslandEdge != IG_INVALID_EDGE)
{
PX_ASSERT(mEdges[edge.mPrevIslandEdge].mNextIslandEdge == edgeIndex);
mEdges[edge.mPrevIslandEdge].mNextIslandEdge = edge.mNextIslandEdge;
}
else
{
PX_ASSERT(island.mFirstEdge[edge.mEdgeType] == edgeIndex);
island.mFirstEdge[edge.mEdgeType] = edge.mNextIslandEdge;
}
island.mEdgeCount[edge.mEdgeType]--;
edge.mNextIslandEdge = edge.mPrevIslandEdge = IG_INVALID_EDGE;
}
// Appends an edge (which must currently be unlinked) to the tail of the island's
// per-type doubly-linked edge list and bumps the island's edge count.
PX_FORCE_INLINE void addEdgeToIsland(Island& island, EdgeIndex edgeIndex)
{
	Edge& edge = mEdges[edgeIndex];
	PX_ASSERT(edge.mNextIslandEdge == IG_INVALID_EDGE && edge.mPrevIslandEdge == IG_INVALID_EDGE);

	const EdgeIndex tail = island.mLastEdge[edge.mEdgeType];
	if (tail == IG_INVALID_EDGE)
	{
		// List was empty: this edge becomes the head as well.
		PX_ASSERT(island.mFirstEdge[edge.mEdgeType] == IG_INVALID_EDGE);
		island.mFirstEdge[edge.mEdgeType] = edgeIndex;
	}
	else
	{
		PX_ASSERT(mEdges[tail].mNextIslandEdge == IG_INVALID_EDGE);
		mEdges[tail].mNextIslandEdge = edgeIndex;
	}

	edge.mPrevIslandEdge = tail;
	island.mLastEdge[edge.mEdgeType] = edgeIndex;
	island.mEdgeCount[edge.mEdgeType]++;
}
// Unlinks a node from its island's doubly-linked node list (mirror of
// removeEdgeFromIsland) and decrements the island's per-type node count.
PX_FORCE_INLINE void removeNodeFromIsland(Island& island, PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
// Patch the successor's back-link, or the island's last-node if this node was last.
if(node.mNextNode.isValid())
{
PX_ASSERT(mNodes[node.mNextNode.index()].mPrevNode.index() == nodeIndex.index());
mNodes[node.mNextNode.index()].mPrevNode = node.mPrevNode;
}
else
{
PX_ASSERT(island.mLastNode.index() == nodeIndex.index());
island.mLastNode = node.mPrevNode;
}
// Patch the predecessor's forward-link, or the island root if this node was first.
if(node.mPrevNode.isValid())
{
PX_ASSERT(mNodes[node.mPrevNode.index()].mNextNode.index() == nodeIndex.index());
mNodes[node.mPrevNode.index()].mNextNode = node.mNextNode;
}
else
{
PX_ASSERT(island.mRootNode.index() == nodeIndex.index());
island.mRootNode = node.mNextNode;
}
island.mNodeCount[node.mType]--;
node.mNextNode = node.mPrevNode = PxNodeIndex();
}
};
}
}
#endif

View File

@@ -0,0 +1,49 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_KERNEL_WRANGLER_H
#define PXS_KERNEL_WRANGLER_H
#include "foundation/PxUserAllocated.h"
namespace physx
{
class PxCudaContextManager;
class KernelWrangler;
// Thin holder pairing the CUDA kernel wrangler with its context manager.
// Members are public and set externally; this class performs no ownership management.
class PxsKernelWranglerManager : public PxUserAllocated
{
public:
PX_FORCE_INLINE KernelWrangler* getKernelWrangler() { return mKernelWrangler; }
PX_FORCE_INLINE PxCudaContextManager* getCudaContextManager() { return mCudaContextManager; }
KernelWrangler* mKernelWrangler; // non-owning — TODO confirm lifetime is managed by the creator
PxCudaContextManager* mCudaContextManager; // non-owning CUDA context manager
};
}
#endif

View File

@@ -0,0 +1,183 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MATERIAL_COMBINER_H
#define PXS_MATERIAL_COMBINER_H
#include "PxsMaterialCore.h"
namespace physx
{
// Combines two material scalars according to a PxCombineMode value.
// Unrecognized modes yield 0.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal combineScalars(PxReal a, PxReal b, PxI32 combineMode)
{
	if (combineMode == PxCombineMode::eAVERAGE)
		return (a + b) * 0.5f;
	if (combineMode == PxCombineMode::eMIN)
		return PxMin(a, b);
	if (combineMode == PxCombineMode::eMULTIPLY)
		return a * b;
	if (combineMode == PxCombineMode::eMAX)
		return PxMax(a, b);
	return PxReal(0);
}
// Combines two material scalars per PxCombineMode; returns 0 for unknown modes.
// Same contract as combineScalars above.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal PxsCombinePxReal(PxReal val0, PxReal val1, PxI32 combineMode)
{
	switch (combineMode)
	{
	case PxCombineMode::eMAX:
		return PxMax(val0, val1);
	case PxCombineMode::eMIN:
		return PxMin(val0, val1);
	case PxCombineMode::eMULTIPLY:
		return val0 * val1;
	case PxCombineMode::eAVERAGE:
		return (val0 + val1) * 0.5f;
	default:
		return 0.0f;
	}
}
// Combines two materials into effective contact parameters (friction, restitution,
// damping, flags). A negative restitution marks a "compliant" (spring-like) material;
// the eCOMPLIANT_ACCELERATION_SPRING flag further distinguishes acceleration-based
// from force-based compliant springs. Outputs are written through the reference params.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxsCombineMaterials(const PxsMaterialData& mat0Data, const PxsMaterialData& mat1Data,
PxReal& combinedStaticFriction, PxReal& combinedDynamicFriction,
PxReal& combinedRestitution, PxU32& combinedMaterialFlags, PxReal& combinedDamping)
{
const PxReal r0 = mat0Data.restitution;
const PxReal r1 = mat1Data.restitution;
const bool compliant0 = r0 < 0.0f;
const bool compliant1 = r1 < 0.0f;
const bool exactlyOneCompliant = compliant0 ^ compliant1;
const bool bothCompliant = compliant0 & compliant1;
const bool compliantAcc0 = !!(mat0Data.flags & PxMaterialFlag::eCOMPLIANT_ACCELERATION_SPRING);
const bool compliantAcc1 = !!(mat1Data.flags & PxMaterialFlag::eCOMPLIANT_ACCELERATION_SPRING);
const bool exactlyOneAccCompliant = compliantAcc0 ^ compliantAcc1;
// combine restitution
{
// For rigid-rigid or compliant-compliant interactions, follow the user's choice of combine mode but make sure it stays negative for multiply.
// For rigid-compliant interactions, we go with the compliant behavior.
// For forceCompliant-accelerationCompliant, we go with the accelerationCompliant behavior
if (bothCompliant && exactlyOneAccCompliant)
{
combinedRestitution = compliantAcc0 ? r0 : r1;
}
else
{
// eMIN for rigid-compliant picks the (negative) compliant restitution;
// otherwise the "stronger" of the two user combine modes wins (PxMax on the enum).
const PxCombineMode::Enum combineMode =
exactlyOneCompliant ? PxCombineMode::eMIN
: PxMax(mat0Data.getRestitutionCombineMode(), mat1Data.getRestitutionCombineMode());
// Multiplying two negative (compliant) restitutions would flip the sign; undo that.
const PxReal flipSign = (bothCompliant && (combineMode == PxCombineMode::eMULTIPLY)) ? -1.0f : 1.0f;
combinedRestitution = flipSign * combineScalars(r0, r1, combineMode);
}
}
// combine damping
{
// For rigid-rigid or compliant-compliant interactions, follow the user's choice of combine mode.
// For rigid-compliant interactions, we go with the compliant behavior.
// For forceCompliant-accelerationCompliant, we go with the accelerationCompliant behavior
const PxReal d0 = mat0Data.damping;
const PxReal d1 = mat1Data.damping;
if (bothCompliant && exactlyOneAccCompliant)
{
combinedDamping = compliantAcc0 ? d0 : d1;
}
else
{
// eMAX for rigid-compliant picks the compliant material's damping (rigid damping is typically 0).
const PxCombineMode::Enum combineMode =
exactlyOneCompliant ? PxCombineMode::eMAX
: PxMax(mat0Data.getDampingCombineMode(), mat1Data.getDampingCombineMode());
combinedDamping = combineScalars(d0, d1, combineMode);
}
}
// combine isotropic friction
{
const PxU32 combineFlags = (mat0Data.flags | mat1Data.flags); //& (PxMaterialFlag::eDISABLE_STRONG_FRICTION|PxMaterialFlag::eDISABLE_FRICTION); //eventually set DisStrongFric flag, lower all others.
if (!(combineFlags & PxMaterialFlag::eDISABLE_FRICTION))
{
// (sic: "fiction" — long-standing local-name typo for "friction"; behavior unaffected)
const PxI32 fictionCombineMode = PxMax(mat0Data.getFrictionCombineMode(), mat1Data.getFrictionCombineMode());
PxReal dynFriction = 0.0f;
PxReal staFriction = 0.0f;
dynFriction = PxsCombinePxReal(mat0Data.dynamicFriction, mat1Data.dynamicFriction, fictionCombineMode);
staFriction = PxsCombinePxReal(mat0Data.staticFriction, mat1Data.staticFriction, fictionCombineMode);
/*switch (fictionCombineMode)
{
case PxCombineMode::eAVERAGE:
dynFriction = 0.5f * (mat0Data.dynamicFriction + mat1Data.dynamicFriction);
staFriction = 0.5f * (mat0Data.staticFriction + mat1Data.staticFriction);
break;
case PxCombineMode::eMIN:
dynFriction = PxMin(mat0Data.dynamicFriction, mat1Data.dynamicFriction);
staFriction = PxMin(mat0Data.staticFriction, mat1Data.staticFriction);
break;
case PxCombineMode::eMULTIPLY:
dynFriction = (mat0Data.dynamicFriction * mat1Data.dynamicFriction);
staFriction = (mat0Data.staticFriction * mat1Data.staticFriction);
break;
case PxCombineMode::eMAX:
dynFriction = PxMax(mat0Data.dynamicFriction, mat1Data.dynamicFriction);
staFriction = PxMax(mat0Data.staticFriction, mat1Data.staticFriction);
break;
} */
//isotropic case
// Clamp dynamic friction to >= 0 and enforce staticFriction >= dynamicFriction.
const PxReal fDynFriction = PxMax(dynFriction, 0.0f);
#if PX_CUDA_COMPILER
const PxReal fStaFriction = (staFriction - fDynFriction) >= 0 ? staFriction : fDynFriction;
#else
// fsel(x, a, b) selects a when x >= 0 — branchless equivalent of the CUDA path above.
const PxReal fStaFriction = physx::intrinsics::fsel(staFriction - fDynFriction, staFriction, fDynFriction);
#endif
combinedDynamicFriction = fDynFriction;
combinedStaticFriction = fStaFriction;
combinedMaterialFlags = combineFlags;
}
else
{
// Friction disabled: zero both coefficients and disable strong friction too.
combinedMaterialFlags = combineFlags | PxMaterialFlag::eDISABLE_STRONG_FRICTION;
combinedDynamicFriction = 0.0f;
combinedStaticFriction = 0.0f;
}
}
}
}
#endif

View File

@@ -0,0 +1,51 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MEMORY_MANAGER_H
#define PXS_MEMORY_MANAGER_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
class PxVirtualAllocatorCallback;
// Abstract provider of host and device allocator callbacks used by the SDK's
// pinned/virtual array containers.
class PxsMemoryManager : public PxUserAllocated
{
public:
virtual ~PxsMemoryManager(){}
// Allocator for host (CPU) memory.
virtual PxVirtualAllocatorCallback* getHostMemoryAllocator() = 0;
// Allocator for device memory; semantics of "device" depend on the concrete implementation.
virtual PxVirtualAllocatorCallback* getDeviceMemoryAllocator() = 0;
};
// PT: this is for CPU, see createPxgMemoryManager for GPU
PxsMemoryManager* createDefaultMemoryManager();
}
#endif

View File

@@ -0,0 +1,60 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_NPHASE_COMMON_H
#define PXS_NPHASE_COMMON_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxAssert.h"
namespace physx
{
// Base for contact-manager containers: packs a per-container bucket id into the low
// MaxBucketBits bits of a contact-manager id, with the index in the high bits.
struct PxsContactManagerBase
{
// High bit flags a contact manager created this frame.
static const PxU32 NEW_CONTACT_MANAGER_MASK = 0x80000000;
static const PxU32 MaxBucketBits = 7;
const PxU32 mBucketId;
PxsContactManagerBase(const PxU32 bucketId) : mBucketId(bucketId)
{
PX_ASSERT(bucketId < (1 << MaxBucketBits));
}
// Packs (index, bucket) into one id: index << MaxBucketBits | bucket.
// NOTE(review): the assert bound uses (MaxBucketBits - 1), allowing indices whose
// shifted value overflows 32 bits / collides with NEW_CONTACT_MANAGER_MASK — confirm intended range.
PX_FORCE_INLINE PxU32 computeId(const PxU32 index) const { PX_ASSERT(index < PxU32(1 << (32 - (MaxBucketBits - 1)))); return (index << MaxBucketBits) | (mBucketId); }
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 computeIndexFromId(const PxU32 id) { return id >> MaxBucketBits; }
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 computeBucketIndexFromId(const PxU32 id) { return id & ((1 << MaxBucketBits) - 1); }
private:
PX_NOCOPY(PxsContactManagerBase)
};
}
#endif

View File

@@ -0,0 +1,191 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_NPHASE_IMPLEMENTATION_CONTEXT_H
#define PXS_NPHASE_IMPLEMENTATION_CONTEXT_H
#include "PxvNphaseImplementationContext.h"
#include "PxsContactManagerState.h"
#include "PxcNpCache.h"
#include "foundation/PxPinnedArray.h"
class PxsCMDiscreteUpdateTask;
namespace physx
{
// Per-bucket storage for contact-manager outputs, mappings and narrow-phase caches.
// The *GPU arrays use the supplied allocator callback for (ideally pinned) memory.
struct PxsContactManagers : PxsContactManagerBase
{
PxArray<PxsContactManagerOutput> mOutputContactManagers;
PxArray<PxsContactManager*> mContactManagerMapping;
PxArray<Gu::Cache> mCaches;
// PT: these buffers should be in pinned memory but may not be if pinned allocation failed.
PxPinnedArraySafe<const Sc::ShapeInteraction*> mShapeInteractionsGPU;
PxFloatArrayPinnedSafe mRestDistancesGPU;
PxPinnedArraySafe<PxsTorsionalFrictionData> mTorsionalPropertiesGPU;
PxsContactManagers(const PxU32 bucketId, PxVirtualAllocatorCallback* callback) : PxsContactManagerBase(bucketId),
mOutputContactManagers ("mOutputContactManagers"),
mContactManagerMapping ("mContactManagerMapping"),
mCaches ("mCaches"),
mShapeInteractionsGPU (callback),
mRestDistancesGPU (callback),
mTorsionalPropertiesGPU (callback)
{
}
// Resets all arrays to size 0 without releasing their capacity.
void clear()
{
mOutputContactManagers.forceSize_Unsafe(0);
mContactManagerMapping.forceSize_Unsafe(0);
mCaches.forceSize_Unsafe(0);
mShapeInteractionsGPU.forceSize_Unsafe(0);
mRestDistancesGPU.forceSize_Unsafe(0);
mTorsionalPropertiesGPU.forceSize_Unsafe(0);
}
private:
PX_NOCOPY(PxsContactManagers)
};
// CPU (software/fallback) implementation of the narrow-phase context.
// Owns two PxsContactManagers buckets: mNarrowPhasePairs (persistent pairs) and
// mNewNarrowPhasePairs (pairs registered since the last append). Most GPU-specific
// queries return NULL/0 here; the lost/found patch queries only return data when
// constructed with gpu == true.
class PxsNphaseImplementationContext : public PxvNphaseImplementationFallback
{
	PX_NOCOPY(PxsNphaseImplementationContext)
public:
	// context:   owning low-level context, forwarded to the fallback base.
	// islandSim: island sim this context reports to (stored, used by the cpp implementation).
	// callback:  allocator for the pinned per-pair arrays of both buckets.
	// index:     bucket id shared by mNarrowPhasePairs and mNewNarrowPhasePairs.
	// gpu:       true when this fallback runs alongside the GPU pipeline (enables lost/found patch reporting).
	PxsNphaseImplementationContext(PxsContext& context, IG::IslandSim* islandSim, PxVirtualAllocatorCallback* callback, PxU32 index, bool gpu) :
		PxvNphaseImplementationFallback	(context),
		mNarrowPhasePairs				(index, callback),
		mNewNarrowPhasePairs			(index, callback),
		mModifyCallback					(NULL),
		mIslandSim						(islandSim),
		mGPU							(gpu)
	{}

	// PxvNphaseImplementationContext
	virtual void destroy() PX_OVERRIDE PX_FINAL;
	virtual void updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation,
		PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShape) PX_OVERRIDE PX_FINAL;
	virtual void postBroadPhaseUpdateContactManager(PxBaseTask*) PX_OVERRIDE PX_FINAL {}	// no-op on the CPU path
	virtual void secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void fetchUpdateContactManager() PX_OVERRIDE PX_FINAL {}						// no-op on the CPU path
	virtual void registerContactManager(PxsContactManager* cm, const Sc::ShapeInteraction* shapeInteraction, PxI32 touching, PxU32 numPatches) PX_OVERRIDE PX_FINAL;
//	virtual void registerContactManagers(PxsContactManager** cm, Sc::ShapeInteraction** shapeInteractions, PxU32 nbContactManagers, PxU32 maxContactManagerId);
	virtual void unregisterContactManager(PxsContactManager* cm) PX_OVERRIDE PX_FINAL;
	virtual void refreshContactManager(PxsContactManager* cm) PX_OVERRIDE PX_FINAL;
	// Shape/aggregate/material registration is only needed by the GPU implementation - all no-ops here.
	virtual void registerShape(const PxNodeIndex& /*nodeIndex*/, const PxsShapeCore& /*shapeCore*/, const PxU32 /*transformCacheID*/, PxActor* /*actor*/, const bool /*isDeformableSurface*/) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterShape(const PxsShapeCore& /*shapeCore*/, const PxU32 /*transformCacheID*/, const bool /*isDeformableSurface*/) PX_OVERRIDE PX_FINAL {}
	virtual void registerAggregate(const PxU32 /*transformCacheID*/) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateShapeMaterial(const PxsShapeCore&) PX_OVERRIDE PX_FINAL {}
	virtual void startNarrowPhaseTasks() PX_OVERRIDE PX_FINAL {}
	virtual void appendContactManagers() PX_OVERRIDE PX_FINAL;
	virtual PxsContactManagerOutput& getNewContactManagerOutput(PxU32 npIndex) PX_OVERRIDE PX_FINAL;
	virtual PxsContactManagerOutputIterator getContactManagerOutputs() PX_OVERRIDE PX_FINAL;
	virtual void setContactModifyCallback(PxContactModifyCallback* callback) PX_OVERRIDE PX_FINAL { mModifyCallback = callback; }
	virtual void acquireContext() PX_OVERRIDE PX_FINAL {}
	virtual void releaseContext() PX_OVERRIDE PX_FINAL {}
	virtual void preallocateNewBuffers(PxU32 /*nbNewPairs*/, PxU32 /*maxIndex*/) PX_OVERRIDE PX_FINAL { /*TODO - implement if it's useful to do so*/}
	virtual void lock() PX_OVERRIDE PX_FINAL { mContactManagerMutex.lock(); }
	virtual void unlock() PX_OVERRIDE PX_FINAL { mContactManagerMutex.unlock(); }
	// Lost/found patch data is only tracked when mGPU is set (see members below).
	virtual PxsContactManagerOutputCounts* getLostFoundPatchOutputCounts() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLostOutputCounts.begin() : NULL; }
	virtual PxsContactManager** getLostFoundPatchManagers() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLost.begin() : NULL; }
	virtual PxU32 getNbLostFoundPatchManagers() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLost.size() : 0; }
	// GPU device buffers do not exist on this CPU implementation.
	virtual PxsContactManagerOutput* getGPUContactManagerOutputBase() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual PxReal* getGPURestDistances() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual Sc::ShapeInteraction** getGPUShapeInteractions() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual PxsTorsionalFrictionData* getGPUTorsionalData() PX_OVERRIDE PX_FINAL { return NULL; }
	//~PxvNphaseImplementationContext

	// PxvNphaseImplementationFallback
	virtual void processContactManager(PxReal dt, PxsContactManagerOutput* cmOutputs, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void processContactManagerSecondPass(PxReal dt, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void unregisterContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void refreshContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void appendContactManagersFallback(PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void removeContactManagersFallback(PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	// Host-side views over the persistent bucket's pinned per-pair arrays.
	virtual const Sc::ShapeInteraction*const* getShapeInteractionsGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mShapeInteractionsGPU.begin(); }
	virtual const PxReal* getRestDistancesGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mRestDistancesGPU.begin(); }
	virtual const PxsTorsionalFrictionData* getTorsionalDataGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mTorsionalPropertiesGPU.begin(); }
	//~PxvNphaseImplementationFallback

	PxArray<PxU32>							mRemovedContactManagers;	// np indices of managers removed this frame
	PxsContactManagers						mNarrowPhasePairs;			// persistent pairs
	PxsContactManagers						mNewNarrowPhasePairs;		// pairs registered since the last append
	PxContactModifyCallback*				mModifyCallback;			// user contact-modification callback, see setContactModifyCallback()
	IG::IslandSim*							mIslandSim;
	PxMutex									mContactManagerMutex;		// backs lock()/unlock()
	PxArray<PxsCMDiscreteUpdateTask*>		mCmTasks;
	PxArray<PxsContactManagerOutputCounts>	mCmFoundLostOutputCounts;	// only filled when mGPU
	PxArray<PxsContactManager*>				mCmFoundLost;				// only filled when mGPU
	const bool								mGPU;

private:
	void unregisterContactManagerInternal(PxU32 npIndex, PxsContactManagers& managers, PxsContactManagerOutput* cmOutputs);

	// Unregisters entry 'index' from 'cms' and shrinks the output array by one
	// (unregisterContactManagerInternal is expected to have compacted the arrays).
	PX_FORCE_INLINE void unregisterAndForceSize(PxsContactManagers& cms, PxU32 index)
	{
		unregisterContactManagerInternal(index, cms, cms.mOutputContactManagers.begin());
		cms.mOutputContactManagers.forceSize_Unsafe(cms.mOutputContactManagers.size()-1);
	}

	void appendNewLostPairs();
};
}
#endif

View File

@@ -0,0 +1,122 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_PARTICLE_BUFFER_H
#define PXS_PARTICLE_BUFFER_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxUserAllocated.h"
#include "PxParticleSystemFlag.h"
namespace physx
{
class PxCudaContextManager;
struct PxParticleVolume;
struct PxParticleRigidFilterPair;
struct PxParticleRigidAttachment;
// Low-level interface for a user particle buffer.
// NOTE(review): the 'D'/'H' accessor suffixes presumably mean device- vs host-side
// pointers (suggested by allocHostBuffers() and the PxCudaContextManager dependency)
// - confirm against the implementation.
class PxsParticleBuffer
{
public:
	// Destroys this buffer; instances are not deleted directly (the destructor is protected).
	virtual void release() = 0;

	virtual PxVec4* getPositionInvMassesD() const = 0;		// xyz = position, w = inverse mass
	virtual PxVec4* getVelocitiesD() const = 0;
	virtual PxU32* getPhasesD() const = 0;
	virtual PxParticleVolume* getParticleVolumesD() const = 0;

	// Host-side counterparts; only valid after allocHostBuffers().
	virtual PxVec4* getPositionInvMassesH() const = 0;
	virtual PxVec4* getVelocitiesH() const = 0;
	virtual PxU32* getPhasesH() const = 0;
	virtual PxParticleVolume* getParticleVolumesH() const = 0;

	virtual void setNbActiveParticles(PxU32 nbActiveParticles) = 0;
	virtual PxU32 getNbActiveParticles() const = 0;
	virtual PxU32 getMaxParticles() const = 0;				// buffer capacity
	virtual PxU32 getNbParticleVolumes() const = 0;
	virtual void setNbParticleVolumes(PxU32 nbParticleVolumes) = 0;
	virtual PxU32 getMaxParticleVolumes() const = 0;
	virtual void setRigidFilters(PxParticleRigidFilterPair* filters, PxU32 nbFilters) = 0;
	virtual void setRigidAttachments(PxParticleRigidAttachment* attachments, PxU32 nbAttachments) = 0;
	virtual PxU32 getFlatListStartIndex() const = 0;
	virtual void raiseFlags(PxParticleBufferFlag::Enum flags) = 0;	// raises (ORs in) dirty/state flags
	virtual PxU32 getUniqueId() const = 0;
	virtual void allocHostBuffers() = 0;					// allocates the host-side mirrors returned by the *H accessors

protected:
	virtual ~PxsParticleBuffer() {}
};
// Particle buffer with additional diffuse-particle data (see PxDiffuseParticleParams).
class PxsParticleAndDiffuseBuffer : public PxsParticleBuffer
{
public:
	virtual PxVec4* getDiffusePositionLifeTimeD() const = 0;	// xyz = position, w = lifetime ('D' = device side, see base class note)
	virtual PxVec4* getDiffuseVelocitiesD() const = 0;
	virtual PxU32 getNbActiveDiffuseParticles() const = 0;
	virtual void setMaxActiveDiffuseParticles(PxU32 maxActiveDiffuseParticles) = 0;
	virtual PxU32 getMaxDiffuseParticles() const = 0;
	virtual void setDiffuseParticleParams(const PxDiffuseParticleParams& params) = 0;
	virtual const PxDiffuseParticleParams& getDiffuseParticleParams() const = 0;

protected:
	virtual ~PxsParticleAndDiffuseBuffer() {}
};
// Particle buffer with additional cloth data (triangles, springs, rest positions).
class PxsParticleClothBuffer : public PxsParticleBuffer
{
public:
	virtual PxVec4* getRestPositionsD() = 0;
	virtual PxU32* getTrianglesD() const = 0;			// triangle vertex indices ('D' = device side, see base class note)
	virtual void setNbTriangles(PxU32 nbTriangles) = 0;
	virtual PxU32 getNbTriangles() const = 0;
	virtual PxU32 getNbSprings() const = 0;
	virtual PxParticleSpring* getSpringsD() = 0;
	virtual void setCloths(PxPartitionedParticleCloth& cloths) = 0;

protected:
	virtual ~PxsParticleClothBuffer() {}
};
// Particle buffer with additional rigid-aggregate data (particles grouped into rigids).
class PxsParticleRigidBuffer : public PxsParticleBuffer
{
public:
	virtual PxU32* getRigidOffsetsD() const = 0;			// per-rigid start offsets into the particle arrays ('D' = device side, see base class note)
	virtual PxReal* getRigidCoefficientsD() const = 0;
	virtual PxVec4* getRigidLocalPositionsD() const = 0;
	virtual PxVec4* getRigidTranslationsD() const = 0;
	virtual PxVec4* getRigidRotationsD() const = 0;
	virtual PxVec4* getRigidLocalNormalsD() const = 0;
	virtual void setNbRigids(PxU32 nbRigids) = 0;
	virtual PxU32 getNbRigids() const = 0;

protected:
	virtual ~PxsParticleRigidBuffer() {}
};
}
#endif

View File

@@ -0,0 +1,131 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_PARTITION_EDGE_H
#define PXS_PARTITION_EDGE_H
// PT: this is a temporary place for code related to PartitionEdge. This seems to be a GPU-only class
// but for some reason some CPU bits do need it. Ideally it would be fully contained inside the GPU DLL.
#include "PxsIslandSim.h"
#include "PxcNpWorkUnit.h"
namespace physx
{
// PT: TODO: mNextPatch is almost always null so it would make sense to store that cold data somewhere else, e.g:
// - use one bit to mark the general case where mNextPatch is null
// - store non-null mNextPatch in a hashmap indexed by mUniqueIndex (no need to reserve the full memory for it)
//
// The only annoying bit is that the mechanism needed to actually walk the linked list (i.e. the hashmap) needs to be
// available in processPartitionEdges below, so that's more GPU stuff exposed to the CPU code. But I guess we crossed
// that line a while ago when the heads of the LLs moved to the low-level island manager anyway. And in fact we could
// put the two in the same structure eventually, like a PartitionEdgeManager.
//
// In any case the benefit of the change would be a smaller PartitionEdge. Going further, the nodes can be retrieved from
// the edge index, so if mNextPatch also disappears then only the unique ID remains, which.... can be derived from
// the PartitionEdge address?! So it would be just the "edge index" with some bits encoded in it, just 4 bytes.
//
// This is per-edge data so we could also merge this with CPUExternalData, which already contains the node indices, and
// has an implicit unique index as the index into mEdgeNodeIndices. But maybe we cannot because there can be multiple
// PartitionEdge for the same source edge (hence the linked list).
//
// Another idea would be to store the edge index instead. You would need access to the edge manager in CPU code but no
// hashmap or new structure is needed.
// One per-patch edge of a solver partition.
// The island-manager edge index and the flags below share the mEdgeIndex member:
// the low NB_BITS bits hold the flags, the remaining upper bits hold the edge index
// (hence the shift in getEdgeIndex() and the range assert in the constructor).
struct PartitionEdge
{
	// Flags packed into the low NB_BITS bits of mEdgeIndex.
	enum Enum
	{
		HAS_INFINITE_MASS0	= (1<<0),
		HAS_INFINITE_MASS1	= (1<<1),
		HAS_THRESHOLD		= (1<<2),
		IS_CONTACT			= (1<<3),
		SPECIAL_HANDLED		= (1<<4),

		NB_BITS				= 5
	};

	PxNodeIndex		mNode0;			//! The node index for node 0. Can be obtained from the edge index alternatively
	PxNodeIndex		mNode1;			//! The node index for node 1. Can be obtained from the edge index alternatively
	PartitionEdge*	mNextPatch;		//! if the contact manager has more than 1 patch, its PartitionEdges are chained through this pointer (NULL-terminated list)
private:
	IG::EdgeIndex	mEdgeIndex;		//! The edge index into the island manager (upper bits) combined with the flags above (low NB_BITS bits). Used to identify the contact manager/constraint
public:
	PxU32			mUniqueIndex;	//! a unique ID for this edge

	// Returns the island-manager edge index with the flag bits stripped.
	PX_FORCE_INLINE	IG::EdgeIndex	getEdgeIndex()	const	{ return mEdgeIndex >> NB_BITS;	}

	PX_FORCE_INLINE	PxU32	isArticulation0()	const	{ return mNode0.isArticulation();	}
	PX_FORCE_INLINE	PxU32	isArticulation1()	const	{ return mNode1.isArticulation();	}

	// All flag getters return the raw masked bit (non-zero == set), not a normalized bool.
	PX_FORCE_INLINE	PxU32	hasInfiniteMass0()	const	{ return mEdgeIndex & HAS_INFINITE_MASS0;	}
	PX_FORCE_INLINE	PxU32	hasInfiniteMass1()	const	{ return mEdgeIndex & HAS_INFINITE_MASS1;	}
	PX_FORCE_INLINE	void	setInfiniteMass0()			{ mEdgeIndex |= HAS_INFINITE_MASS0;			}
	PX_FORCE_INLINE	void	setInfiniteMass1()			{ mEdgeIndex |= HAS_INFINITE_MASS1;			}

	PX_FORCE_INLINE	void	setHasThreshold()			{ mEdgeIndex |= HAS_THRESHOLD;				}
	PX_FORCE_INLINE	PxU32	hasThreshold()		const	{ return mEdgeIndex & HAS_THRESHOLD;		}

	PX_FORCE_INLINE	void	setIsContact()				{ mEdgeIndex |= IS_CONTACT;					}
	PX_FORCE_INLINE	PxU32	isContact()			const	{ return mEdgeIndex & IS_CONTACT;			}

	PX_FORCE_INLINE	void	setSpecialHandled()			{ mEdgeIndex |= SPECIAL_HANDLED;			}
	PX_FORCE_INLINE	void	clearSpecialHandled()		{ mEdgeIndex &= ~SPECIAL_HANDLED;			}
	PX_FORCE_INLINE	PxU32	isSpecialHandled()	const	{ return mEdgeIndex & SPECIAL_HANDLED;		}

	//KS - This constructor explicitly does not set mUniqueIndex. It is filled in by the pool allocator and this constructor
	//is called afterwards. We do not want to stomp the uniqueIndex value
	PartitionEdge(IG::EdgeIndex index) :
		mNextPatch(NULL),
		mEdgeIndex(index << NB_BITS)
	{
		PX_ASSERT(!(index & 0xf8000000)); // PT: reserve 5 bits for internal flags
	}
};
PX_COMPILE_TIME_ASSERT(sizeof(PartitionEdge)<=32); // PT: 2 of them per cache-line
// Writes the work unit's narrow-phase index into the GPU edge-node-index table for
// every patch edge chained off the unit's island edge. Does nothing when there is no
// GPU data, when the pair has its response disabled, or when the edge-node-index
// pointer is null (CPU-only version).
static PX_FORCE_INLINE void processPartitionEdges(const IG::GPUExternalData* gpuData, const PxcNpWorkUnit& unit)
{
	if(!gpuData)
		return;

	if(unit.mFlags & PxcNpWorkUnitFlag::eDISABLE_RESPONSE)
		return;

	PxU32* edgeNodeIndices = gpuData->getEdgeNodeIndexPtr();
	if(!edgeNodeIndices)	// PT: only non-null for GPU version
		return;

	// Walk the patch list (see PartitionEdge::mNextPatch) and tag each entry.
	for(const PartitionEdge* edge = gpuData->getFirstPartitionEdge(unit.mEdgeIndex); edge; edge = edge->mNextPatch)
		edgeNodeIndices[edge->mUniqueIndex] = unit.mNpIndex;
}
}
#endif

View File

@@ -0,0 +1,236 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_RIGID_BODY_H
#define PXS_RIGID_BODY_H
#include "PxvDynamics.h"
#include "CmSpatialVector.h"
#include "foundation/PxMutex.h"
namespace physx
{
struct PxsCCDBody;
PX_ALIGN_PREFIX(16)
// Low-level rigid body. Wraps a PxsBodyCore (pose, velocities, mass properties) and
// adds simulation-internal state: per-frame flags, CCD data, sleep filtering.
// Layout is 16-byte aligned and size-checked below; the trailing //N comments record
// the expected byte offset after each member.
class PxsRigidBody
{
	public:

	// Internal state flags, stored in mInternalFlags.
	enum PxsRigidBodyFlag
	{
		eFROZEN					=	1 << 0,		//This flag indicates that the stabilization is enabled and the body is
												//"frozen". By "frozen", we mean that the body's transform is unchanged
												//from the previous frame. This permits various optimizations.
		eFREEZE_THIS_FRAME		=	1 << 1,
		eUNFREEZE_THIS_FRAME	=	1 << 2,
		eACTIVATE_THIS_FRAME	=	1 << 3,
		eDEACTIVATE_THIS_FRAME	=	1 << 4,
		// PT: this flag is now only used on the GPU. For the CPU the data is now stored directly in PxsBodyCore.
		eDISABLE_GRAVITY_GPU	=	1 << 5,
		eSPECULATIVE_CCD		=	1 << 6,
		eENABLE_GYROSCOPIC		=	1 << 7,
		eRETAIN_ACCELERATION	=	1 << 8,
		eFIRST_BODY_COPY_GPU	=	1 << 9,		// Flag to raise to indicate that the body is DMA'd to the GPU for the first time
		eVELOCITY_COPY_GPU		=	1 << 10		// Flag to raise to indicate that linear and angular velocities should be DMA'd to the GPU
	};

	// core:         body core this object mirrors; cached pose/iteration counts are copied from it.
	// freeze_count: initial freeze countdown value (semantics live in the cpp code - not visible here).
	PX_FORCE_INLINE PxsRigidBody(PxsBodyCore* core, PxReal freeze_count) :
		mLastTransform			(core->body2World),
		mInternalFlags			(0),
		mSolverIterationCounts	(core->solverIterationCounts),
		mCCD					(NULL),
		mCore					(core),
		mSleepLinVelAcc			(PxVec3(0.0f)),
		mFreezeCount			(freeze_count),
		mSleepAngVelAcc			(PxVec3(0.0f)),
		mAccelScale				(1.0f)
	{}

	PX_FORCE_INLINE ~PxsRigidBody() {}

	// Accessors forwarding to the body core.
	PX_FORCE_INLINE const PxTransform&	getPose()			const	{ PX_ASSERT(mCore->body2World.isSane()); return mCore->body2World;			}
	PX_FORCE_INLINE const PxVec3&		getLinearVelocity()	const	{ PX_ASSERT(mCore->linearVelocity.isFinite()); return mCore->linearVelocity;	}
	PX_FORCE_INLINE const PxVec3&		getAngularVelocity() const	{ PX_ASSERT(mCore->angularVelocity.isFinite()); return mCore->angularVelocity;	}

	PX_FORCE_INLINE void setVelocity(const PxVec3& linear,
									 const PxVec3& angular)	{ PX_ASSERT(linear.isFinite()); PX_ASSERT(angular.isFinite());
															  mCore->linearVelocity = linear;
															  mCore->angularVelocity = angular; }
	PX_FORCE_INLINE void setLinearVelocity(const PxVec3& linear)	{ PX_ASSERT(linear.isFinite()); mCore->linearVelocity = linear;		}
	PX_FORCE_INLINE void setAngularVelocity(const PxVec3& angular)	{ PX_ASSERT(angular.isFinite()); mCore->angularVelocity = angular;	}

	// Zero out velocity components according to the core's lock flags (defined below the class).
	PX_FORCE_INLINE void constrainLinearVelocity();
	PX_FORCE_INLINE void constrainAngularVelocity();

	PX_FORCE_INLINE PxU32	getIterationCounts()		{ return mCore->solverIterationCounts;	}
	PX_FORCE_INLINE PxReal	getReportThreshold() const	{ return mCore->contactReportThreshold;	}

	// CCD bookkeeping: mLastTransform caches the pose saved by saveLastCCDTransform().
	PX_FORCE_INLINE const PxTransform& getLastCCDTransform() const	{ return mLastTransform;			}
	PX_FORCE_INLINE void saveLastCCDTransform()						{ mLastTransform = mCore->body2World;	}

	// A zero inverse mass identifies a kinematic body here.
	PX_FORCE_INLINE bool isKinematic() const						{ return mCore->inverseMass == 0.0f;	}

	PX_FORCE_INLINE void setPose(const PxTransform& pose)			{ mCore->body2World = pose;		}
	PX_FORCE_INLINE void setPosition(const PxVec3& position)		{ mCore->body2World.p = position;	}
	PX_FORCE_INLINE PxReal getInvMass() const						{ return mCore->inverseMass;		}
	PX_FORCE_INLINE PxVec3 getInvInertia() const					{ return mCore->inverseInertia;		}
	// NOTE: divides by the inverse values - not valid for kinematic (zero-inverse-mass) bodies.
	PX_FORCE_INLINE PxReal getMass() const							{ return 1.0f/mCore->inverseMass;	}
	PX_FORCE_INLINE PxVec3 getInertia() const						{ return PxVec3(1.0f/mCore->inverseInertia.x,
																					1.0f/mCore->inverseInertia.y,
																					1.0f/mCore->inverseInertia.z);	}
	PX_FORCE_INLINE PxsBodyCore& getCore()							{ return *mCore;	}
	PX_FORCE_INLINE const PxsBodyCore& getCore() const				{ return *mCore;	}

	// Per-frame flag queries/updates; non-zero return means the flag is set.
	PX_FORCE_INLINE PxU32 isActivateThisFrame() const		{ return PxU32(mInternalFlags & eACTIVATE_THIS_FRAME);		}
	PX_FORCE_INLINE PxU32 isDeactivateThisFrame() const		{ return PxU32(mInternalFlags & eDEACTIVATE_THIS_FRAME);	}
	PX_FORCE_INLINE PxU32 isFreezeThisFrame() const			{ return PxU32(mInternalFlags & eFREEZE_THIS_FRAME);		}
	PX_FORCE_INLINE PxU32 isUnfreezeThisFrame() const		{ return PxU32(mInternalFlags & eUNFREEZE_THIS_FRAME);		}
	PX_FORCE_INLINE void clearFreezeFlag()					{ mInternalFlags &= ~eFREEZE_THIS_FRAME;	}
	PX_FORCE_INLINE void clearUnfreezeFlag()				{ mInternalFlags &= ~eUNFREEZE_THIS_FRAME;	}
	PX_FORCE_INLINE void clearAllFrameFlags()				{ mInternalFlags &= ~(eFREEZE_THIS_FRAME | eUNFREEZE_THIS_FRAME | eACTIVATE_THIS_FRAME | eDEACTIVATE_THIS_FRAME);	}

	// Resets the sleep-check velocity accumulators.
	PX_FORCE_INLINE void resetSleepFilter()					{ mSleepAngVelAcc = mSleepLinVelAcc = PxVec3(0.0f);	}

	// PT: implemented in PxsCCD.cpp:
	void						advanceToToi(PxReal toi, PxReal dt, bool clip);
	void						advancePrevPoseToToi(PxReal toi);
//	PxTransform					getAdvancedTransform(PxReal toi) const;
	Cm::SpatialVector			getPreSolverVelocities() const;

	PxTransform					mLastTransform;				//28 // pose cached for CCD, see saveLastCCDTransform()
	PxU16						mInternalFlags;				//30 // PxsRigidBodyFlag bits
	PxU16						mSolverIterationCounts;		//32
	PxsCCDBody*					mCCD;						//40 // only valid during CCD
	PxsBodyCore*				mCore;						//48
	PxVec3						mSleepLinVelAcc;			//60 // accumulated linear velocity for sleep checks
	PxReal						mFreezeCount;				//64
	PxVec3						mSleepAngVelAcc;			//76 // accumulated angular velocity for sleep checks
	PxReal						mAccelScale;				//80 // NOTE(review): usage not visible in this header - see solver code
}
PX_ALIGN_SUFFIX(16);
PX_COMPILE_TIME_ASSERT(0 == (sizeof(PxsRigidBody) & 0x0f));
void PxsRigidBody::constrainLinearVelocity()
{
const PxU32 lockFlags = mCore->lockFlags;
if(lockFlags)
{
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_X)
mCore->linearVelocity.x = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_Y)
mCore->linearVelocity.y = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_Z)
mCore->linearVelocity.z = 0.0f;
}
}
void PxsRigidBody::constrainAngularVelocity()
{
const PxU32 lockFlags = mCore->lockFlags;
if(lockFlags)
{
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_X)
mCore->angularVelocity.x = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_Y)
mCore->angularVelocity.y = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_Z)
mCore->angularVelocity.z = 0.0f;
}
}
// External acceleration (linear + angular) applied to a rigid body. Both components
// default to zero.
struct PxsRigidBodyExternalAcceleration
{
	PxVec3 linearAcceleration = PxVec3(0.0f);
	PxVec3 angularAcceleration = PxVec3(0.0f);

	// Zero acceleration.
	PxsRigidBodyExternalAcceleration()
	{
	}

	// Explicit linear and angular acceleration.
	PxsRigidBodyExternalAcceleration(const PxVec3& linearAcc, const PxVec3& angularAcc) :
		linearAcceleration(linearAcc),
		angularAcceleration(angularAcc)
	{
	}
};
// Lazily-grown, index-addressed storage of per-body external accelerations.
// Growth is serialized by mLock with a double-checked size test; get()/hasAccelerations()
// read without locking. NOTE(review): cross-thread visibility relies on a 'volatile'
// counter rather than an atomic, which is not a portable C++ synchronization primitive
// - confirm this is safe on all supported compilers/platforms.
struct PxsExternalAccelerationProvider
{
	PxArray<PxsRigidBodyExternalAcceleration> mAccelerations;	// storage, indexed by body index
	PxMutex mLock;												// serializes resizing of mAccelerations
	volatile PxU32 mArraySize; //Required because of multi threading

	PxsExternalAccelerationProvider() : mArraySize(0)
	{ }

	// Stores 'value' at 'index', first growing the array to 'maxNumBodies' if it is
	// smaller. 'value' is only read (despite the non-const reference).
	PX_FORCE_INLINE void setValue(PxsRigidBodyExternalAcceleration& value, PxU32 index, PxU32 maxNumBodies)
	{
		if (mArraySize < maxNumBodies)
		{
			PxMutex::ScopedLock lock(mLock);
			if (mArraySize < maxNumBodies) //Test again because only after the lock we are sure that only one thread is active at a time
			{
				mAccelerations.resize(maxNumBodies);
				mArraySize = maxNumBodies; //Only now the resize is complete - mAccelerations.size() might already change before the array actually allocated the new memory
			}
		}
		PX_ASSERT(index < mArraySize);
		mAccelerations[index] = value;
	}

	// True once at least one setValue() call has grown the storage.
	PX_FORCE_INLINE bool hasAccelerations() const
	{
		return mArraySize > 0;
	}

	// Unlocked read; index must be below the published size.
	PX_FORCE_INLINE const PxsRigidBodyExternalAcceleration& get(PxU32 index) const
	{
		PX_ASSERT(index < mArraySize);
		return mAccelerations[index];
	}

	// Drops all entries. On the first call after use, clear() keeps the capacity;
	// a subsequent call with size 0 releases the memory via reset().
	PX_FORCE_INLINE void clearAll()
	{
		if (mArraySize > 0)
		{
			mAccelerations.clear();
			mArraySize = 0;
		}
		else if (mAccelerations.capacity() > 0)
			mAccelerations.reset();
	}
};
}
#endif

View File

@@ -0,0 +1,262 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_SIMPLE_ISLAND_GEN_H
#define PXS_SIMPLE_ISLAND_GEN_H
#include "foundation/PxUserAllocated.h"
#include "PxsIslandSim.h"
#include "CmTask.h"
/*
PT: runs first part of the island gen's second pass in parallel with the Pxg-level constraint partitioning.
mIslandGen task spawns Pxg constraint partitioning task(s).
mIslandGen runs processNarrowPhaseTouchEvents() in parallel with Pxg.
///////////////////////////////////////////////////////////////////////////////
Previous design:
mPostIslandGen runs as a continuation task after mIslandGen and Pxg.
mPostIslandGen mainly runs mSetEdgesConnectedTask, which:
- calls mSimpleIslandManager->setEdgeConnected()
- calls mSimpleIslandManager->secondPassIslandGen()
- calls wakeObjectsUp()
///////////////////////////////////////////////////////////////////////////////
New design:
postIslandGen is not a task anymore (mPostIslandGen does not exist).
postIslandGen is directly called at the end of mIslandGen.
So it now runs in parallel with Pxg.
mIslandGen and Pxg continue to mSolver task.
postIslandGen mainly runs mSetEdgesConnectedTask, which:
- calls mSimpleIslandManager->setEdgeConnected()
- calls mSimpleIslandManager->secondPassIslandGenPart1()
mSolver now first runs the parts that don't overlap with Pxg:
- calls mSimpleIslandManager->secondPassIslandGenPart2()
- calls wakeObjectsUp()
///////////////////////////////////////////////////////////////////////////////
Before:
mIslandGen->processNarrowPhaseTouchEvents |mPostIslandGen |mSolver
=>PxgConstraintPartition |=>setEdgesConnected->secondPassIslandGen->wakeObjectsUp|
After:
mIslandGen->processNarrowPhaseTouchEvents->postIslandGen |secondPassIslandGenPart2->wakeObjectsUp->mSolver
=>PxgConstraintPartition =>setEdgesConnected->secondPassIslandGenPart1 |
*/
#define USE_SPLIT_SECOND_PASS_ISLAND_GEN 1
namespace physx
{
class PxsContactManager;
// PT: TODO: fw declaring an Sc class here is not good
namespace Sc
{
class Interaction;
}
namespace Dy
{
struct Constraint;
}
namespace IG
{
class SimpleIslandManager;
// Task running the third island-gen pass on one island sim
// (runInternal() is implemented in the cpp file).
class ThirdPassTask : public Cm::Task
{
	SimpleIslandManager&	mIslandManager;
	IslandSim&				mIslandSim;	// island sim this instance operates on

public:
	ThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager, IslandSim& islandSim);

	virtual void runInternal();

	virtual const char* getName() const
	{
		return "ThirdPassIslandGenTask";
	}

private:
	PX_NOCOPY(ThirdPassTask)
};
// Task running after the third island-gen pass, operating on the whole island manager
// (runInternal() is implemented in the cpp file).
class PostThirdPassTask : public Cm::Task
{
	SimpleIslandManager&	mIslandManager;

public:
	PostThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager);

	virtual void runInternal();

	virtual const char* getName() const
	{
		return "PostThirdPassTask";
	}

private:
	PX_NOCOPY(PostThirdPassTask)
};
// Per-edge payload storage: each edge maps to either a PxsContactManager or a
// Dy::Constraint, stored type-erased as void*. The two accessors are unchecked
// reinterpret_casts, so the caller must know which kind the edge actually holds.
class AuxCpuData
{
public:
	PX_FORCE_INLINE PxsContactManager* getContactManager(IG::EdgeIndex edgeId) const { return reinterpret_cast<PxsContactManager*>(mConstraintOrCm[edgeId]); }
	PX_FORCE_INLINE Dy::Constraint* getConstraint(IG::EdgeIndex edgeId) const { return reinterpret_cast<Dy::Constraint*>(mConstraintOrCm[edgeId]); }
	Cm::BlockArray<void*> mConstraintOrCm; //! Pointers to either the constraint or the contact manager for this pair, indexed by EdgeIndex
};
// Central island management class: owns the node/edge handle spaces and two
// IslandSim instances (an "accurate" and a "speculative" one — see TODO below),
// and drives the multi-pass island generation (first/second/third pass).
class SimpleIslandManager : public PxUserAllocated
{
	HandleManager<PxU32> mNodeHandles;		//! Handle manager for nodes
	HandleManager<EdgeIndex> mEdgeHandles;	//! Handle manager for edges

	// An array of destroyed nodes
	PxArray<PxNodeIndex> mDestroyedNodes;
	Cm::BlockArray<Sc::Interaction*> mInteractions;	// per-edge interaction pointer, see getInteractionFromEdgeIndex()

	// Edges destroyed this frame
	PxArray<EdgeIndex> mDestroyedEdges;

	GPUExternalData mGpuData;
	CPUExternalData mCpuData;
	AuxCpuData mAuxCpuData;		// per-edge constraint-or-contact-manager pointers

	PxBitMap mConnectedMap;		// one bit per edge, tested by isEdgeConnected()

	// PT: TODO: figure out why we still need both
	IslandSim mAccurateIslandManager;
	IslandSim mSpeculativeIslandManager;

	// third-pass tasks, one per island sim, plus the task that runs after both
	ThirdPassTask mSpeculativeThirdPassTask;
	ThirdPassTask mAccurateThirdPassTask;
	PostThirdPassTask mPostThirdPassTask;

	PxU32 mMaxDirtyNodesPerFrame;
	const PxU64 mContextID;		// context ID used for profiling, see getContextId()
	const bool mGPU;
public:
	SimpleIslandManager(bool useEnhancedDeterminism, bool gpu, PxU64 contextID);
	~SimpleIslandManager();

	// node management
	PxNodeIndex addNode(bool isActive, bool isKinematic, Node::NodeType type, void* object);
	void removeNode(const PxNodeIndex index);

	// PT: these two functions added for multithreaded implementation of Sc::Scene::islandInsertion
	void preallocateContactManagers(PxU32 nb, EdgeIndex* handles);
	bool addPreallocatedContactManager(EdgeIndex handle, PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType);

	// edge management: edges carry either a contact manager or a constraint
	EdgeIndex addContactManager(PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType);
	EdgeIndex addConstraint(Dy::Constraint* constraint, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction);

	PX_FORCE_INLINE PxIntBool isEdgeConnected(EdgeIndex edgeIndex) const { return mConnectedMap.test(edgeIndex); }

	// activation state control
	void activateNode(PxNodeIndex index);
	void deactivateNode(PxNodeIndex index);
	void putNodeToSleep(PxNodeIndex index);

	void removeConnection(EdgeIndex edgeIndex);

	// island generation passes (see the design notes at the top of this file
	// for how the second pass is split into Part1/Part2)
	void firstPassIslandGen();
	void additionalSpeculativeActivation();
	void secondPassIslandGen();
	void secondPassIslandGenPart1();
	void secondPassIslandGenPart2();
	void thirdPassIslandGen(PxBaseTask* continuation);

	PX_INLINE void clearDestroyedPartitionEdges()
	{
		mGpuData.mDestroyedPartitionEdges.forceSize_Unsafe(0);
	}

	void setEdgeConnected(EdgeIndex edgeIndex, Edge::EdgeType edgeType);
	void setEdgeDisconnected(EdgeIndex edgeIndex);

	void setEdgeRigidCM(const EdgeIndex edgeIndex, PxsContactManager* cm);
	void clearEdgeRigidCM(const EdgeIndex edgeIndex);

	void setKinematic(PxNodeIndex nodeIndex);
	void setDynamic(PxNodeIndex nodeIndex);

	// accessors for the two island sims
	PX_FORCE_INLINE IslandSim& getSpeculativeIslandSim() { return mSpeculativeIslandManager; }
	PX_FORCE_INLINE const IslandSim& getSpeculativeIslandSim() const { return mSpeculativeIslandManager; }
	PX_FORCE_INLINE IslandSim& getAccurateIslandSim() { return mAccurateIslandManager; }
	PX_FORCE_INLINE const IslandSim& getAccurateIslandSim() const { return mAccurateIslandManager; }

	PX_FORCE_INLINE const AuxCpuData& getAuxCpuData() const { return mAuxCpuData; }

	// note: these return the *total* number of handles managed, not the number currently in use
	PX_FORCE_INLINE PxU32 getNbEdgeHandles() const { return mEdgeHandles.getTotalHandles(); }
	PX_FORCE_INLINE PxU32 getNbNodeHandles() const { return mNodeHandles.getTotalHandles(); }

	void deactivateEdge(const EdgeIndex edge);

	// convenience forwards to mAuxCpuData — same unchecked casts as AuxCpuData
	PX_FORCE_INLINE PxsContactManager* getContactManager(IG::EdgeIndex edgeId) const { return reinterpret_cast<PxsContactManager*>(mAuxCpuData.mConstraintOrCm[edgeId]); }
	PX_FORCE_INLINE Dy::Constraint* getConstraint(IG::EdgeIndex edgeId) const { return reinterpret_cast<Dy::Constraint*>(mAuxCpuData.mConstraintOrCm[edgeId]); }

	PX_FORCE_INLINE Sc::Interaction* getInteractionFromEdgeIndex(IG::EdgeIndex edgeId) const { return mInteractions[edgeId]; }

	PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }

	bool checkInternalConsistency();
private:
	friend class ThirdPassTask;
	friend class PostThirdPassTask;

	bool validateDeactivations() const;

	EdgeIndex addEdge(void* edge, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction);
	EdgeIndex resizeEdgeArrays(EdgeIndex handle, bool flag);

	PX_NOCOPY(SimpleIslandManager)
};
}
}
#endif

View File

@@ -0,0 +1,429 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_SIMULATION_CONTROLLER_H
#define PXS_SIMULATION_CONTROLLER_H
#include "PxDirectGPUAPI.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxTransform.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxPinnedArray.h"
#include "foundation/PxUserAllocated.h"
#include "PxScene.h"
#include "PxParticleSystem.h"
#include "PxArticulationTendonData.h"
#include "PxNodeIndex.h"
namespace physx
{
namespace Dy
{
class Context;
struct Constraint;
class FeatherstoneArticulation;
struct ArticulationJointCore;
class ParticleSystemCore;
class ParticleSystem;
#if PX_SUPPORT_GPU_PHYSX
class DeformableSurface;
class DeformableVolume;
#endif
}
namespace Bp
{
class BoundsArray;
class BroadPhase;
class AABBManagerBase;
}
namespace IG
{
class IslandSim;
}
namespace Sc
{
class BodySim;
class ShapeSimBase;
}
class PxsTransformCache;
class PxvNphaseImplementationContext;
class PxBaseTask;
class PxsContext;
class PxsRigidBody;
class PxsKernelWranglerManager;
class PxsHeapMemoryAllocatorManager;
class PxgParticleSystemCore;
struct PxConeLimitedConstraint;
struct PxsShapeCore;
class PxPhysXGpu;
struct PxgSolverConstraintManagerConstants;
struct PxsExternalAccelerationProvider;
// Callback interface through which the simulation controller triggers
// higher-level body/shape updates and queries CCD body counts.
class PxsSimulationControllerCallback : public PxUserAllocated
{
public:
	virtual void updateScBodyAndShapeSim(PxBaseTask* continuation) = 0;
	virtual PxU32 getNbCcdBodies() = 0;

	virtual ~PxsSimulationControllerCallback() {}
};
#if PX_SUPPORT_OMNI_PVD
// Omni PVD instrumentation hooks: implementations record the data that the
// direct-GPU write API pushes into rigid dynamics and articulations.
class PxsSimulationControllerOVDCallbacks : public PxUserAllocated
{
public:
	// Record a batched rigid-dynamic write (dataVec interpreted according to dataType).
	virtual void processRigidDynamicSet(const PxsRigidBody* const * rigids, const void* dataVec, const PxRigidDynamicGPUIndex* gpuIndices, PxRigidDynamicGPUAPIWriteType::Enum dataType, PxU32 nbElements) = 0;
	// Record a batched articulation write; the max* counts describe the per-articulation data layout.
	virtual void processArticulationSet(const Dy::FeatherstoneArticulation* const * simBodyVec, const void* dataVec, const PxArticulationGPUIndex* indexVec, PxArticulationGPUAPIWriteType::Enum dataType, PxU32 nbElements,
		PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments) = 0;
	// Returns the number of elements in a data block as well as the size of the datablock, see PxArticulationGPUAPIWriteType::Enum for where the sizes etc are derived
	PX_FORCE_INLINE void getArticulationDataElements(PxArticulationGPUAPIWriteType::Enum dataType, PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments,
		PxU32& nbSubElements, PxU32& blockSize) const;

	virtual ~PxsSimulationControllerOVDCallbacks() {}
};
#endif
// Low-level simulation controller interface. Nearly every virtual has a no-op
// (or NULL/0/false-returning) default implementation: the CPU pipeline uses
// those defaults, while the GPU backend overrides them (per the notes below,
// the class is effectively only needed for the GPU path; mGPU lets callers
// skip work cheaply on CPU).
class PxsSimulationController : public PxUserAllocated
{
public:
	PxsSimulationController(PxsSimulationControllerCallback* callback, PxIntBool gpu) : mCallback(callback), mGPU(gpu) {}
	virtual ~PxsSimulationController(){}

	// ---- shape registration ----
	virtual void addPxgShape(Sc::ShapeSimBase* /*shapeSimBase*/, const PxsShapeCore* /*shapeCore*/, PxNodeIndex /*nodeIndex*/, PxU32 /*index*/){}
	virtual void setPxgShapeBodyNodeIndex(PxNodeIndex /*nodeIndex*/, PxU32 /*index*/) {}
	virtual void removePxgShape(PxU32 /*index*/){}

	// ---- rigid bodies and articulations ----
	virtual void addDynamic(PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*nodeIndex*/){}
	virtual void addDynamics(PxsRigidBody** /*rigidBody*/, const PxU32* /*nodeIndex*/, PxU32 /*nbBodies*/) {}
	virtual void addArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void releaseArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void releaseDeferredArticulationIds() {}

#if PX_SUPPORT_GPU_PHYSX
	// ---- deformable volumes (soft bodies) ----
	virtual void addSoftBody(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void releaseSoftBody(Dy::DeformableVolume* /*deformableVolume*/) {}
	virtual void releaseDeferredSoftBodyIds() {}
	virtual void activateSoftbody(Dy::DeformableVolume* /*deformableVolume*/) {}
	virtual void deactivateSoftbody(Dy::DeformableVolume* /*deformableVolume*/) {}
	virtual void activateSoftbodySelfCollision(Dy::DeformableVolume* /*deformableVolume*/) {}
	virtual void deactivateSoftbodySelfCollision(Dy::DeformableVolume* /*deformableVolume*/) {}
	virtual void setSoftBodyWakeCounter(Dy::DeformableVolume* /*deformableVolume*/) {}

	// ---- deformable-volume attachments and filters ----
	// "add*Attachment" functions return a handle used by the matching "remove*Attachment"
	virtual void addParticleFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::ParticleSystem* /*particleSystem*/,
		PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/) {}
	virtual void removeParticleFilter(Dy::DeformableVolume* /*deformableVolume*/,
		const Dy::ParticleSystem* /*particleSystem*/, PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/) {}
	virtual PxU32 addParticleAttachment(Dy::DeformableVolume* /*deformableVolume*/, const Dy::ParticleSystem* /*particleSystem*/,
		PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/, const PxVec4& /*barycentrics*/, const bool /*isActive*/) { return 0; }
	virtual void removeParticleAttachment(Dy::DeformableVolume* /*deformableVolume*/, PxU32 /*handle*/) {}
	virtual void addRigidFilter(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/) {}
	virtual void removeRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
		const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/) {}
	virtual PxU32 addRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*softBodyNodeIndex*/,
		PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/, const PxVec3& /*actorSpacePose*/,
		PxConeLimitedConstraint* /*constraint*/, const bool /*isActive*/, bool /*doConversion*/) { return 0; }
	virtual void removeRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/, PxU32 /*handle*/) {}
	virtual void addTetRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
		const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetId*/) {}
	virtual PxU32 addTetRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/,
		PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetIdx*/,
		const PxVec4& /*barycentrics*/, const PxVec3& /*actorSpacePose*/, PxConeLimitedConstraint* /*constraint*/,
		const bool /*isActive*/, bool /*doConversion*/) { return 0; }
	virtual void removeTetRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
		const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetId*/) {}
	virtual void addSoftBodyFilter(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/,
		PxU32 /*tetIdx1*/) {}
	virtual void removeSoftBodyFilter(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/,
		PxU32 /*tetId1*/) {}
	virtual void addSoftBodyFilters(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32* /*tetIndices0*/, PxU32* /*tetIndices1*/,
		PxU32 /*tetIndicesSize*/) {}
	virtual void removeSoftBodyFilters(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32* /*tetIndices0*/, PxU32* /*tetIndices1*/,
		PxU32 /*tetIndicesSize*/) {}
	virtual PxU32 addSoftBodyAttachment(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/, PxU32 /*tetIdx1*/,
		const PxVec4& /*tetBarycentric0*/, const PxVec4& /*tetBarycentric1*/,
		PxConeLimitedConstraint* /*constraint*/, PxReal /*constraintOffset*/, const bool /*isActive*/, bool /*doConversion*/) { return 0; }
	virtual void removeSoftBodyAttachment(Dy::DeformableVolume* /*deformableVolume0*/, PxU32 /*handle*/) {}
	virtual void addClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/, PxU32 /*tetIdx*/) {}
	virtual void removeClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/, PxU32 /*tetIdx*/) {}
	virtual void addVertClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*vertIdx*/, PxU32 /*tetIdx*/) {}
	virtual void removeVertClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*vertIdx*/, PxU32 /*tetIdx*/) {}
	virtual PxU32 addClothAttachment(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/,
		const PxVec4& /*triBarycentric*/, PxU32 /*tetIdx*/, const PxVec4& /*tetBarycentric*/,
		PxConeLimitedConstraint* /*constraint*/, PxReal /*constraintOffset*/,
		const bool /*isActive*/, bool /*doConversion*/) { return 0; }
	virtual void removeClothAttachment(Dy::DeformableVolume* /*deformableVolume*/,PxU32 /*handle*/) {}

	// ---- deformable surfaces (FEM cloth) ----
	virtual void addFEMCloth(Dy::DeformableSurface*, const PxNodeIndex&) {}
	virtual void releaseFEMCloth(Dy::DeformableSurface*) {}
	virtual void releaseDeferredFEMClothIds() {}
	virtual void activateCloth(Dy::DeformableSurface*) {}
	virtual void deactivateCloth(Dy::DeformableSurface*) {}
	virtual void setClothWakeCounter(Dy::DeformableSurface*) {}
	virtual PxU32 addRigidAttachment(Dy::DeformableSurface* /*cloth*/, const PxNodeIndex& /*clothNodeIndex*/,
		PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/, const PxVec3& /*actorSpacePose*/,
		PxConeLimitedConstraint* /*constraint*/, const bool /*isActive*/) { return 0; }
	virtual void removeRigidAttachment(Dy::DeformableSurface* /*cloth*/, PxU32 /*handle*/) {}
	virtual void addTriRigidFilter(Dy::DeformableSurface* /*deformableSurface*/,
		const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/) {}
	virtual void removeTriRigidFilter(Dy::DeformableSurface* /*deformableSurface*/,
		const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/) {}
	virtual PxU32 addTriRigidAttachment(Dy::DeformableSurface* /*deformableSurface*/,
		PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/, const PxVec4& /*barycentrics*/,
		const PxVec3& /*actorSpacePose*/, PxConeLimitedConstraint* /*constraint*/,
		const bool /*isActive*/) { return 0; }
	virtual void removeTriRigidAttachment(Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*handle*/) {}
	virtual void addClothFilter(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/) {}
	virtual void removeClothFilter(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/) {}
	virtual PxU32 addTriClothAttachment(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/,
		const PxVec4& /*triBarycentric0*/, const PxVec4& /*triBarycentric1*/, const bool /*addToActive*/) { return 0; }
	virtual void removeTriClothAttachment(Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*handle*/) {}

	// ---- particle systems ----
	virtual void addParticleSystem(Dy::ParticleSystem* /*particleSystem*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void releaseParticleSystem(Dy::ParticleSystem* /*particleSystem*/) {}
	virtual void releaseDeferredParticleSystemIds() {}
#endif

	// ---- OVD readback toggles ----
	virtual void setEnableOVDReadback(bool) {}
	virtual bool getEnableOVDReadback() const { return false; }
	virtual void setEnableOVDCollisionReadback(bool) {}
	virtual bool getEnableOVDCollisionReadback() const { return false; }
#if PX_SUPPORT_OMNI_PVD
	virtual void setOVDCallbacks(PxsSimulationControllerOVDCallbacks& /*ovdCallbacks*/) {}
#endif

	// ---- per-frame updates ----
	virtual void updateDynamic(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void addJoint(const Dy::Constraint&) {}
	virtual void updateJoint(const PxU32 /*edgeIndex*/, Dy::Constraint* /*constraint*/){}
	virtual void updateBodies(PxsRigidBody** /*rigidBodies*/, PxU32* /*nodeIndices*/, const PxU32 /*nbBodies*/, PxsExternalAccelerationProvider* /*externalAccelerations*/) {}
//	virtual void updateBody(PxsRigidBody* /*rigidBody*/, const PxU32 /*nodeIndex*/) {}
	virtual void updateBodies(PxBaseTask* /*continuation*/){}
	virtual void updateShapes(PxBaseTask* /*continuation*/) {}
	virtual void preIntegrateAndUpdateBound(PxBaseTask* /*continuation*/, const PxVec3 /*gravity*/, const PxReal /*dt*/){}
	virtual void updateParticleSystemsAndSoftBodies(){}
	virtual void sortContacts(){}
	virtual void update(PxBitMapPinned& /*changedHandleMap*/){}
	virtual void updateArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void updateArticulationJoint(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
//	virtual void updateArticulationTendon(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void updateArticulationExtAccel(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
	virtual void updateArticulationAfterIntegration(PxsContext* /*llContext*/, Bp::AABBManagerBase* /*aabbManager*/,
		PxArray<Sc::BodySim*>& /*ccdBodies*/, PxBaseTask* /*continuation*/, IG::IslandSim& /*islandSim*/, float /*dt*/) {}
	virtual void mergeChangedAABBMgHandle() {}
	virtual void gpuDmabackData(PxsTransformCache& /*cache*/, Bp::BoundsArray& /*boundArray*/, PxBitMapPinned& /*changedAABBMgrHandles*/, bool /*enableDirectGPUAPI*/){}
	virtual void updateScBodyAndShapeSim(PxsTransformCache& cache, Bp::BoundsArray& boundArray, PxBaseTask* continuation) = 0;

	// ---- state queries (defaults: nothing to report) ----
	virtual PxU32* getActiveBodies() { return NULL; }
	virtual PxU32* getDeactiveBodies() { return NULL; }
	virtual void** getRigidBodies() { return NULL; }
	virtual PxU32 getNbBodies() { return 0; }

	virtual PxU32* getUnfrozenShapes() { return NULL; }
	virtual PxU32* getFrozenShapes() { return NULL; }
	virtual Sc::ShapeSimBase** getShapeSims() { return NULL; }
	virtual PxU32 getNbFrozenShapes() { return 0; }
	virtual PxU32 getNbUnfrozenShapes() { return 0; }
	virtual PxU32 getNbShapes() { return 0; }

	virtual void clear() { }
	virtual void setBounds(Bp::BoundsArray* /*boundArray*/){}
	virtual void reserve(const PxU32 /*nbBodies*/) {}

	virtual PxU32 getArticulationRemapIndex(const PxU32 /*nodeIndex*/) { return PX_INVALID_U32;}

	//KS - the methods below here should probably be wrapped in if PX_SUPPORT_GPU_PHYSX
	// PT: isn't the whole class only needed for GPU anyway?
	// AD: Yes.
	virtual void setDeformableSurfaceGpuPostSolveCallback(PxPostSolveCallback* /*postSolveCallback*/) { }
	virtual void setDeformableVolumeGpuPostSolveCallback(PxPostSolveCallback* /*postSolveCallback*/) { }

	// NEW DIRECT-GPU API
	virtual bool getRigidDynamicData(void* /*data*/, const PxRigidDynamicGPUIndex* /*gpuIndices*/, PxRigidDynamicGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, float /*oneOverDt*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
	virtual bool setRigidDynamicData(const void* /*data*/, const PxRigidDynamicGPUIndex* /*gpuIndices*/, PxRigidDynamicGPUAPIWriteType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
	virtual bool getArticulationData(void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
	virtual bool setArticulationData(const void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIWriteType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
	virtual bool computeArticulationData(void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIComputeType::Enum /*operation*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
	virtual bool evaluateSDFDistances(PxVec4* /*localGradientAndSDFConcatenated*/, const PxShapeGPUIndex* /*shapeIndices*/, const PxVec4* /*localSamplePointsConcatenated*/, const PxU32* /*samplePointCountPerShape*/, PxU32 /*nbElements*/, PxU32 /*maxPointCount*/, CUevent /*startEvent = NULL*/, CUevent /*finishEvent = NULL*/) { return false; }
	virtual bool copyContactData(void* /*data*/, PxU32* /*numContactPairs*/, const PxU32 /*maxContactPairs*/, CUevent /*startEvent*/, CUevent /*copyEvent*/) { return false; }
	virtual PxArticulationGPUAPIMaxCounts getArticulationGPUAPIMaxCounts() const { return PxArticulationGPUAPIMaxCounts(); }
	virtual bool getD6JointData(void* /*data*/, const PxD6JointGPUIndex* /*gpuIndices*/, PxD6JointGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, PxF32 /*oneOverDt*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
	// END NEW DIRECT-GPU API

	// DEPRECATED DIRECT-GPU API
	PX_DEPRECATED virtual void copySoftBodyDataDEPRECATED(void** /*data*/, void* /*dataSizes*/, void* /*softBodyIndices*/, PxSoftBodyGpuDataFlag::Enum /*flag*/, const PxU32 /*nbCopySoftBodies*/, const PxU32 /*maxSize*/, CUevent /*copyEvent*/) {}
	PX_DEPRECATED virtual void applySoftBodyDataDEPRECATED(void** /*data*/, void* /*dataSizes*/, void* /*softBodyIndices*/, PxSoftBodyGpuDataFlag::Enum /*flag*/, const PxU32 /*nbUpdatedSoftBodies*/, const PxU32 /*maxSize*/, CUevent /*applyEvent*/, CUevent /*signalEvent*/) {}
	PX_DEPRECATED virtual void applyParticleBufferDataDEPRECATED(const PxU32* /*indices*/, const PxGpuParticleBufferIndexPair* /*indexPairs*/, const PxParticleBufferFlags* /*flags*/, PxU32 /*nbUpdatedBuffers*/, CUevent /*waitEvent*/, CUevent /*signalEvent*/) {}
	// END DEPRECATED DIRECT-GPU API

	virtual PxU32 getInternalShapeIndex(const PxsShapeCore& /*shapeCore*/) { return PX_INVALID_U32; }

	virtual void syncParticleData() {}

	virtual void updateBoundsAndShapes(Bp::AABBManagerBase& /*aabbManager*/, bool /*useDirectApi*/){}

#if PX_SUPPORT_GPU_PHYSX
	// ---- deformable activation queries (GPU backend reports state changes) ----
	virtual PxU32 getNbDeactivatedDeformableSurfaces() const { return 0; }
	virtual PxU32 getNbActivatedDeformableSurfaces() const { return 0; }
	virtual Dy::DeformableSurface** getDeactivatedDeformableSurfaces() const { return NULL; }
	virtual Dy::DeformableSurface** getActivatedDeformableSurfaces() const { return NULL; }

	virtual PxU32 getNbDeactivatedDeformableVolumes() const { return 0; }
	virtual PxU32 getNbActivatedDeformableVolumes() const { return 0; }
	virtual Dy::DeformableVolume** getDeactivatedDeformableVolumes() const { return NULL; }
	virtual Dy::DeformableVolume** getActivatedDeformableVolumes() const { return NULL; }
	virtual const PxReal* getDeformableVolumeWakeCounters() const { return NULL; }

	virtual bool hasDeformableSurfaces() const { return false; }
	virtual bool hasDeformableVolumes() const { return false; }
#endif

protected:
	PxsSimulationControllerCallback* mCallback;	// hook back into higher-level scene code

public:
	const PxIntBool mGPU; // PT: true for GPU version, used to quickly skip calls for CPU version
};
#if PX_SUPPORT_OMNI_PVD
// Computes, for one articulation write type, the number of sub-elements a
// per-articulation data block contains and the total block size in bytes.
// The per-articulation maxima passed in define the layout for each data type.
PX_FORCE_INLINE void PxsSimulationControllerOVDCallbacks::getArticulationDataElements(PxArticulationGPUAPIWriteType::Enum dataType, PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments,
	PxU32& nbSubElements, PxU32& blockSize) const
{
	PxU32 subElemCount = 0;		// sub-elements per articulation
	PxU32 subElemSize = 0;		// byte size of one sub-element

	switch(dataType)
	{
		// one scalar per joint dof
		case PxArticulationGPUAPIWriteType::eJOINT_POSITION:
		case PxArticulationGPUAPIWriteType::eJOINT_VELOCITY:
		case PxArticulationGPUAPIWriteType::eJOINT_FORCE:
		case PxArticulationGPUAPIWriteType::eJOINT_TARGET_VELOCITY:
		case PxArticulationGPUAPIWriteType::eJOINT_TARGET_POSITION:
			subElemCount = maxDofs;
			subElemSize = sizeof(PxReal);
			break;

		// a single transform for the root link
		case PxArticulationGPUAPIWriteType::eROOT_GLOBAL_POSE:
			subElemCount = 1;
			subElemSize = sizeof(PxTransform);
			break;

		// a single vector for the root link
		case PxArticulationGPUAPIWriteType::eROOT_LINEAR_VELOCITY:
		case PxArticulationGPUAPIWriteType::eROOT_ANGULAR_VELOCITY:
			subElemCount = 1;
			subElemSize = sizeof(PxVec3);
			break;

		// one vector per link
		case PxArticulationGPUAPIWriteType::eLINK_FORCE:
		case PxArticulationGPUAPIWriteType::eLINK_TORQUE:
			subElemCount = maxLinks;
			subElemSize = sizeof(PxVec3);
			break;

		case PxArticulationGPUAPIWriteType::eFIXED_TENDON:
			subElemCount = maxFixedTendons;
			subElemSize = sizeof(PxGpuFixedTendonData);
			break;

		// tendon joints are laid out as a maxFixedTendons x maxTendonJoints grid
		case PxArticulationGPUAPIWriteType::eFIXED_TENDON_JOINT:
			subElemCount = maxFixedTendons * maxTendonJoints;
			subElemSize = sizeof(PxGpuTendonJointCoefficientData);
			break;

		case PxArticulationGPUAPIWriteType::eSPATIAL_TENDON:
			subElemCount = maxSpatialTendons;
			subElemSize = sizeof(PxGpuSpatialTendonData);
			break;

		// attachments are laid out as a maxSpatialTendons x maxSpatialTendonAttachments grid
		case PxArticulationGPUAPIWriteType::eSPATIAL_TENDON_ATTACHMENT:
			subElemCount = maxSpatialTendons * maxSpatialTendonAttachments;
			subElemSize = sizeof(PxGpuTendonAttachmentData);
			break;

		default:
			// unknown write type: report an empty block
			PX_ALWAYS_ASSERT();
			break;
	}

	nbSubElements = subElemCount;
	blockSize = subElemSize * subElemCount;
}
#endif
}
#endif

View File

@@ -0,0 +1,142 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_TRANSFORM_CACHE_H
#define PXS_TRANSFORM_CACHE_H
#include "CmIDPool.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxTransform.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxPinnedArray.h"
#define PX_DEFAULT_CACHE_SIZE 512
namespace physx
{
// Flag bits stored in PxsCachedTransform::flags.
struct PxsTransformFlag
{
	enum Flags
	{
		eFROZEN = (1 << 0)	// entry belongs to a frozen (non-moving) object
	};
};
// One entry of the transform cache: a pose plus PxsTransformFlag bits,
// 16-byte aligned (PxTransform is 7 floats, flags pads the struct to 32 bytes).
struct PX_ALIGN_PREFIX(16) PxsCachedTransform
{
	PxTransform transform;
	PxU32 flags;	// combination of PxsTransformFlag::Flags bits

	// Non-zero if the eFROZEN bit is set (returns the raw masked bit, not 0/1).
	PX_FORCE_INLINE PxU32 isFrozen() const { return flags & PxsTransformFlag::eFROZEN; }
}
PX_ALIGN_SUFFIX(16);
class PxsTransformCache : public PxUserAllocated
{
typedef PxU32 RefCountType;
public:
PxsTransformCache(PxVirtualAllocatorCallback& allocatorCallback) : mTransformCache(PxVirtualAllocator(&allocatorCallback)), mHasAnythingChanged(true)
{
/*mTransformCache.reserve(PX_DEFAULT_CACHE_SIZE);
mTransformCache.forceSize_Unsafe(PX_DEFAULT_CACHE_SIZE);*/
mUsedSize = 0;
}
void initEntry(PxU32 index)
{
PxU32 oldCapacity = mTransformCache.capacity();
if (index >= oldCapacity)
{
PxU32 newCapacity = PxNextPowerOfTwo(index);
mTransformCache.reserve(newCapacity);
mTransformCache.forceSize_Unsafe(newCapacity);
}
mUsedSize = PxMax(mUsedSize, index + 1u);
}
PX_FORCE_INLINE void setTransformCache(const PxTransform& transform, PxU32 flags, PxU32 index, PxU32 /*indexFrom*/)
{
mTransformCache[index].transform = transform;
mTransformCache[index].flags = flags;
mHasAnythingChanged = true;
}
PX_FORCE_INLINE const PxsCachedTransform& getTransformCache(PxU32 index) const
{
return mTransformCache[index];
}
PX_FORCE_INLINE PxsCachedTransform& getTransformCache(PxU32 index)
{
return mTransformCache[index];
}
PX_FORCE_INLINE void shiftTransforms(const PxVec3& shift)
{
for (PxU32 i = 0; i < mTransformCache.capacity(); i++)
{
mTransformCache[i].transform.p += shift;
}
mHasAnythingChanged = true;
}
PX_FORCE_INLINE PxU32 getTotalSize() const
{
return mUsedSize;
}
PX_FORCE_INLINE const PxsCachedTransform* getTransforms() const
{
return mTransformCache.begin();
}
PX_FORCE_INLINE PxsCachedTransform* getTransforms()
{
return mTransformCache.begin();
}
PX_FORCE_INLINE PxCachedTransformArrayPinned* getCachedTransformArray()
{
return &mTransformCache;
}
PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; }
PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; }
PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; }
protected:
PxCachedTransformArrayPinned mTransformCache;
private:
PxU32 mUsedSize;
bool mHasAnythingChanged;
};
}
#endif

View File

@@ -0,0 +1,205 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_NPHASE_IMPLEMENTATION_CONTEXT_H
#define PXV_NPHASE_IMPLEMENTATION_CONTEXT_H
#include "PxSceneDesc.h"
#include "PxsContactManagerState.h"
#include "foundation/PxArray.h"
#include "PxsNphaseCommon.h"
// PT: TODO: forward decl don't work easily with templates, to revisit
#include "PxsMaterialCore.h"
#include "PxsDeformableSurfaceMaterialCore.h"
#include "PxsDeformableVolumeMaterialCore.h"
#include "PxsPBDMaterialCore.h"
namespace physx
{
// Forward declarations: island-management (IG), common-task (Cm) and
// simulation-controller (Sc) types that are only referenced by pointer or
// reference in this header.
namespace IG
{
class IslandSim;
// Index identifying an edge in the island graph.
typedef PxU32 EdgeIndex;
}
namespace Cm
{
class FanoutTask;
}
namespace Sc
{
class ShapeInteraction;
}
class PxNodeIndex;
class PxBaseTask;
class PxsContext;
struct PxsShapeCore;
class PxsContactManager;
struct PxsContactManagerOutput;
struct PxsTorsionalFrictionData;
class PxsContactManagerOutputIterator
{
PxU32 mOffsets[1<<PxsContactManagerBase::MaxBucketBits];
PxsContactManagerOutput* mOutputs;
public:
PxsContactManagerOutputIterator() : mOutputs(NULL)
{
}
PxsContactManagerOutputIterator(const PxU32* offsets, PxU32 nbOffsets, PxsContactManagerOutput* outputs) : mOutputs(outputs)
{
PX_ASSERT(nbOffsets <= (1<<PxsContactManagerBase::MaxBucketBits));
for(PxU32 a = 0; a < nbOffsets; ++a)
{
mOffsets[a] = offsets[a];
}
}
PX_FORCE_INLINE PxsContactManagerOutput& getContactManagerOutput(PxU32 id)
{
PX_ASSERT((id & PxsContactManagerBase::NEW_CONTACT_MANAGER_MASK) == 0);
PxU32 bucketId = PxsContactManagerBase::computeBucketIndexFromId(id);
PxU32 cmOutId = PxsContactManagerBase::computeIndexFromId(id);
return mOutputs[mOffsets[bucketId] + cmOutId];
}
PxU32 getIndex(PxU32 id) const
{
PX_ASSERT((id & PxsContactManagerBase::NEW_CONTACT_MANAGER_MASK) == 0);
PxU32 bucketId = PxsContactManagerBase::computeBucketIndexFromId(id);
PxU32 cmOutId = PxsContactManagerBase::computeIndexFromId(id);
return mOffsets[bucketId] + cmOutId;
}
};
// Abstract interface to a narrow-phase (contact generation) implementation.
// A concrete implementation is driven through these virtuals by the simulation
// pipeline. NOTE(review): the semantics of the individual entry points live in
// the concrete implementations, which are not visible from this header.
class PxvNphaseImplementationContext
{
PX_NOCOPY(PxvNphaseImplementationContext)
// Owning context, captured at construction; not accessed by this interface itself.
PxsContext& mContext;
public:
PxvNphaseImplementationContext(PxsContext& context): mContext(context) {}
virtual ~PxvNphaseImplementationContext() {}
// Tears the implementation down (counterpart of the create* factory functions).
virtual void destroy() = 0;
// --- per-step contact-manager update pipeline ---
virtual void updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation, PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShapeTask) = 0;
virtual void postBroadPhaseUpdateContactManager(PxBaseTask* continuation) = 0;
virtual void secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation) = 0;
virtual void fetchUpdateContactManager() = 0;
// --- contact-manager registration / bookkeeping ---
virtual void registerContactManager(PxsContactManager* cm, const Sc::ShapeInteraction* interaction, PxI32 touching, PxU32 patchCount) = 0;
// virtual void registerContactManagers(PxsContactManager** cm, Sc::ShapeInteraction** shapeInteractions, PxU32 nbContactManagers, PxU32 maxContactManagerId) = 0;
virtual void unregisterContactManager(PxsContactManager* cm) = 0;
virtual void refreshContactManager(PxsContactManager* cm) = 0;
// --- shape / aggregate registration against the transform cache ---
virtual void registerShape(const PxNodeIndex& nodeIndex, const PxsShapeCore& shapeCore, const PxU32 transformCacheID, PxActor* actor, const bool isDeformableSurface = false) = 0;
virtual void unregisterShape(const PxsShapeCore& shapeCore, const PxU32 transformCacheID, const bool isDeformableSurface = false) = 0;
virtual void registerAggregate(const PxU32 transformCacheID) = 0;
// --- material lifecycle, one overload set per material type (the material
// headers are included rather than forward-declared; see the TODO at the top
// of this file about forward declarations and templates) ---
virtual void registerMaterial(const PxsMaterialCore& materialCore) = 0;
virtual void updateMaterial(const PxsMaterialCore& materialCore) = 0;
virtual void unregisterMaterial(const PxsMaterialCore& materialCore) = 0;
virtual void registerMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
virtual void updateMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
virtual void unregisterMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
virtual void registerMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
virtual void updateMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
virtual void unregisterMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
virtual void registerMaterial(const PxsPBDMaterialCore& materialCore) = 0;
virtual void updateMaterial(const PxsPBDMaterialCore& materialCore) = 0;
virtual void unregisterMaterial(const PxsPBDMaterialCore& materialCore) = 0;
virtual void updateShapeMaterial(const PxsShapeCore& shapeCore) = 0;
// --- task kick-off and result access ---
virtual void startNarrowPhaseTasks() = 0;
virtual void appendContactManagers() = 0;
virtual PxsContactManagerOutput& getNewContactManagerOutput(PxU32 index) = 0;
virtual PxsContactManagerOutputIterator getContactManagerOutputs() = 0;
virtual void setContactModifyCallback(PxContactModifyCallback* callback) = 0;
// --- context acquisition, buffer preallocation and locking hooks ---
virtual void acquireContext() = 0;
virtual void releaseContext() = 0;
virtual void preallocateNewBuffers(PxU32 nbNewPairs, PxU32 maxIndex) = 0;
virtual void lock() = 0;
virtual void unlock() = 0;
// --- lost/found contact-patch reporting ---
virtual PxsContactManagerOutputCounts* getLostFoundPatchOutputCounts() = 0;
virtual PxsContactManager** getLostFoundPatchManagers() = 0;
virtual PxU32 getNbLostFoundPatchManagers() = 0;
//GPU-specific buffers. Return null for CPU narrow phase
virtual PxsContactManagerOutput* getGPUContactManagerOutputBase() = 0;
virtual PxReal* getGPURestDistances() = 0;
virtual Sc::ShapeInteraction** getGPUShapeInteractions() = 0;
virtual PxsTorsionalFrictionData* getGPUTorsionalData() = 0;
};
// Extension of the narrow-phase interface with "fallback" entry points that
// operate on an externally supplied PxsContactManagerOutput buffer, plus
// const accessors for GPU-mirrored per-pair data. NOTE(review): concrete
// semantics are defined by the implementation returned from
// createNphaseImplementationContext, not visible here.
class PxvNphaseImplementationFallback: public PxvNphaseImplementationContext
{
PX_NOCOPY(PxvNphaseImplementationFallback)
public:
PxvNphaseImplementationFallback(PxsContext& context) : PxvNphaseImplementationContext(context) {}
virtual ~PxvNphaseImplementationFallback() {}
virtual void unregisterContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) = 0;
virtual void processContactManager(PxReal dt, PxsContactManagerOutput* cmOutputs, PxBaseTask* continuation) = 0;
virtual void processContactManagerSecondPass(PxReal dt, PxBaseTask* continuation) = 0;
virtual void refreshContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) = 0;
virtual void appendContactManagersFallback(PxsContactManagerOutput* outputs) = 0;
virtual void removeContactManagersFallback(PxsContactManagerOutput* cmOutputs) = 0;
// Per-pair data mirrored for GPU use (read-only views).
virtual const Sc::ShapeInteraction*const* getShapeInteractionsGPU() const = 0;
virtual const PxReal* getRestDistancesGPU() const = 0;
virtual const PxsTorsionalFrictionData* getTorsionalDataGPU() const = 0;
};
// Factory for a narrow-phase implementation context. Presumably gpuDynamics
// selects GPU-backed behavior and islandSim/allocator may be null for the
// pure-CPU path — confirm against the defining .cpp.
PxvNphaseImplementationFallback* createNphaseImplementationContext(PxsContext& context, IG::IslandSim* islandSim, PxVirtualAllocatorCallback* allocator, bool gpuDynamics);
}
#endif